-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
341 lines (297 loc) · 11.5 KB
/
main.py
File metadata and controls
341 lines (297 loc) · 11.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
import argparse
import csv
import os
import time
from pathlib import Path
from typing import Dict, List
import bs4
import requests
from bs4 import BeautifulSoup
# GROBID full-text endpoint; overridable via the GROBID_URL environment variable.
GROBID_URL = os.getenv("GROBID_URL", "http://grobid:8070/api/processFulltextDocument")
# Ask GROBID to return TEI XML.
HEADERS = {"Accept": "application/xml"}
# Enable GROBID's citation and funder consolidation.
GROBID_PARAMS = {"consolidateCitations": "1", "consolidateFunders": "1"}
# Mixed-case TEI tag names that clean_tags() renames to lowercase so that
# attribute-style lookups (e.g. tag.titlestmt) can find them afterwards.
SUBSTITUTE_TAGS = {
'persName',
'orgName',
'publicationStmt',
'titleStmt',
'biblScope'
}
def get_affiliation_from_grobid_xml(raw_xml: BeautifulSoup) -> Dict:
    """
    Extract an author's affiliation (laboratory, institution, location)
    from its GROBID TEI element.
    :param raw_xml: an <author> element (tags already lowercased)
    :return: dict with keys laboratory/institution/location, or {} when
             neither a laboratory nor an institution name was found
    """
    laboratory = ""
    institution = ""
    location = dict()
    affiliation = raw_xml.affiliation if raw_xml else None
    if affiliation:
        for node in affiliation:
            if node.name == "orgname" and node.has_attr("type"):
                if node["type"] == "laboratory":
                    laboratory = node.text
                elif node["type"] == "institution":
                    institution = node.text
            elif node.name == "address":
                # Collect address parts (settlement, country, ...) by tag name.
                for part in node:
                    if part.name and part.text:
                        location[part.name] = part.text
    if not (laboratory or institution):
        return {}
    return {
        "laboratory": laboratory,
        "institution": institution,
        "location": location
    }
def get_author_data_from_grobid_xml(raw_xml: BeautifulSoup) -> List[Dict]:
    """
    Returns a list of dictionaries, one for each author,
    containing the first and last names.
    e.g.
    {
        "first": first,
        "middle": middle,
        "last": last,
        "suffix": suffix,
        "affiliation": {
            "laboratory": "",
            "institution": "",
            "location": "",
        },
        "email": ""
    }
    :param raw_xml: soup containing <author> elements
    """
    authors = []
    for author in raw_xml.find_all("author"):
        first = ""
        middle = []
        last = ""
        suffix = ""
        if author.persname:
            # forenames include first and middle names
            forenames = author.persname.find_all("forename")
            # surnames include last names
            surnames = author.persname.find_all("surname")
            # name suffixes (e.g. Jr., III)
            suffixes = author.persname.find_all("suffix")
            for forename in forenames:
                if forename.has_attr("type"):
                    if forename["type"] == "first":
                        # Keep the first "first" forename; extras become middles.
                        if not first:
                            first = forename.text
                        else:
                            middle.append(forename.text)
                    elif forename["type"] == "middle":
                        middle.append(forename.text)
            if len(surnames) > 1:
                # Multiple surnames: all but the final one are treated as middles.
                for surname in surnames[:-1]:
                    middle.append(surname.text)
                last = surnames[-1].text
            elif len(surnames) == 1:
                last = surnames[0].text
            # BUG FIX: the original tested len(suffix) — the still-empty string —
            # instead of the `suffixes` list, so suffixes were never collected.
            if suffixes:
                suffix = " ".join(s.text for s in suffixes)
        affiliation = get_affiliation_from_grobid_xml(author)
        email = author.email.text if author.email else ""
        author_dict = {
            "first": first,
            "middle": middle,
            "last": last,
            "suffix": suffix,
            "affiliation": affiliation,
            "email": email
        }
        authors.append(author_dict)
    return authors
def get_publication_datetime_from_grobid_xml(raw_xml: BeautifulSoup) -> str:
    """
    Finds and returns the publication datetime if it exists.

    BUG FIX: clean_tags() lowercases <publicationStmt> before this runs and
    XML tag lookup is case-sensitive, so the original mixed-case guard
    `raw_xml.publicationStmt` never matched and the date was always "".
    Both the guard and the iteration now use the lowercased tag name.
    :param raw_xml:
    :return: the "when" attribute of the published <date>, or ""
    """
    publication_stmt = raw_xml.publicationstmt
    if publication_stmt:
        for child in publication_stmt:
            if child.name == "date" \
                    and child.has_attr("type") \
                    and child["type"] == "published" \
                    and child.has_attr("when"):
                return child["when"]
    return ""
def clean_tags(el: bs4.element.Tag):
    """
    Rename every occurrence of the mixed-case tags listed in
    SUBSTITUTE_TAGS to its lowercase form, in place.
    :param el: element whose subtree is rewritten
    :return: None (mutates el)
    """
    for original_name in SUBSTITUTE_TAGS:
        lowered = original_name.lower()
        for match in el.find_all(original_name):
            match.name = lowered
def extract_paper_metadata_from_grobid_xml(tag: bs4.element.Tag) -> Dict:
    """
    Extract paper metadata (title, authors, publication date) from the
    GROBID <fileDesc> element.
    :param tag: the fileDesc tag of the TEI document
    :return: dict with keys "title", "authors", "year"
    """
    clean_tags(tag)
    # ROBUSTNESS FIX: guard against documents where GROBID produced no
    # <titleStmt>/<title>; the original chained attribute access raised
    # AttributeError on such PDFs.
    title = ""
    if tag.titlestmt and tag.titlestmt.title:
        title = tag.titlestmt.title.text
    paper_metadata = {
        "title": title,
        "authors": get_author_data_from_grobid_xml(tag),
        "year": get_publication_datetime_from_grobid_xml(tag)
    }
    return paper_metadata
def get_text(div: bs4.element.Tag):
    """
    Flatten a <div> to plain text. When the div has a <head>, the result
    is "head-text: remaining-sibling-text"; otherwise the text of its
    children joined by single spaces.
    :param div: TEI div element
    :return: str
    """
    if div.head:
        trailing = [sibling.text for sibling in div.head.next_siblings]
        return f'{div.head.text}: {" ".join(trailing)}'
    # DEAD-CODE FIX: the original `if div.children: ... elif div.text:` never
    # reached the elif — .children is an iterator and therefore always truthy.
    # For an empty div both paths yield "", so this join is equivalent.
    return " ".join(child.text for child in div.children)
def extract_disclosure_from_tei_xml(
    sp: BeautifulSoup
):
    """
    Collect candidate disclosure/funding text from a TEI document.

    All back-matter sub-div text is collected unconditionally (and <back>
    is then removed from the soup); body divs are added only when their
    heading mentions one of the disclosure keywords.
    :param sp: full TEI soup (mutated: <back> is decomposed)
    :return: (list of text snippets, True if a body heading matched)
    """
    keywords = ['funding', 'competing', 'conflict', 'disclosure', 'statement', 'information']
    snippets = []
    matched = False

    def _head_matches(candidate):
        # True when the div has a heading containing any keyword.
        if not candidate.head:
            return False
        heading = candidate.head.text.lower()
        return any(word in heading for word in keywords)

    back = sp.back
    if back:
        for outer in back.find_all('div'):
            snippets.extend(get_text(inner) for inner in outer.find_all('div'))
        back.decompose()

    body = sp.body
    if body:
        for outer in body.find_all('div'):
            nested = outer.find_all('div')
            if nested:
                for inner in nested:
                    if _head_matches(inner):
                        snippets.append(get_text(inner))
                        matched = True
            else:
                if _head_matches(outer):
                    snippets.append(get_text(outer))
                    matched = True
    return snippets, matched
def _clean_empty_and_duplicate_authors_from_grobid_parse(authors: List[Dict]) -> List[Dict]:
    """
    Normalize whitespace in author name parts, drop authors whose name is
    entirely empty, and merge duplicate authors (same cleaned name),
    keeping the first occurrence's position while filling in email and
    affiliation from later duplicates when present.
    """
    # Pass 1: strip whitespace and discard entirely-empty names.
    kept = []
    for entry in authors:
        stripped_first = entry['first'].strip()
        stripped_last = entry['last'].strip()
        stripped_middle = [part.strip() for part in entry['middle']]
        if not (stripped_first or stripped_last or stripped_middle):
            continue
        entry['first'] = stripped_first
        entry['last'] = stripped_last
        entry['middle'] = stripped_middle
        entry['suffix'] = entry['suffix'].strip()
        kept.append(entry)
    # Pass 2: merge duplicates, preserving first-seen order.
    merged = {}
    order = []
    for entry in kept:
        key = (entry['first'], entry['last'], ' '.join(entry['middle']), entry['suffix'])
        existing = merged.get(key)
        if existing is None:
            merged[key] = entry
            order.append(key)
            continue
        if entry['email']:
            existing['email'] = entry['email']
        affiliation = entry['affiliation']
        if affiliation and (affiliation['institution'] or affiliation['laboratory'] or affiliation['location']):
            existing['affiliation'] = affiliation
    return [merged[key] for key in order]
def process_pdf(pdf_path):
    """
    Send a PDF file to GROBID and return extracted metadata.

    :param pdf_path: path to a PDF file on disk
    :return: metadata dict (title/authors/year/disclosure), or None when
             GROBID returns a non-200 status
    """
    with open(pdf_path, "rb") as pdf_file:
        response = requests.post(
            GROBID_URL,
            files={"input": pdf_file},
            data=GROBID_PARAMS,
            headers=HEADERS,
            # Avoid hanging forever; full-text processing can be slow.
            timeout=300,
        )
    if response.status_code != 200:
        print(f"Error processing {pdf_path}: {response.status_code}")
        return None
    soup = BeautifulSoup(response.text, "xml")
    # Best-effort debug dump of the raw TEI. FIX: the original
    # open(...).write(...) leaked the file handle and crashed the whole
    # extraction when /pdfs did not exist.
    try:
        with open("/pdfs/debug.xml", "w") as debug_file:
            debug_file.write(response.text)
    except OSError:
        pass
    metadata = extract_paper_metadata_from_grobid_xml(soup.fileDesc)
    # clean metadata authors (remove dupes etc)
    metadata['authors'] = _clean_empty_and_duplicate_authors_from_grobid_parse(metadata['authors'])
    back_matter, found = extract_disclosure_from_tei_xml(soup)
    print(f"Disclosure: {back_matter}, found: {found}")
    metadata['disclosure'] = ' '.join(back_matter)
    return metadata
def save_to_csv(data, output_csv):
    """
    Write extracted metadata rows to a tab-separated file.

    :param data: mapping of pdf filename -> list of per-author row dicts
    :param output_csv: destination path (tab-delimited despite the name)
    """
    header = ["PDF File", "Title", "Author Name", "Affiliation", "Email", "Disclosure Statement"]
    with open(output_csv, mode="w", newline="", encoding="utf-8") as handle:
        writer = csv.writer(handle, delimiter='\t')
        writer.writerow(header)
        for pdf_file, rows in data.items():
            # One output row per author dict, prefixed with the source file.
            writer.writerows([pdf_file, *row.values()] for row in rows)
def main(pdf_folder, output_csv):
    """
    Process all PDFs in a folder with GROBID and save results to CSV.

    :param pdf_folder: directory containing *.pdf files
    :param output_csv: path for the tab-separated output file
    """
    results = {}
    pdf_files = list(Path(pdf_folder).glob("*.pdf"))
    if not pdf_files:
        print(f"No PDF files found in {pdf_folder}")
        return
    for pdf in pdf_files:
        print(f"Processing {pdf}...")
        metadata = process_pdf(pdf)
        # BUG FIX: the original called metadata.get(...) before checking for
        # None, so any GROBID failure crashed the whole run with
        # AttributeError. Skip failed PDFs instead.
        if metadata is None:
            continue
        disclosure = metadata.get('disclosure', '')
        rows = []
        for author in metadata.get('authors', []):
            # Join only the non-empty name parts (the original left a double
            # space when the middle name was missing).
            name_parts = (author['first'], ' '.join(author['middle']), author['last'])
            full_name = " ".join(part for part in name_parts if part)
            affiliation = author.get('affiliation', {})
            affiliation_name = ''
            if affiliation:
                lab = affiliation.get('laboratory', '')
                institution = affiliation.get('institution', '')
                country = affiliation.get('location', {}).get('country', '')
                affiliation_name = f"{lab}, {institution}, {country}".strip(', ')
            rows.append({
                'Title': metadata['title'],
                'Author Name': full_name,
                'Affiliation': affiliation_name,
                'Email': author.get('email', ''),
                'Disclosure Statement': disclosure
            })
        results[pdf.name] = rows
        time.sleep(1)  # Avoid overloading the GROBID server
    save_to_csv(results, output_csv)
    print(f"Results saved to {output_csv}")
    print('Press CTRL+C to quit.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract metadata from PDFs using GROBID")
parser.add_argument("pdf_folder", help="Folder containing PDF files to process")
parser.add_argument("--output", default="output.csv", help="Output CSV file")
args = parser.parse_args()
main(args.pdf_folder, args.output)