-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path i4replication_scraper.py
More file actions
33 lines (26 loc) · 1.01 KB
/
i4replication_scraper.py
File metadata and controls
33 lines (26 loc) · 1.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import requests
from bs4 import BeautifulSoup
import csv
from tqdm import tqdm
# URL template for I4R (Institute for Replication) discussion papers on IDEAS/RePEc;
# {} is filled with the paper's sequential number.
base_url = "https://ideas.repec.org/p/zbw/i4rdps/{}.html"


def _extract_abstract(html):
    """Return the abstract text from an IDEAS paper page.

    Parses *html* and returns the stripped text of the <div id="abstract-body">
    element, or the sentinel string "Abstract not found" when that element
    is absent from the page.
    """
    soup = BeautifulSoup(html, 'html.parser')
    abstract_section = soup.find('div', id='abstract-body')
    if abstract_section:
        return abstract_section.get_text(strip=True)
    return "Abstract not found"


# Open a CSV file to save the abstracts
with open('abstracts.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['url', 'abstract'])
    # Reuse one TCP connection (HTTP keep-alive) for all ~190 requests.
    session = requests.Session()
    # Iterate over the range of URLs (papers 1 through 191)
    for i in tqdm(range(1, 192), desc="Scraping abstracts"):
        url = base_url.format(i)
        try:
            # timeout prevents the whole run from hanging on a stalled server
            response = session.get(url, timeout=30)
        except requests.RequestException as exc:
            # Network failure on one paper shouldn't abort the entire scrape;
            # report it and move on to the next URL.
            print(f"Failed to retrieve {url}: {exc}")
            continue
        # Check if request was successful
        if response.status_code == 200:
            # Write URL and abstract to CSV
            writer.writerow([url, _extract_abstract(response.text)])
        else:
            print(f"Failed to retrieve {url}")