-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper.py
More file actions
492 lines (401 loc) · 19 KB
/
scraper.py
File metadata and controls
492 lines (401 loc) · 19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
#!/usr/bin/env python3
"""
Reactome.org Web Scraper
Scrapes reactome.org pages and saves:
- <div class="item-page"> elements
- <div class="leading-n" itemprop="blogpost"> elements
Files are organized in directories based on their URL routes.
"""
import os
import re
import time
import json
import logging
import hashlib
from urllib.parse import urljoin, urlparse
from collections import deque
import requests
from bs4 import BeautifulSoup
# Configuration
BASE_URL = "https://reactome.org"          # Site root; crawl never leaves this host
OUTPUT_DIR = "scraped_pages"               # All scraped HTML/images land here
DELAY_BETWEEN_REQUESTS = 1.0  # seconds, be polite to the server
MAX_PAGES = 1000  # Set to a number to limit pages, None for unlimited
REQUEST_TIMEOUT = 30                       # Per-request timeout in seconds
# Setup logging: everything goes both to scraper.log and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('scraper.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
class ReactomeScraper:
    """Breadth-first crawler for reactome.org.

    Fetches pages, extracts ``<div class="item-page">`` and blog-post divs,
    downloads the images those divs reference, and writes everything under
    ``output_dir`` in a directory layout that mirrors the URL routes.
    """

    def __init__(self, base_url=BASE_URL, output_dir=OUTPUT_DIR, delay=DELAY_BETWEEN_REQUESTS, max_pages=MAX_PAGES):
        """Initialize crawl state and an HTTP session with polite headers.

        Args:
            base_url: Site root used as the default crawl start point.
            output_dir: Directory where scraped files are written (created
                if missing).
            delay: Seconds to sleep between requests.
            max_pages: Stop after this many pages; falsy means unlimited.
        """
        self.base_url = base_url
        self.output_dir = output_dir
        self.delay = delay
        self.max_pages = max_pages
        self.visited_urls = set()
        # Map of image URL -> relative local path of the saved file.
        # A dict (not a set) so that a repeat reference from a *different*
        # page route returns the path where the file was actually written,
        # instead of fabricating a path under the new route where no file
        # exists.
        self.downloaded_uploads = {}
        self.urls_to_visit = deque()
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (compatible; ReactomeScraper/1.0; Educational purposes)',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
        })
        # Create output directory
        os.makedirs(self.output_dir, exist_ok=True)

    def is_valid_url(self, url):
        """Check if URL should be scraped.

        Rejects off-site hosts, non-HTML file extensions, and API/service
        paths that would waste crawl budget.
        """
        parsed = urlparse(url)
        # Only scrape URLs from reactome.org
        if parsed.netloc and parsed.netloc not in ['reactome.org', 'www.reactome.org']:
            return False
        # Skip non-HTML resources
        skip_extensions = ['.pdf', '.png', '.jpg', '.jpeg', '.gif', '.svg',
                           '.css', '.js', '.zip', '.tar', '.gz', '.xml', '.json']
        path_lower = parsed.path.lower()
        if any(path_lower.endswith(ext) for ext in skip_extensions):
            return False
        # Skip API endpoints and special paths
        skip_paths = ['/ContentService', '/AnalysisService', '/PathwayBrowser',
                      '/download', '/icon-lib', '/gsa', '/content/detail', '/content/schema']
        if any(parsed.path.startswith(skip) for skip in skip_paths):
            return False
        return True

    def normalize_url(self, url):
        """Normalize URL for deduplication.

        Drops the query string and fragment and strips any trailing slash so
        equivalent URLs collapse to one key in ``visited_urls``.
        """
        parsed = urlparse(url)
        # Remove fragment and normalize
        normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
        # Remove trailing slash for consistency
        return normalized.rstrip('/')

    def get_route_path(self, url):
        """Extract route path from URL for directory structure.

        The site root maps to the synthetic route 'index'.
        """
        parsed = urlparse(url)
        path = parsed.path.strip('/')
        if not path:
            return 'index'
        return path

    def get_image_local_path(self, img_url, page_route):
        """
        Determine the local path for an image based on the page route.
        uploads are stored in: scraped_pages/uploads/<page_route>/<filename>

        Returns:
            (absolute local filesystem path, sanitized filename) tuple.
        """
        parsed = urlparse(img_url)
        # Get the original filename
        original_filename = os.path.basename(parsed.path)
        if not original_filename:
            # Generate filename from URL hash
            url_hash = hashlib.md5(img_url.encode()).hexdigest()[:10]
            original_filename = f"image_{url_hash}.png"
        # Clean up the filename
        original_filename = re.sub(r'[^\w\-_\.]', '_', original_filename)
        # Build the path: uploads/<page_route>/<filename>
        image_dir = os.path.join(self.output_dir, 'uploads', page_route)
        local_path = os.path.join(image_dir, original_filename)
        return local_path, original_filename

    def download_image(self, img_url, page_route):
        """
        Download an image and save it locally.
        Returns the relative path to the image from the page's perspective,
        or None when the URL is skipped or the download fails.
        """
        # Normalize the image URL
        if img_url.startswith('//'):
            img_url = 'https:' + img_url
        # Skip data URLs
        if img_url.startswith('data:'):
            return None
        # Already downloaded: reuse the path recorded at first download --
        # the file only exists there, regardless of the current page_route.
        if img_url in self.downloaded_uploads:
            return self.downloaded_uploads[img_url]
        try:
            response = self.session.get(img_url, timeout=REQUEST_TIMEOUT, stream=True)
            response.raise_for_status()
            # Verify it's an image
            content_type = response.headers.get('Content-Type', '')
            if not any(t in content_type for t in ['image/', 'application/octet-stream']):
                logger.debug(f"Skipping non-image content: {img_url}")
                return None
            local_path, filename = self.get_image_local_path(img_url, page_route)
            # Create directory if needed
            os.makedirs(os.path.dirname(local_path), exist_ok=True)
            # Write the image
            with open(local_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            # Forward slashes on purpose: this path is written into HTML
            # src attributes, where os.sep would be wrong on Windows.
            relative_path = f"uploads/{page_route}/{filename}"
            self.downloaded_uploads[img_url] = relative_path
            logger.info(f"Downloaded image: {img_url} -> {local_path}")
            # Return relative path from page to image
            return relative_path
        except requests.exceptions.RequestException as e:
            logger.warning(f"Failed to download image {img_url}: {e}")
            return None
        except Exception as e:
            logger.warning(f"Error saving image {img_url}: {e}")
            return None

    def process_uploads_in_content(self, soup, current_url, page_route):
        """
        Find all uploads in the content, download them, and update src attributes.
        Returns the modified soup (modification happens in place).
        """
        for img in soup.find_all('img'):
            src = img.get('src')
            if not src:
                continue
            # Convert relative URLs to absolute
            absolute_url = urljoin(current_url, src)
            # Download the image
            local_path = self.download_image(absolute_url, page_route)
            if local_path:
                # Update the src to point to local image
                # Path is relative from scraped_pages root
                img['src'] = local_path
                img['data-original-src'] = absolute_url  # Keep original for reference
        return soup

    def save_content(self, url, item_page_content, blog_post_content):
        """Save extracted content to files organized by route.

        item-page HTML goes to ``<route>/item-page.html`` (or directly to
        ``<route>`` when the route itself ends in '.html'); blog posts go to
        ``<route>/blogpost-<n>.html``.
        """
        route_path = self.get_route_path(url)
        # Create directory structure
        dir_path = os.path.join(self.output_dir, os.path.dirname(route_path))
        if dir_path:
            os.makedirs(dir_path, exist_ok=True)
        base_filename = os.path.basename(route_path) or 'index'
        # Save item-page content
        if item_page_content:
            filepath = os.path.join(self.output_dir, route_path)
            if not filepath.endswith('.html'):
                # Create as directory with index.html
                os.makedirs(filepath, exist_ok=True)
                filepath = os.path.join(filepath, 'item-page.html')
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(f"<!-- Source: {url} -->\n")
                f.write(f"<!-- Scraped: {time.strftime('%Y-%m-%d %H:%M:%S')} -->\n")
                f.write(item_page_content)
            logger.info(f"Saved item-page: {filepath}")
        # Save blog post content
        if blog_post_content:
            dirpath = os.path.join(self.output_dir, route_path)
            # A route ending in '.html' may already exist as the item-page
            # *file* written above; strip the suffix so makedirs does not
            # collide with it.
            if dirpath.endswith('.html'):
                dirpath = dirpath[:-len('.html')]
            os.makedirs(dirpath, exist_ok=True)
            for i, content in enumerate(blog_post_content):
                blog_filepath = os.path.join(dirpath, f'blogpost-{i+1}.html')
                with open(blog_filepath, 'w', encoding='utf-8') as f:
                    f.write(f"<!-- Source: {url} -->\n")
                    f.write(f"<!-- Scraped: {time.strftime('%Y-%m-%d %H:%M:%S')} -->\n")
                    f.write(content)
                logger.info(f"Saved blogpost: {blog_filepath}")

    def extract_content(self, soup, url):
        """Extract target div elements from the page.

        Returns:
            (item_page_html_or_None, list_of_blogpost_html) tuple.
        """
        item_page_content = None
        blog_post_content = []
        # Get page route for image organization
        page_route = self.get_route_path(url)
        # Find <div class="item-page">
        item_page = soup.find('div', class_='item-page')
        if item_page:
            # Process uploads in this content
            self.process_uploads_in_content(item_page, url, page_route)
            item_page_content = str(item_page)
            logger.debug(f"Found item-page in {url}")
        # Find <div class="leading-n" itemprop="blogpost">
        # The class might be "leading-0", "leading-1", etc.
        blog_posts = soup.find_all('div', attrs={'itemprop': 'blogPost'})
        if not blog_posts:
            # Try alternate case
            blog_posts = soup.find_all('div', attrs={'itemprop': 'blogpost'})
        # Also try finding by class pattern
        if not blog_posts:
            blog_posts = soup.find_all('div', class_=re.compile(r'^leading-\d+$'))
        for post in blog_posts:
            # Process uploads in this content
            self.process_uploads_in_content(post, url, page_route)
            blog_post_content.append(str(post))
            logger.debug(f"Found blogpost in {url}")
        return item_page_content, blog_post_content

    def extract_links(self, soup, current_url):
        """Extract all valid internal links from the page as normalized URLs."""
        links = set()
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href']
            # Skip empty, javascript, and mailto links
            if not href or href.startswith(('javascript:', 'mailto:', '#')):
                continue
            # Convert relative URLs to absolute
            absolute_url = urljoin(current_url, href)
            normalized_url = self.normalize_url(absolute_url)
            if self.is_valid_url(normalized_url):
                links.add(normalized_url)
        return links

    def scrape_page(self, url):
        """Scrape a single page.

        Returns:
            Set of normalized internal links found on the page (empty on
            error or non-HTML responses).
        """
        try:
            response = self.session.get(url, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
            # Check content type
            content_type = response.headers.get('Content-Type', '')
            if 'text/html' not in content_type:
                logger.debug(f"Skipping non-HTML content: {url}")
                return set()
            soup = BeautifulSoup(response.text, 'html.parser')
            # Extract and save content
            item_page_content, blog_post_content = self.extract_content(soup, url)
            if item_page_content or blog_post_content:
                self.save_content(url, item_page_content, blog_post_content)
            else:
                logger.debug(f"No target content found: {url}")
            # Extract links for crawling
            new_links = self.extract_links(soup, url)
            return new_links
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching {url}: {e}")
            return set()
        except Exception as e:
            logger.error(f"Error processing {url}: {e}")
            return set()

    def crawl(self, start_urls=None):
        """Crawl the website starting from given URLs.

        Breadth-first over the internal link graph, honoring
        ``self.max_pages`` and ``self.delay``.

        Returns:
            Number of pages scraped.
        """
        if start_urls is None:
            start_urls = [self.base_url]
        # Track URLs already queued to avoid duplicates in the queue
        queued_urls = set()
        # Initialize queue with start URLs
        for url in start_urls:
            normalized = self.normalize_url(url)
            if normalized not in self.visited_urls and normalized not in queued_urls:
                self.urls_to_visit.append(normalized)
                queued_urls.add(normalized)
        pages_scraped = 0
        while self.urls_to_visit:
            if self.max_pages and pages_scraped >= self.max_pages:
                logger.info(f"Reached maximum pages limit: {self.max_pages}")
                break
            url = self.urls_to_visit.popleft()
            if url in self.visited_urls:
                continue
            logger.info(f"Scraping ({pages_scraped + 1}): {url}")
            self.visited_urls.add(url)
            new_links = self.scrape_page(url)
            # Add new links to queue (only if not visited and not already queued)
            for link in new_links:
                if link not in self.visited_urls and link not in queued_urls:
                    self.urls_to_visit.append(link)
                    queued_urls.add(link)
            pages_scraped += 1
            # Be polite - wait between requests
            time.sleep(self.delay)
        logger.info(f"Crawling complete. Total pages scraped: {pages_scraped}")
        return pages_scraped
def get_seed_urls():
    """Get initial URLs to start crawling from the navigation structure."""
    root = "https://reactome.org"
    # Route suffixes relative to the site root; "" is the homepage itself.
    routes = (
        "",
        "/what-is-reactome",
        "/about/news",
        "/about/team",
        "/sab",
        "/about/funding",
        "/about/editorial-calendar",
        "/about/release-calendar",
        "/about/statistics",
        "/about/logo",
        "/license",
        "/about/privacy-notice",
        "/about/disclaimer",
        "/about/digital-preservation",
        "/staff",
        "/about/contact-us",
        "/content/toc",
        "/content/doi",
        "/content/schema",
        "/content/reactome-research-spotlight",
        "/orcid",
        "/covid-19",
        "/documentation",
        "/userguide",
        "/userguide/pathway-browser",
        "/userguide/searching",
        "/userguide/details-panel",
        "/userguide/analysis",
        "/userguide/diseases",
        "/userguide/cytomics",
        "/userguide/review-status",
        "/userguide/reactome-fiviz",
        "/dev",
        "/dev/graph-database",
        "/dev/analysis",
        "/dev/content-service",
        "/dev/pathways-overview",
        "/dev/diagram",
        "/icon-info",
        "/icon-info/ehld-specs-guideline",
        "/icon-info/icons-guidelines",
        "/documentation/data-model",
        "/documentation/curator-guide",
        "/documentation/release-documentation",
        "/documentation/inferred-events",
        "/documentation/faq",
        "/linking-to-us",
        "/cite",
        "/tools/reactome-fiviz",
        "/tools/site-search",
        "/community",
        "/community/collaboration",
        "/community/outreach",
        "/community/events",
        "/community/publications",
        "/community/partners",
        "/content/contributors",
        "/community/resources",
    )
    return [root + route for route in routes]
def main():
    """Main entry point: parse CLI arguments and run the scraper.

    Three modes: full crawl from the seed URLs (default), seed-only
    scraping (--seed-only), or scraping an explicit URL list from a JSON
    file (--urls-file, implies no recursive crawl).
    """
    import argparse
    parser = argparse.ArgumentParser(description='Scrape reactome.org pages')
    parser.add_argument('--output', '-o', default=OUTPUT_DIR,
                        help='Output directory for scraped pages')
    # Default tracks the module constant instead of repeating the value.
    parser.add_argument('--delay', '-d', type=float, default=DELAY_BETWEEN_REQUESTS,
                        help='Delay between requests in seconds')
    parser.add_argument('--max-pages', '-m', type=int, default=None,
                        help='Maximum number of pages to scrape')
    parser.add_argument('--seed-only', '-s', action='store_true',
                        help='Only scrape seed URLs, do not crawl further')
    parser.add_argument('--urls-file', '-u', default=None,
                        help='Path to a JSON file containing an array of URLs to scrape')
    args = parser.parse_args()
    delay = args.delay
    max_pages = args.max_pages
    scraper = ReactomeScraper(output_dir=args.output, delay=delay, max_pages=max_pages)
    if args.urls_file:
        try:
            with open(args.urls_file, 'r', encoding='utf-8') as f:
                seed_urls = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            logger.error(f'Could not read URLs file {args.urls_file}: {e}')
            return
        # Enforce what the error message promises: a flat array of strings.
        if not isinstance(seed_urls, list) or not all(isinstance(u, str) for u in seed_urls):
            logger.error('URLs file must contain a JSON array of strings')
            return
        logger.info(f'Loaded {len(seed_urls)} URLs from {args.urls_file}')
    else:
        seed_urls = get_seed_urls()
    if args.urls_file or args.seed_only:
        # Scrape only the provided URLs, no recursive crawling
        for i, url in enumerate(seed_urls):
            if max_pages and i >= max_pages:
                logger.info(f"Reached maximum pages limit: {max_pages}")
                break
            # Normalize before the visited check so duplicates in the input
            # (trailing slash, fragment) are not fetched twice -- matches
            # the dedup behavior of ReactomeScraper.crawl().
            url = scraper.normalize_url(url)
            if url not in scraper.visited_urls:
                logger.info(f"Scraping ({i + 1}/{len(seed_urls)}): {url}")
                scraper.visited_urls.add(url)
                scraper.scrape_page(url)
                time.sleep(scraper.delay)
    else:
        # Full crawl
        scraper.crawl(seed_urls)
    print(f"\nScraping complete!")
    print(f"Output directory: {os.path.abspath(args.output)}")
    print(f"Total pages visited: {len(scraper.visited_urls)}")
# Script entry point: only run the scraper when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()