main.py
from __future__ import annotations

import argparse
from http.cookiejar import MozillaCookieJar
from pathlib import Path

from rich import print as rprint

from ref_collector.workflow import run_pipeline


def _load_cookies(path: str) -> MozillaCookieJar:
    """Load a Netscape/Mozilla cookies.txt file into a cookie jar for reuse in requests."""
    jar = MozillaCookieJar()
    jar.load(path, ignore_discard=True, ignore_expires=True)
    return jar
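

# Illustrative usage sketch (comments only, not executed by this module): the
# returned jar can be handed to a `requests` session so that a previously
# solved Scholar captcha session is replayed on later requests. The path
# "cookies.txt" below is a placeholder, not a file this project ships.
#
#   import requests
#   session = requests.Session()
#   session.cookies = _load_cookies("cookies.txt")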


def parse_args() -> argparse.Namespace:
    """Parse CLI options for scraping, enrichment, and downloading steps."""
    parser = argparse.ArgumentParser(
        description="End-to-end helper to scrape Google Scholar, enrich with Crossref, download PDFs via Sci-Hub, and report missing papers."
    )
    source = parser.add_mutually_exclusive_group(required=False)
    source.add_argument("--profile-url", help="Google Scholar profile URL (e.g., https://scholar.google.com/citations?user=XXXX).")
    source.add_argument("--cited-url", help="Google Scholar 'cited by' search URL.")
    source.add_argument("--from-xlsx", help="Skip scraping and start from an existing metadata XLSX/CSV.")
    parser.add_argument("--workdir", default="_results/latest", help="Workspace folder to store outputs.")
    parser.add_argument("--pagesize", type=int, default=100, help="Items per page when scraping profiles (Scholar caps at 100).")
    parser.add_argument("--max-pages", type=int, help="Max pages to scrape (default: all).")
    parser.add_argument("--scrape-delay", type=float, default=2.5, help="Delay between Scholar page fetches (seconds).")
    parser.add_argument("--crossref-delay", type=float, default=1.0, help="Delay between Crossref API calls (seconds).")
    parser.add_argument("--download-delay", type=float, default=1.5, help="Delay between Sci-Hub download attempts (seconds).")
    parser.add_argument("--limit", type=int, help="Optional limit on the number of papers to download (after scraping).")
    parser.add_argument("--mailto", help="Email for the Crossref polite-pool User-Agent (recommended).")
    parser.add_argument(
        "--name-template",
        default="{year}_{venue_abbrev}_{short_title}",
        help="Filename template for PDFs. Fields: year, venue, venue_abbrev, short_title, title, doi.",
    )
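    # The default template might render to a name like
    # "2021_TPAMI_attention_survey.pdf" (an illustrative value, not a real output).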
parser.add_argument("--pdf-dir", help="Custom PDF output folder (default: <workdir>/pdfs).")
parser.add_argument("--report-name", default="missing_or_failed.md", help="Filename for the missing/failed report in workdir.")
parser.add_argument("--no-download", action="store_true", help="Only build metadata spreadsheet, skip PDF downloads.")
parser.add_argument("--proxy", help="HTTP(S) proxy, e.g. http://user:pass@host:port . Applied to Scholar, Crossref, Sci-Hub.")
parser.add_argument("--cookies-file", help="Netscape/Mozilla cookies.txt file to reuse solved Scholar captcha session.")
return parser.parse_args()
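

# Example invocations (illustrative; the profile URL, email, and paths are
# placeholders):
#   python main.py --profile-url "https://scholar.google.com/citations?user=XXXX" \
#       --workdir _results/demo --mailto you@example.com
#   python main.py --from-xlsx _results/latest/metadata.xlsx --no-download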


def main() -> None:
    """Entry point: parse arguments, run the pipeline, and print a small summary."""
    args = parse_args()
    if not any([args.profile_url, args.cited_url, args.from_xlsx]):
        raise SystemExit("Provide --profile-url, --cited-url, or --from-xlsx.")
    # Auto-correct a common mistake: a cited-by URL supplied via --profile-url.
    if args.profile_url and ("cites=" in args.profile_url or "oi=bibs" in args.profile_url):
        if args.cited_url:
            raise SystemExit("Provide only one source. Detected a cited-by URL in --profile-url as well as --cited-url.")
        args.cited_url = args.profile_url
        args.profile_url = None
    result = run_pipeline(
        profile_url=args.profile_url,
        cited_url=args.cited_url,
        from_xlsx=Path(args.from_xlsx) if args.from_xlsx else None,
        workdir=Path(args.workdir),
        pagesize=args.pagesize,
        max_pages=args.max_pages,
        scrape_delay=args.scrape_delay,
        crossref_delay=args.crossref_delay,
        mailto=args.mailto,
        name_template=args.name_template,
        pdf_dir=Path(args.pdf_dir) if args.pdf_dir else None,
        download_delay=args.download_delay,
        limit=args.limit,
        report_name=args.report_name,
        skip_download=args.no_download,
        cookies=_load_cookies(args.cookies_file) if args.cookies_file else None,
        proxies={"http": args.proxy, "https": args.proxy} if args.proxy else None,
    )
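    # run_pipeline is expected to return a mapping with the keys consumed below:
    # metadata_path, pdf_dir, report_path, total, attempted, downloaded, failed.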
rprint("\n:checkered_flag: [bold green]Pipeline complete[/bold green]")
rprint(f":page_facing_up: Metadata saved to : [cyan]{result['metadata_path']}[/cyan]")
rprint(f":open_file_folder: PDFs saved to : [cyan]{result['pdf_dir']}[/cyan]")
rprint(f":memo: Missing/failed MD : [cyan]{result['report_path']}[/cyan]")
rprint(f":bar_chart: Total papers : [yellow]{result['total']}[/yellow]")
rprint(f":dart: Attempted download: [yellow]{result['attempted']}[/yellow]")
rprint(f":white_check_mark: Downloaded : [green]{result['downloaded']}[/green]")
rprint(f":x: Failed/missing : [red]{result['failed']}[/red]")


if __name__ == "__main__":
    main()