Create get_hf_daily_papers.py
get_hf_daily_papers.py +95 -0
get_hf_daily_papers.py
ADDED
@@ -0,0 +1,95 @@
from datetime import datetime, timedelta
import json
import time
from typing import List, Dict

from bs4 import BeautifulSoup
import requests

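# NOTE: the CSS class selectors used below are copied from the live
# huggingface.co/papers markup and are an assumption about its current
# layout; they will need updating whenever the site's HTML changes.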
def get_hf_daily_papers_info(date: str) -> List[Dict]:
    """Scrape the Hugging Face daily-papers listing for a single date (YYYY-MM-DD)."""
    url = 'https://huggingface.co/papers?date=' + date
    response = requests.get(url, timeout=30)  # timeout so a stalled connection cannot hang the crawl

    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        papers = []
        articles = soup.find_all('article', class_='relative flex flex-col overflow-hidden rounded-xl border')

        for article in articles:
            title_tag = article.find('h3')
            if title_tag is None:
                continue  # skip cards that do not match the expected layout
            title = title_tag.get_text(strip=True)
            link = title_tag.find('a')['href']

            authors = [author_tag['title'] for author_tag in article.find_all('li', title=True)]

            submitter_tag = article.find('div', class_='pointer-events-none absolute right-2 top-56 -mt-8 flex h-6 items-center gap-1 self-end whitespace-nowrap rounded-md border bg-white px-2 text-xs leading-none text-gray-700 shadow-sm dark:bg-gray-900 dark:text-gray-400 sm:text-sm md:top-64')
            if submitter_tag:
                submitter = submitter_tag.get_text(strip=True).replace('Submitted by', '').strip()
            else:
                submitter = ''

            paper_info = {
                'title': title,
                'link': "https://huggingface.co" + link,
                'authors': authors,
                'submitter': submitter,
                'date': date
            }
            papers.append(paper_info)

        return papers
    else:
        print(f"No papers found for {date} (status {response.status_code})")
        return []

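# Each collected link points at a paper detail page of the form
# https://huggingface.co/papers/<id>, e.g. (illustrative id):
#     get_abstract('https://huggingface.co/papers/2305.02301')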
def get_abstract(url: str) -> str:
    """Fetch a paper detail page and return the text of its Abstract section."""
    response = requests.get(url, timeout=30)
    if response.status_code != 200:
        raise Exception(f"Failed to load page {url}")

    soup = BeautifulSoup(response.text, 'html.parser')
    abstract_section = soup.find('div', class_='pb-8 pr-4 md:pr-16')
    if not abstract_section:
        raise Exception("Abstract section not found")

    abstract_header = abstract_section.find('h2', string='Abstract')
    if not abstract_header:
        raise Exception("Abstract header not found")

    abstract_paragraph = abstract_header.find_next('p')
    if not abstract_paragraph:
        raise Exception("Abstract paragraph not found")

    return abstract_paragraph.get_text(strip=True).replace('\n', ' ')

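# Driver: walks every calendar date in the inclusive range; dates without
# a listing contribute no entries, and per-paper requests are throttled to
# roughly one per second to stay polite to the site.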
def fetch_and_save_papers(start_date: str, end_date: str, output_file: str):
    """Collect papers for every date from start_date to end_date (inclusive) and dump them to JSON."""
    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.strptime(end_date, "%Y-%m-%d")

    date_list = [(start_date + timedelta(days=x)).strftime("%Y-%m-%d")
                 for x in range((end_date - start_date).days + 1)]

    all_papers = []
    for date in date_list:
        papers = get_hf_daily_papers_info(date)
        for paper in papers:
            try:
                paper['abstract'] = get_abstract(paper['link'])
            except Exception as e:
                print(f"Failed to get abstract for {paper['link']}: {e}")
            all_papers.append(paper)
            time.sleep(1.0)  # pause for 1 second between requests to be gentle on the website

    with open(output_file, 'w', encoding='utf-8') as json_file:
        json.dump(all_papers, json_file, ensure_ascii=False, indent=4)

    print(f"Data saved to {output_file}")

if __name__ == '__main__':
    # Example usage
    args = {
        'start_date': '2023-05-04',
        'end_date': '2024-06-27',
        'output_file': 'hf_daily_papers_2023-05-04_2024-06-27.json'
    }
    fetch_and_save_papers(**args)
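A minimal sketch of reading the dump back in (this assumes the run above completed and produced the file named in args):

    import json

    with open('hf_daily_papers_2023-05-04_2024-06-27.json', encoding='utf-8') as f:
        papers = json.load(f)
    print(len(papers), 'papers loaded')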