forked from queensun/Nyspider
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathaqistudy.py
106 lines (98 loc) · 2.76 KB
/
aqistudy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import requests
import time
from bs4 import BeautifulSoup
import os
import time
# Browser-like request headers sent with every HTTP call in this script;
# the site may reject the default python-requests User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate',
}
def get_city():
    """Fetch the list of cities from the aqistudy history index page.

    Returns:
        list[dict]: one dict per city with keys 'name' (link text) and
        'url' (absolute URL of the city's monthly-history page).
    """
    base = 'https://www.aqistudy.cn/historydata/'
    # timeout added for consistency with the other requests in this file;
    # without it a stalled connection hangs the whole crawl
    html = requests.get(base, headers=headers, timeout=30).text
    links = BeautifulSoup(html, 'lxml').find('div', {'class': 'all'}).find_all('a')
    cities = []
    for item in links:
        name = item.get_text()
        href = item.get('href')
        if not href:
            # anchor without a target — original code would crash on
            # None + str concatenation and silently swallow it; skip instead
            continue
        cities.append({'name': name, 'url': base + href})
    return cities
def get_data(url):
    """Download one month's day-by-day AQI table.

    Retries the request up to 5 times on network errors.

    Args:
        url: daydata.php URL for a single city/month.

    Returns:
        list[str]: one tab-separated line per table row, or [] when the
        download keeps failing after 5 attempts.
    """
    html = None
    for _ in range(5):  # bounded retry instead of while True + counter
        try:
            html = requests.get(url, headers=headers, timeout=30).text
            break
        except requests.RequestException:
            # network/timeout error — try again; bare except would also
            # have swallowed KeyboardInterrupt
            continue
    if html is None:
        return []
    rows = BeautifulSoup(html, 'lxml').find('table', {'class': 'table-condensed'}).find_all('tr')
    result = []
    for row in rows[1:]:  # rows[0] is the header row
        tds = row.find_all('td')
        if not tds:
            continue
        # strip embedded newlines/tabs from each cell, join cells with tabs
        line = ''
        for td in tds:
            line += td.get_text().replace('\r', '').replace('\n', '').replace('\t', '') + '\t'
        result.append(line)
    return result
def crawler(item):
    """Download all monthly AQI data files for one city.

    Args:
        item: dict with 'name' and 'url' as produced by get_city().

    Writes one tab-separated .txt file per month under result/<city>/,
    skipping months whose file already exists on disk. Returns None.
    """
    city_dir = 'result/' + item['name']
    # replaces the original try/mkdir/pass dance for both directory levels
    os.makedirs(city_dir, exist_ok=True)
    exists = os.listdir(city_dir)
    # Fetch the city's overview page, retrying up to 5 times on network errors.
    html = None
    for _ in range(5):
        try:
            html = requests.get(item['url'], headers=headers, timeout=30).text
            break
        except requests.RequestException:
            continue
    if html is None:
        return
    rows = BeautifulSoup(html, 'lxml').find('table', {'class': 'table-condensed'}).find_all('tr')
    dates = []
    for tr in rows:
        link = tr.find('a')
        if link is None:  # header/spacer rows have no month link
            continue
        date = link.get_text()
        if date + '.txt' in exists:  # already downloaded — skip
            continue
        dates.append(date)
    for date in dates:
        try:
            data = get_data('https://www.aqistudy.cn/historydata/daydata.php?city=%s&month=%s' % (item['name'], date))
        except Exception:
            # a malformed month page shouldn't abort the whole city
            continue
        if data == []:
            continue
        # explicit utf-8 (original used the platform default) and a context
        # manager so the handle is closed even if a write fails
        with open('result/%s/%s.txt' % (item['name'], date), 'w', encoding='utf-8') as f:
            for line in data:
                f.write(line + '\n')
        print(item['name'], date, 'ok')
def main():
    """Crawl every city listed on the history index page.

    A failure in one city is reported and the crawl continues with the
    next city; prints '完成' when all cities have been attempted.
    """
    cities = get_city()
    for city in cities:
        try:
            crawler(city)
        except Exception as err:
            # narrowed from bare except (which also caught KeyboardInterrupt);
            # include the error so failures are diagnosable
            print(city, 'failed', err)
    print('完成')
# Entry-point guard: without it, merely importing this module would kick
# off the entire crawl (and the trailing 60 s sleep).
if __name__ == '__main__':
    main()
    time.sleep(60)