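"""sciscraper: scrape plant species information from online databases.

Reads scientific names from a .txt/.csv/.xlsx input file, builds search
URLs for the enabled data sources (Agroforestry Switchboard, World Flora
Online), and exports the crawled records to a CSV file.
"""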
import getopt
import os
import re
import sys
from datetime import datetime

import pandas as pd
from scrapy.crawler import CrawlerProcess

from scraper.spiders.switchboard import SwitchboardSpider
from scraper.spiders.wfo import WfoSpider

USAGE_HINT = """\
USAGE:
pipenv run python -m sciscraper <OPTIONS>

OPTIONS:
-s <source> | Set data source. Available sources: ALL (default), SWITCHBOARD, WFO
-i <inputfile> | Set input file
-o <outputfile> | Set output file
-c <inputcolumn> | Set name column to read from the input csv/xlsx

EXAMPLES:
Normal usage: pipenv run python -m sciscraper -i samples/input.csv -o output.csv
Set data source: pipenv run python -m sciscraper -i samples/input.csv -o output.csv -s WFO
Set name column: pipenv run python -m sciscraper -i samples/input.csv -o output.csv -c ScientificName
Show help message: pipenv run python -m sciscraper -h
"""


def read_args():
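    """Parse command-line options; return (inputfile, outputfile, source, inputcolumn, logfile)."""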
    datenow = datetime.now()
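    # Defaults: timestamped output and log filenames so repeated runs do not overwrite each other.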
    inputfile = "input.txt"
    outputfile = datenow.strftime("result.%Y-%m-%d.%H%M%S.csv")
    source = "ALL"
    logfile = datenow.strftime("log.%Y-%m-%d.%H%M%S.txt")
    inputcolumn = "Names"  # default column name to read from csv/xlsx files
    try:
        opts, _ = getopt.getopt(
            sys.argv[1:], "hi:o:s:c:", ["ifile=", "ofile=", "source=", "column="]
        )
    except getopt.GetoptError:
        print(USAGE_HINT)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(USAGE_HINT)
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
        elif opt in ("-s", "--source"):
            source = arg.upper()
        elif opt in ("-c", "--column"):
            inputcolumn = arg
    print("Input file:", inputfile)
    print("Output file:", outputfile)
    if source != "ALL":
        print(f"Source: {source}")
    print(f"Name Column: {inputcolumn}")
    print("Log file:", logfile)
    print("---------------------------------------------")
    return inputfile, outputfile, source, inputcolumn, logfile


def run_crawlers(spiders, scientific_names, outputfile, logfile):
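    """Schedule every enabled spider on one CrawlerProcess, then run them.

    Twisted's reactor cannot be restarted, so process.start() must be called
    exactly once, after all spiders have been scheduled.
    """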
    process = CrawlerProcess(settings={"LOG_FILE": logfile})
    for spider in spiders:
        if not spider["enabled"]:
            continue
        start_urls = []
        for name in scientific_names:
            # Strip non-word characters, then percent-encode spaces for the search URL.
            name_to_search = re.sub(r"[^\w]", " ", name).strip().replace(" ", "%20")
            start_urls.append(spider["url"].format(name_to_search))
        spider["class"].start_urls = start_urls
        # Per-spider feed settings so each spider exports its own field set
        # to the shared output file.
        spider["class"].custom_settings = {
            "FEEDS": {outputfile: {"format": "csv", "fields": spider["fields"]}},
        }
        process.crawl(spider["class"])
    process.start()


if __name__ == "__main__":
    try:
        # Parse command-line arguments and validate the input file and data source.
        inputfile, outputfile, source, inputcolumn, logfile = read_args()
        if not os.path.isfile(inputfile):
            print("Input file not found, please check your command.")
            print(USAGE_HINT)
            sys.exit(2)
        if source not in ("ALL", "SWITCHBOARD", "WFO"):
            print("[Error] Available sources: ALL, SWITCHBOARD, WFO")
            sys.exit(2)
print("GENERATING URLS FROM INPUT FILE...")
scientific_names = []
if ".txt" in inputfile:
with open(inputfile, "r") as filehandle:
scientific_names = [name.rstrip() for name in filehandle.readlines()]
elif ".csv" in inputfile:
scientific_names = pd.read_csv(inputfile)[inputcolumn].tolist()
elif ".xlsx" in inputfile:
scientific_names = pd.read_excel(inputfile)[inputcolumn].tolist()
print("RUNNING CRAWLERS...")
print("It may take a while, please wait...")
        spiders = [
            {
                "class": SwitchboardSpider,
                "url": "http://apps.worldagroforestry.org/products/switchboard/index.php/species_search/{}",
                "fields": [
                    "Query",
                    "Species Name",
                    "Scientific Name",
                    "Note",
                    "Switchboard",
                    "African Wood Density Database",
                    "Agroforestree Database",
                    "Genetic Resources Unit Database",
                    "RELMA-ICRAF Useful Trees",
                    "Tree Functional Attributes and Ecological Database",
                    "Tree Seed Suppliers Directory",
                    "Useful Tree Species for Africa Map",
                    "vegetationmap4africa",
                ],
                "enabled": source in ("ALL", "SWITCHBOARD"),
            },
            {
                "class": WfoSpider,
                "url": "http://www.worldfloraonline.org/search?query={}&view=&limit=5&start=0&sort=",
                "fields": [
                    "Query",
                    "Source",
                    "Source Key",
                    "Status",
                    "Rank",
                    "Accepted Name",
                    "Scientific Name",
                    "Canonical Name",
                    "Authorship",
                    "Kingdom",
                    "Phylum",
                    "Class",
                    "Order",
                    "Family",
                    "Genus",
                    "Species",
                    "Threat Status",
                    "URL",
                    "Search URL",
                ],
                "enabled": source in ("ALL", "WFO"),
            },
        ]
        run_crawlers(spiders, scientific_names, outputfile, logfile)
        print("Done!")
    except KeyboardInterrupt:
        print("Stopped!")
        sys.exit(0)