-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathsidbi_finance.py
75 lines (66 loc) · 2.05 KB
/
sidbi_finance.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import urllib2
from bs4 import BeautifulSoup,Comment
import re
import json
class Scraper:
    """Scrapes the SIDBI 'finance upgradation/modernisation' page for scheme links."""

    def webscraper(self):
        """Fetch the SIDBI page and collect one record per listed scheme.

        Returns:
            list of dict: each entry is ``{'title': ..., 'url': ...}`` built by
            the module-level ``return_dict`` helper, one per ``<li>`` bullet.
        """
        url = 'http://sidbi.in/?q=finance-upgradation-modernisation#overlay-context=front-page%3Fq%3Dfront-page'
        # Parenthesized print works identically on Python 2 and 3.
        print('scraping %s ' % (url))
        results = []
        soup = BeautifulSoup(urllib2.urlopen(url).read())
        # The scheme list is the <ul> that follows the second
        # "field-item even" div on the page.
        listing = (soup.find("div", {"class": "field-item even"})
                       .find_next("div", {"class": "field-item even"})
                       .find_next("ul"))
        for item in listing.find_all("li"):
            title = item.find("h4").get_text()
            # hrefs on the page are site-relative; prefix the host.
            link = "http://sidbi.in" + item.find("h4").find("a").get("href")
            results.append(return_dict(title, link))
        return results
# NOTE(review): removed a large block of commented-out scraping code for a
# different site (dcmsme.gov.in) that was parked here as a no-op triple-quoted
# string literal. It referenced a three-argument return_dict(z, x, i) that no
# longer matches the two-argument helper below; recover it from version
# control if ever needed.
def removeAfter(string, suffix):
    """Return *string* truncated just past the first occurrence of *suffix*.

    Raises ValueError (propagated from ``str.index``) when *suffix* does not
    occur in *string*.
    """
    cutoff = string.index(suffix) + len(suffix)
    return string[:cutoff]
def return_dict(title, url):
    """Build a ``{'title': ..., 'url': ...}`` record, echoing it for logging.

    The echo is best-effort: on Python 2, printing a non-ASCII title can raise
    UnicodeEncodeError when the console encoding is limited, so print failures
    are ignored rather than aborting the scrape.

    Args:
        title: scheme title text scraped from the page.
        url: absolute URL for the scheme.

    Returns:
        dict with keys ``'title'`` and ``'url'``.
    """
    record = {}
    record['title'] = title
    record['url'] = url
    try:
        # Parenthesized print works identically on Python 2 and 3.
        print(' |||||| %s ||||||||| %s' % (title, url))
    except Exception:
        # Narrowed from a bare except: still best-effort logging, but no
        # longer swallows KeyboardInterrupt/SystemExit.
        pass
    return record
def main():
    """Run the scraper and persist its results as JSON to sidbi_finance.txt."""
    results = Scraper().webscraper()
    serialized = json.dumps(results)
    with open("sidbi_finance.txt", "w") as outfile:
        outfile.write(serialized)


if __name__ == '__main__':
    main()