The basic idea behind a crawler is this: given an initial URL, download the page at that URL, find every link on the page that meets the download criteria, download the pages those links point to, and then extract links from those newly downloaded pages in turn. We can implement this algorithm with a breadth-first search.
#!/usr/bin/python
import urllib2
import re

def downURL(url, filename):
    """Download the page at url and save it to filename; return 1 on success."""
    print url
    print filename
    try:
        fp = urllib2.urlopen(url)
    except:
        print 'download exception'
        return 0
    op = open(filename, "wb")
    while 1:
        s = fp.read()
        if not s:
            break
        op.write(s)
    fp.close()
    op.close()
    return 1

#downURL('', 'http.log')
def getURL(url):
    """Fetch the page at url and return every link on it that ends in .shtml."""
    try:
        fp = urllib2.urlopen(url)
    except:
        print 'get url exception'
        return []
    pattern = re.compile(r"[^\>]+\.shtml")
    urls = []
    while 1:
        s = fp.read()
        if not s:
            break
        urls += pattern.findall(s)
    fp.close()
    return urls
def spider(startURL, times):
    """Breadth-first crawl: start from startURL and download at most times pages."""
    urls = []
    urls.append(startURL)        # FIFO queue of pages waiting to be downloaded
    i = 0
    while 1:
        if i > times:
            break
        if len(urls) > 0:
            url = urls.pop(0)    # take the oldest URL first (breadth-first order)
            print url, len(urls)
            downURL(url, str(i) + '.htm')
            i = i + 1
            if len(urls) < times:            # stop growing the queue once it is long enough
                urllist = getURL(url)
                for url in urllist:
                    if urls.count(url) == 0:  # skip URLs already queued
                        urls.append(url)
        else:
            break
    return 1

spider('', 10)
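The listing above is Python 2 only (urllib2 and print statements). For comparison, here is a minimal sketch of the same breadth-first crawl in Python 3, using urllib.request in place of urllib2; the names down_url, get_urls, and the example start URL are my own illustrations, not from the original post.

#!/usr/bin/python3
import re
import urllib.request
from collections import deque

def down_url(url, filename):
    """Download the page at url and save it to filename; return True on success."""
    try:
        data = urllib.request.urlopen(url).read()
    except Exception:
        print('download exception:', url)
        return False
    with open(filename, 'wb') as op:
        op.write(data)
    return True

def get_urls(url):
    """Fetch the page at url and return every link on it that ends in .shtml."""
    try:
        data = urllib.request.urlopen(url).read().decode('utf-8', errors='ignore')
    except Exception:
        print('get url exception:', url)
        return []
    return re.findall(r'[^>"\s]+\.shtml', data)

def spider(start_url, times):
    """Breadth-first crawl: start from start_url and download at most times pages."""
    queue = deque([start_url])   # FIFO queue gives the breadth-first order
    seen = {start_url}           # remember queued URLs so each page is fetched only once
    i = 0
    while queue and i <= times:
        url = queue.popleft()
        print(url, len(queue))
        down_url(url, str(i) + '.htm')
        i += 1
        for link in get_urls(url):
            if link not in seen:
                seen.add(link)
                queue.append(link)

# example call with a hypothetical start page:
# spider('http://example.com/index.shtml', 10)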