#-*- coding:utf-8 -*-
import urllib
import urllib2
# 1. Import Python's SSL handling module
import ssl
import time
import sys
import random
import codecs
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf8')
def getUrl(url):
    # 2. Ignore unverified SSL certificates
    context = ssl._create_unverified_context()
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib2.Request(url, headers=headers)
    # 3. Pass the context argument to urlopen()
    response = urllib2.urlopen(request, context=context)
    data = response.read()
    soup = BeautifulSoup(data, 'lxml')
    # Walk the rows of the first table and append the text of each first cell to all.txt
    f = codecs.open("all.txt", "a", "utf-8")
    for tr in soup.table.find_all("tr"):
        td = tr.td
        if td is None:
            continue
        res = unicode(td.get_text())
        if not res:
            continue
        f.write(res + "\n")
        #print td.find_all(class_="suit")
    f.close()

for i in range(100000):
    url = "/%d" % (i)
    print url
    try:
        getUrl(url)
    except Exception, e:
        print e
    sleepS = random.randint(1, 3)
    time.sleep(sleepS)
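For comparison, the same unverified-certificate request written against Python 3's standard library looks roughly like this. This is only a minimal sketch: the function name get_url_py3 is a placeholder and not part of the original script.

# Python 3 standard-library version of the request above (sketch).
import ssl
import urllib.request

def get_url_py3(url):
    # Same trick: skip SSL certificate verification.
    context = ssl._create_unverified_context()
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request, context=context)
    # Decoding here replaces the sys.setdefaultencoding hack needed in Python 2.
    return response.read().decode("utf-8", errors="replace")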
In Python 3, the requests-based version below is simpler still, and it avoids the pile of character-encoding conversion problems.
import requests
import traceback

def req_http_get(url):
    num = 0
    num_max = 3
    data = ""
    try:
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        while num < num_max:
            # verify=False is the requests equivalent of the unverified SSL context used above
            req = requests.get(url=url, headers=headers, verify=False)
            data = req.text
            if len(data) > 0:
                break
            num += 1
        return data
    except Exception as e:
        traceback.print_exc()
        return data
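Below is a sketch of how req_http_get() could slot into the same crawling loop as the Python 2 script above. BASE_URL, the helper name save_table_cells, and the output file name are assumptions for illustration, not part of the original post.

# Sketch: reuse req_http_get() in the crawling loop (BASE_URL is a placeholder).
import random
import time
from bs4 import BeautifulSoup

BASE_URL = "https://example.com"   # placeholder for the real site

def save_table_cells(html, path="all.txt"):
    # Append the text of the first cell of every table row to a file.
    soup = BeautifulSoup(html, "lxml")
    table = soup.table
    if table is None:
        return
    with open(path, "a", encoding="utf-8") as f:
        for tr in table.find_all("tr"):
            if tr.td is not None and tr.td.get_text().strip():
                f.write(tr.td.get_text().strip() + "\n")

for i in range(100000):
    html = req_http_get("%s/%d" % (BASE_URL, i))
    if html:
        save_table_cells(html)
    time.sleep(random.randint(1, 3))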