'''
Created on Apr 23, 2010
@author: Leyond
'''
import urllib
from BeautifulSoup import BeautifulSoup
import re
def saveToFile(dir, htmlContent, title, url=""):
    """Write one blog entry to disk as <dir>/<url>.

    Parameters:
        dir         -- destination directory (builtin name shadowed; kept
                       for caller compatibility)
        htmlContent -- the entry's HTML body (written after the title)
        title       -- the entry's title (written first, also used in the
                       failure message)
        url         -- filename for the entry, appended to `dir`

    On any I/O failure the error is reported on stdout and the function
    returns; it does not raise.  (The original "retry" loop ran at most
    one attempt, so a single try/except is equivalent.)
    """
    path = dir + "/%s" % (url)
    try:
        myfile = open(path, 'w')
        try:
            # NOTE(review): the bare "" concatenations look like markup
            # (e.g. <title> tags) stripped from the original source --
            # kept as plain concatenation to preserve current output.
            myfile.write("" + str(title) + "" + str(htmlContent) + "\n")
        finally:
            myfile.close()
    except (IOError, OSError):
        print("%s download Fail." % (title))
def findNextBlogHtml(user, htmlContent):
    """Find the previous-post link in a blog page and return its filename.

    Scans `htmlContent` for a JavaScript snippet of the form
    ``var ... pre ... /blog/item/<name>.html`` and returns the
    ``<name>.html`` part.  Returns the string ``"None"`` when no such
    link (or more than one candidate snippet) is found.

    Parameters:
        user        -- unused here; kept for signature compatibility
        htmlContent -- raw HTML of the current blog page
    """
    prefix = "/blog/item/"
    candidates = re.findall(r"var.*pre.*?/blog/item/.*?html", htmlContent, re.I)
    if len(candidates) != 1:
        return "None"
    # Fixed regex: the dot before "html" must be escaped, otherwise it
    # matches any character.
    links = re.findall(r"/blog/item/\w*\.html", candidates[0], re.I)
    # Guard against no match (the original indexed links[0] unconditionally)
    # and require a filename of at least two characters, matching the
    # original len(...) > 17 check (11-char prefix + ".html" + 2 chars).
    if not links or len(links[0]) <= len(prefix) + len(".html") + 1:
        return "None"
    return links[0][len(prefix):]
def getBlogContentAndTitle(user, htmlUrl):
    """Fetch one blog post and return (blogData, blogTitle, htmlContent).

    Parameters:
        user    -- the blog owner's path segment, joined with
                   "/blog/item/" and `htmlUrl` to build the page URL
        htmlUrl -- the post's filename (e.g. "abcdef.html")

    Returns a 3-tuple:
        blogData    -- publish date string + the post's content table HTML
        blogTitle   -- the post title (from the div with class "tit")
        htmlContent -- the full page, re-encoded from GB2312 to UTF-8

    Raises IndexError if the expected divs/tables are missing from the page.
    """
    # NOTE(review): the URL prefix is an empty string -- it looks like a
    # host (e.g. the blog site's domain) was lost from this line; confirm
    # against the original source.
    blogUrl = "" + user + "/blog/item/" + htmlUrl
    sock = urllib.urlopen(blogUrl)
    try:
        blogHtmlContent = sock.read()
    finally:
        # Close the socket even if read() raises (the original leaked it
        # on failure).
        sock.close()
    # Pages are served as GB2312; re-encode to UTF-8, dropping bad bytes.
    htmlContent = unicode(blogHtmlContent, 'gb2312', 'ignore').encode('utf-8', 'ignore')
    # Parse and pull the pieces out of the "m_blog" container.
    htmlsoup = BeautifulSoup(htmlContent)
    blogContentBlock = htmlsoup.findAll("div", {"id": "m_blog"})
    blogContentBlockZero = blogContentBlock[0].findAll("table", {"style": "table-layout:fixed;width:100%"})
    # Title and publish date live in sibling divs inside the same block.
    blogTitleZero = blogContentBlock[0].findAll("div", {"class": "tit"})
    blogTitle = blogTitleZero[0].string
    blogPublishDate = blogContentBlock[0].findAll("div", {"class": "date"})
    blogDate = blogPublishDate[0].string
    # NOTE(review): the "" concatenations around blogDate look like stripped
    # markup; kept as-is to preserve current output.
    blogData = str("" + blogDate + "") + str(blogContentBlockZero[0])
    return blogData, blogTitle, htmlContent
def backUpBlog(user,firstBlogUrl ):
#first read first blog
|