How to batch download Sina blog posts with Python
2020-11-27 14:34:02 Editor: 小采


This article demonstrates a Python approach to batch downloading Sina blog posts, shared here for your reference. The implementation is as follows:

# coding=utf-8
import urllib2
import sys, os
import re
from BeautifulSoup import BeautifulSoup

def encode(s):
    return s.decode('utf-8').encode(sys.stdout.encoding, 'ignore')

def getHTML(url):
    #proxy_handler = urllib2.ProxyHandler({'http':'http://211.138.124.211:80'})
    #opener = urllib2.build_opener(proxy_handler)
    #urllib2.install_opener(opener)
    req = urllib2.Request(url)
    response = urllib2.urlopen(req, timeout=15)
    return BeautifulSoup(response, convertEntities=BeautifulSoup.HTML_ENTITIES)

def visible(element):
    '''Keep only visible text elements'''
    if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
        return False
    elif re.match('<!--.*-->', str(element)):  # skip HTML comments
        return False
    elif element == u'\xa0':  # skip non-breaking spaces
        return False
    return True

def delReturn(element):
    '''Remove newlines inside an element'''
    return re.sub('(?<!^)\n+(?!$)', ' ', str(element))

def validFilename(filename):
    # strip characters Windows forbids in file names
    return re.sub(r'[\\/:*?<>"|\xa0]', '', filename)

def writeToFile(text, filename, dirname):
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    print encode('Saving to directory'), dirname
    filename = validFilename(filename)
    print encode('Saving article'), filename
    path = os.path.join(dirname, filename)
    if not os.path.exists(path):
        f = open(path, 'w')
        f.write(text)
        f.close()
    else:
        print filename, encode('already exists')

def formatContent(url, title=''):
    '''Format the article content'''
    page = getHTML(url)
    content = page.find('div', {'class':'articalContent'})
    art_id = re.search(r'blog_(\w+)\.html', url).group(1)
    blog_name = page.find('span', id='blognamespan').string
    if title == '':
        title = page.find('h2', id=re.compile('^t_')).string
    temp_data = filter(visible, content.findAll(text=True))  # drop invisible elements
    temp_data = ''.join(map(delReturn, temp_data))  # remove newlines inside elements
    temp_data = temp_data.strip()  # trim blank lines at both ends
    temp_data = re.sub('\n{2,}', '\n\n', temp_data)  # collapse runs of blank lines
    # write out to file; encode explicitly to avoid codec errors
    temp_data = 'Source: '.decode('utf-8') + url + '\n' + temp_data
    op_text = temp_data.encode('utf-8')
    op_file = title + '_' + art_id + '.txt'
    writeToFile(op_text, op_file, blog_name)

def articlelist(url):
    articles = {}
    page = getHTML(url)
    pages = page.find('ul', {'class':'SG_pages'}).span.string
    page_num = int(re.search(r'(\d+)', pages).group(1))
    for i in range(1, page_num + 1):
        print encode('Building article index for page %d' % i)
        if i != 1:
            url = re.sub(r'(_)\d+(\.html)$', r'\g<1>' + str(i) + r'\g<2>', url)
            page = getHTML(url)
        article = page.findAll('span', {'class':'atc_title'})
        for art in article:
            art_title = art.a['title']
            art_href = art.a['href']
            articles[art_title] = art_href
    return articles

def blog_dld(articles):
    if not isinstance(articles, dict):
        return False
    print encode('Starting download')
    for art_title, art_href in articles.items():
        formatContent(art_href, art_title)

if __name__ == '__main__':
    sel = raw_input(encode('Download (1) all articles or (2) a single article? Enter 1 or 2: '))
    if sel == '1':
        #articlelist_url = 'http://blog.sina.com.cn/s/articlelist_1303481411_0_1.html'
        articlelist_url = raw_input(encode('Enter the blog article-list URL: '))
        articles = articlelist(articlelist_url)
        blog_dld(articles)
    else:
        #article_url = 'http://blog.sina.com.cn/s/blog_4db18c430100gxc5.html'
        article_url = raw_input(encode('Enter the blog post URL: '))
        formatContent(article_url)
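The script above is written for Python 2 (urllib2, print statements, raw_input) against the long-retired BeautifulSoup 3 API. As a rough porting guide, here is a minimal Python 3 sketch of the single-article download, assuming the third-party requests and beautifulsoup4 packages are installed (pip install requests beautifulsoup4); the class and id names ('articalContent', 't_...') are carried over from the script above and may no longer match Sina's current page markup:

# Python 3 sketch (assumes: pip install requests beautifulsoup4)
import os
import re
import requests
from bs4 import BeautifulSoup

def get_soup(url):
    # fetch a page and parse it with the stdlib HTML parser
    response = requests.get(url, timeout=15)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')

def save_article(url, dirname='blog'):
    # download one post and save its text content as a .txt file
    page = get_soup(url)
    content = page.find('div', {'class': 'articalContent'})
    title = page.find('h2', id=re.compile('^t_')).get_text(strip=True)
    art_id = re.search(r'blog_(\w+)\.html', url).group(1)
    # get_text() approximates the visible()/delReturn() cleanup above
    text = content.get_text('\n', strip=True)
    text = re.sub('\n{2,}', '\n\n', text)
    os.makedirs(dirname, exist_ok=True)
    filename = re.sub(r'[\\/:*?<>"|]', '', title) + '_' + art_id + '.txt'
    path = os.path.join(dirname, filename)
    with open(path, 'w', encoding='utf-8') as f:
        f.write('Source: ' + url + '\n\n' + text)
    print('Saved', path)

if __name__ == '__main__':
    save_article(input('Blog post URL: '))

In this sketch, bs4's get_text('\n', strip=True) stands in for the manual visible()/delReturn() filtering of the original; it is an approximation rather than an exact equivalent, and Python 3's native Unicode strings remove the need for the explicit decode/encode steps.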

I hope this article is helpful to readers working on Python programming.
