Crawling Baidu Tieba with a Python 3 Spider

Goal: crawl the posts made by the thread's original poster (OP) in a Baidu Tieba thread, extract the text content, and save it to a txt file.

Note: this code was written quite a while ago, so the content extraction relies almost entirely on regular expressions rather than any higher-level parsing packages. The full code follows:

# -*- coding:utf-8 -*-
import urllib.request
import urllib.error
import re

class Tools:
    # Regex patterns for stripping Tieba post HTML down to plain text.
    removeImg=re.compile('<img.*?>| {7}')        # image tags and 7-space indents
    removeAddr=re.compile('<a.*?>|</a>')         # anchor tags (keep the link text)
    replaceLine=re.compile('</div>|</p>')        # block closers become newlines
    replaceTD=re.compile('<td>')                 # table cells become tabs
    replacePara=re.compile('<p.*?>')             # non-greedy, so one tag per match
    replaceBR=re.compile('<br><br><br>')         # triple <br> becomes one newline
    removeExtraTag=re.compile('<.*?>|(&lt;)+')   # anything left over
    def replace(self,x):
        x=re.sub(self.removeImg,'',x)
        x=re.sub(self.removeAddr,'',x)
        x=re.sub(self.replaceLine,'\n',x)
        x=re.sub(self.replaceTD,'\t',x)
        x=re.sub(self.replacePara,'\n',x)
        x=re.sub(self.replaceBR,'\n',x)
        x=re.sub(self.removeExtraTag,'',x)
        return x.strip()

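# A quick sanity check of Tools.replace (the HTML fragment here is a
# made-up example, not real Tieba markup):
#   Tools().replace('<p class="d_post">hi<br><br><br><a href="#">link</a></p>')
#   # -> 'hi\nlink'
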
class BDTB:
    def __init__(self,baseUrl,seeLZ):
        self.baseURL=baseUrl
        self.seeLZ='?see_lz='+str(seeLZ)   # see_lz=1 filters the thread to OP-only posts
        self.tool=Tools()
        
    def getPage(self,pageNum):
        # Fetch one page of the thread; returns the response, or None on failure.
        try:
            url=self.baseURL+self.seeLZ+'&pn='+str(pageNum)
            request=urllib.request.Request(url)
            response=urllib.request.urlopen(request)
            return response
        except urllib.error.URLError as e:
            if hasattr(e,'reason'):
                print('Failed to connect to Baidu Tieba. Reason: '+str(e.reason))
            return None
            
    def getPageNum(self):
        # Read the total page count from the "total_page" field embedded in page 1.
        page=self.getPage(1).read().decode('utf-8')
        pattern=re.compile(r'"total_page":\d+',re.S)
        result=re.findall(pattern,page)
        return result[0].split(':')[1]
    
    def getTitle(self):
        # The thread title appears as  title: "..."  in a script block on page 1.
        # Split on the first colon only, so titles containing ':' survive intact.
        page=self.getPage(1).read().decode('utf-8')
        pattern=re.compile('title: ".*?"',re.S)
        result=re.findall(pattern,page)[0].split(':',1)[1]
        return result
        
    def getContent(self,page):
        # Each OP post body sits in a div whose id starts with "post_content_";
        # pull out every such div and run it through Tools.replace.
        pattern=re.compile(r'<div id="post_content_.*?>.*?</div>',re.S)
        items=re.findall(pattern,page)
        new_items=[]
        for item in items:
            new_items.append(self.tool.replace(item))
        return new_items
            
baseURL='http://tieba.baidu.com/p/4643889032'
bdtb=BDTB(baseURL,1)
# Write as UTF-8 explicitly so Chinese text is safe regardless of the OS default.
with open('tb.txt','w',encoding='utf-8') as teibatxt:
    title=bdtb.getTitle().strip(' "')
    teibatxt.write(title+'\n\n\n')
    pageNum=bdtb.getPageNum()
    for page_num in range(int(pageNum)):
        page=bdtb.getPage(page_num+1).read().decode('utf-8')
        page_items=bdtb.getContent(page)
        for item in page_items:
            teibatxt.write(item+'\n')

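One caveat: the script sends urllib's default User-Agent, which Tieba may reject or redirect. A minimal hardening sketch of the getPage logic with a browser-like header and a timeout (both values are illustrative assumptions, not part of the original code):

import urllib.request

def get_page(base_url, see_lz, page_num):
    # Same URL construction as BDTB.getPage, plus a spoofed User-Agent.
    url = base_url + '?see_lz=' + str(see_lz) + '&pn=' + str(page_num)
    request = urllib.request.Request(
        url,
        headers={'User-Agent': 'Mozilla/5.0'}  # illustrative header value
    )
    with urllib.request.urlopen(request, timeout=10) as response:
        return response.read().decode('utf-8')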
Reposted from blog.csdn.net/yeshang_lady/article/details/82258546