Scraping images from the 爆照 (photo) section of Hupu's 步行街 board with a Python crawler

I had no clue where to start, so I mainly drew on the following post:

https://blog.csdn.net/sinat_28797501/article/details/70195443

But I later found a few issues in that code. Whether the site has been redesigned since, or the code or environment simply differs, after some patching I did get the image scraping to work. (PS: the original post is still very well done, hats off hahaha)

The main things I changed:

When saving images, the original post had a small format problem, possibly a slip by the author (GIF images could not be saved in .gif format, only as .jpeg) or some other consideration; the first sketch after this list shows what the fix amounts to;

The site has likely been redesigned: while collecting post URLs I could not find the <tr> elements used in the original post. Checking the page source of Hupu's 爆照 board again, that structure is gone (the original apparently relied on a table); it has been replaced by <ul> and <a> elements (see tagtable and tagstr in the code, and the second sketch after this list);

Different environments: the author apparently used Python 2 while I am on Anaconda 3, so the urllib package we use differs as well, the author's was urllib2 (see the third sketch after this list);

Some small tweaks made while hunting bugs during debugging, not worth detailing (for instance, commenting out the text-file writing part to save effort -_-!).
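
First, the format fix. A minimal sketch of the idea rather than the exact code: take the saved file's extension from the image URL's suffix instead of forcing everything into one format (the URL below is made up for illustration):

from urllib.parse import urlparse
import os

def guess_extension(img_url, default='.jpg'):
    # take the suffix of the URL path; fall back when it is missing or unknown
    suffix = os.path.splitext(urlparse(img_url).path)[1].lower()
    return suffix if suffix in ('.png', '.jpg', '.jpeg', '.gif', '.webp') else default

print(guess_extension('https://example.com/pic/123.gif'))  # -> .gif (hypothetical URL)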
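
Second, the markup change. A rough demonstration against a tiny hand-written snippet of the new structure as I saw it (real pages carry more attributes; this only shows the selectors):

from bs4 import BeautifulSoup

html = '''
<ul class="for-list">
  <li><a class="truetit" href="/12345.html">some post title</a></li>
</ul>
'''
soup = BeautifulSoup(html, 'lxml')

# the old layout would have been parsed with something like
# soup.find('table').find_all('tr'); the current markup needs:
tagtable = soup.find('ul', 'for-list')
tagstr = tagtable.find_all('a', 'truetit')
print([a.get('href') for a in tagstr])  # ['/12345.html']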
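
Third, the urllib difference in isolation (the Python 2 lines are roughly what the original would have used, from memory):

import urllib.request

# Python 2 (original post), roughly:
#   import urllib2
#   html = urllib2.urlopen(url, timeout=20).read()
# Python 3: the same call now lives in urllib.request
def fetch(url):
    return urllib.request.urlopen(url, timeout=20).read()  # returns bytes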

After that I crawled the 爆照 board and the 什么图都有 (all-kinds-of-pictures) board; the results came out like this~

The code:


import os
import urllib.request
from bs4 import BeautifulSoup
import logging


class Item(object):
    title = None  # post title
    firstAuthor = None  # original poster
    firstTime = None  # post creation time
    reNum = None  # reply/view count
    LastTime = None  # time of the last reply
    LastAuthor = None  # author of the last reply
    link = None  # post link


# module-level helper: fetch the raw content of a URL
def getResponseContent(url):
    try:
        response = urllib.request.urlopen(url, timeout=20)
        htext = response.read()
    except Exception:
        logging.error('Failed to fetch URL: {}'.format(url))
        return None
    else:
        logging.info('Fetched URL: {}'.format(url))
        return htext


class getHupuInfo(object):
    def __init__(self, url):
        self.url = url
        self.pageSum = 100  # crawl at most 100 pages of the post list
        self.urls = self.getUrls(self.pageSum)
        self.items = self.spider(self.urls)
        self.pipelines(self.items)

    def getUrls(self, pageSum):
        urls = []
        urls.append(self.url)
        for pn in range(1, pageSum):
            tempurl = self.url + '-' + str(pn + 1)
            urls.append(tempurl)
        logging.info('Collected post-list URLs')
        return urls

    def spider(self, urls):
        items = []
        for url in urls:
            htmlContent = getResponseContent(url)
            if htmlContent is not None:
                soup = BeautifulSoup(htmlContent, 'lxml')
                tagtable = soup.find('ul', 'for-list')  # the post list now lives in <ul class="for-list">
                if tagtable is None:
                    continue
                tagstr = tagtable.find_all('a', 'truetit')  # the post-title links

                for tag in tagstr[1:]:  # skip the first link (the board's header row)
                    item = Item()
                    item.link = tag.get('href')  # the href attribute is the post link
                    # title/author/time extraction relied on the retired <td>
                    # layout, so only the link is collected now
                    items.append(item)
        logging.info('Collected posts')
        return items

    def pipelines(self, items):
        fileName = 'Hupu_bxj.txt'
        with open(fileName, 'w') as fp:
            for item in items:
                pass  # text output disabled while debugging (see the notes above)
                # fp.write('{}\t{}\t{}\t{}\t{}\t{}\n{}\n\n'.format(item.title, item.firstAuthor, item.firstTime,
                #                                                  item.reNum, item.LastAuthor, item.LastTime, item.link))
        logging.info('Wrote text file')

    def getpiclink(self):
        piclink = []
        for item in self.items:
            # self.url[0:20] is exactly 'https://bbs.hupu.com'; item.link is a
            # site-relative path, so concatenating rebuilds the absolute URL
            piclink.append(self.url[0:20] + item.link)
        logging.info('Collected image-post links')
        return piclink


class picInfo(object):
    def __init__(self, links):
        self.links = links
        self.imgurls = []
        self.spider()
        self.pipeline()

    def spider(self):

        if self.links is None:
            logging.error('No post links to crawl')
        else:
            for link in self.links:
                htmlContent = getResponseContent(link)
                if htmlContent is None:
                    continue
                soup = BeautifulSoup(htmlContent, 'lxml')
                tagDiv = soup.find('div', attrs={'id': 'tpc'})  # the opening post
                if tagDiv is None:
                    continue
                quote = tagDiv.find('div', attrs={'class': 'quote-content'})
                if quote is None:
                    continue
                for subimg in quote.find_all('img'):
                    # lazy-loaded images keep the real URL in data-original;
                    # fall back to src for images that were already loaded
                    imgurl = subimg.get('data-original') or subimg.get('src')
                    self.imgurls.append(imgurl)
        logging.info('Collected image URLs')

    def pipeline(self):
        os.makedirs('file_with_10pages', exist_ok=True)  # make sure the output dir exists
        for i, imgurl in enumerate(self.imgurls):
            # pick the file extension from the URL suffix
            if imgurl[-3:] == 'png':
                imgname = str(i) + '.png'
            elif imgurl[-3:] == 'jpg':
                imgname = str(i) + '.jpg'
            elif imgurl[-4:] == 'jpeg':
                imgname = str(i) + '.jpeg'
            elif imgurl[-3:] == 'gif':
                imgname = str(i) + '.gif'
            elif imgurl[-4:] == 'webp':
                imgname = str(i) + '.webp'
            else:
                imgname = str(i) + '.jpg'  # unknown suffix: default to .jpg
            img = getResponseContent(imgurl)
            if img is None:
                continue
            with open('file_with_10pages/' + imgname, 'wb') as fp:
                fp.write(img)
        logging.info('Wrote images to disk')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    url = u'https://bbs.hupu.com/4846'
    HUPU = getHupuInfo(url)
    picurls = HUPU.getpiclink()
    PIC = picInfo(picurls)

I'm still pretty green at this, so suggestions for improvement are very welcome~

Reposted from blog.csdn.net/qq_35014850/article/details/81231325