Web scraper demo: Qiushibaike (糗事百科), part 1

# Single-threaded, version 2
import requests
from lxml import etree


# Scrape Qiushibaike

class QiubaiSpider:
    def __init__(self):
        self.temp_url = 'https://www.qiushibaike.com/8hr/page/{}/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36 QQBrowser/4.4.108.400'

        }

    def get_url_list(self):
        url_list = [self.temp_url.format(i) for i in range(1,14)]
        return url_list

    def parse_url(self, url):  # send the request, return the decoded response body

        resp = requests.get(url,headers=self.headers)
        return resp.content.decode()


    def get_content_list(self, html_str):

        html = etree.HTML(html_str)

        div_list = html.xpath('//div[@id="content-left"]/div')
        content_list = []

        for div in div_list:
            item = {}
            text = div.xpath('.//div[@class="content"]/span/text()')
            author = div.xpath('.//h2/text()')
            # print(text)
            item['author'] = author
            item['text'] = text
            # item['text'] = [i for i in div.xpath('.//div[@class="content"]/span/text()')]
            content_list.append(item)

        return content_list

    def save_content_list(self, content_list):
        for content in content_list:
            print(content)



    def run(self):
        url_list = self.get_url_list()
        for url in url_list:
            print(url)
            html_str = self.parse_url(url)

            content_list = self.get_content_list(html_str)
            self.save_content_list(content_list)

        print('Crawling finished...')


if __name__ == '__main__':
    qiubai = QiubaiSpider()
    qiubai.run()
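
The header comment above labels this script as the single-threaded version. As a rough sketch of a threaded variant (not part of the original demo), the snippet below spreads the same page URLs over a small pool of worker threads; the thread count, the queue layout and the module-level HEADERS dict are illustrative assumptions that reuse the URL pattern and XPath from QiubaiSpider.

# A minimal multi-threaded sketch; the thread count (3), queue layout and
# HEADERS are illustrative assumptions, not part of the original demo.
import threading
from queue import Queue, Empty

import requests
from lxml import etree

HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}


def worker(url_queue, result_queue):
    # Pull URLs until the queue is drained, parse each page, push the items.
    while True:
        try:
            url = url_queue.get_nowait()
        except Empty:
            break
        html = etree.HTML(requests.get(url, headers=HEADERS).content.decode())
        for div in html.xpath('//div[@id="content-left"]/div'):
            result_queue.put({
                'author': div.xpath('.//h2/text()'),
                'text': div.xpath('.//div[@class="content"]/span/text()'),
            })


if __name__ == '__main__':
    url_queue, result_queue = Queue(), Queue()
    for i in range(1, 14):
        url_queue.put('https://www.qiushibaike.com/8hr/page/{}/'.format(i))

    threads = [threading.Thread(target=worker, args=(url_queue, result_queue))
               for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    while not result_queue.empty():
        print(result_queue.get())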

#author: Zheng 
#time: 2018/7/11 09:02
# Scrape Qiushibaike
import requests
from lxml import etree

class Qiubai(object):
    def __init__(self):
        self.tem_url = "https://www.qiushibaike.com/8hr/page/{}/"
        self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}

    def get_url_list(self):
        return [self.tem_url.format(i) for i in range(1, 14)]

    def parse_url(self, url):  # send the request, get the response
        print(url)
        return requests.get(url,headers=self.headers).content.decode()

    @staticmethod
    def save_content(content_list):
        # Append each post's text to a local file, one post per line.
        with open('qiubai.txt', 'a', encoding='utf-8') as f:
            for text in content_list:
                f.writelines(text)
                f.write('\n')

    def get_content(self, html):
        html = etree.HTML(html)
        div_list = html.xpath("//div[@id='content-left']/div")

        content_list = []
        for div in div_list:
            # Collect the text lines inside each post's content block.
            content_list.append(div.xpath(".//div[@class='content']/span/text()"))

        return content_list

    def run(self):
        url_list = self.get_url_list()

        for url in url_list:
            html = self.parse_url(url)
            content_list = self.get_content(html)
            self.save_content(content_list)

if __name__ == '__main__':
    qiubai = Qiubai()
    qiubai.run()
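
The save step above appends raw text lines to qiubai.txt. As an alternative sketch not found in the original post, a JSON dump keeps each post as one record and preserves the Chinese text; the helper name save_as_json and the file name qiubai.json are illustrative assumptions.

# A minimal JSON-based save step (illustrative, not part of the original post).
import json

def save_as_json(content_list, path='qiubai.json'):
    # ensure_ascii=False keeps the Chinese text readable in the output file.
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(content_list, f, ensure_ascii=False, indent=2)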

Reposted from blog.csdn.net/zzw19951261/article/details/80996551