Crawling the posts of the 【王琳杰-博客园】 blog

  1. Fetch each listing page and extract data from it with XPath

  2. For every blog post, extract the title, body, and read count

  3. Save the results to a JSON file

# -*- coding:utf-8 -*-

import urllib2
from lxml import etree

def loadPage(url):
    """
        Fetch the listing page at the given url and crawl every post it links to.
        url: the listing-page URL to request
    """
    headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}

    request = urllib2.Request(url, headers = headers)
    html = urllib2.urlopen(request).read()
    # Parse the HTML document into a DOM tree
    content = etree.HTML(html)
    # XPath returns the list of all matching href attribute values
    link_list = content.xpath('//div[@class="postTitle"]/a/@href')
    for link in link_list:
        loadDetail(link)
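
As an aside, here is a minimal self-contained sketch of what that listing-page XPath extracts, run against a hard-coded sample of the markup it assumes (the div.postTitle structure is inferred from the XPath itself, not verified against the live page):

from lxml import etree

# Sample of the listing markup the XPath above assumes: one div.postTitle
# wrapper per post, containing a link to the full article.
sample = '''
<div class="postTitle">
  <a href="http://www.cnblogs.com/wanglinjie/p/9194068.html">Example post</a>
</div>
'''

# Ending the path with @href selects the attribute values themselves
print etree.HTML(sample).xpath('//div[@class="postTitle"]/a/@href')
# ['http://www.cnblogs.com/wanglinjie/p/9194068.html']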

# Fetch an individual post page
def loadDetail(link):
    headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib2.Request(link, headers = headers)
    html = urllib2.urlopen(request).read()
    # Parse the post page
    content = etree.HTML(html)
    # The text of the first link inside the post div is the post title
    title = content.xpath('//div[@class="post"]//a')[0].text
    print title
    # TODO (items 2 and 3 of the plan): extract the title, body, and read
    # count, then write them to the JSON file; see the sketch below.
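
The crawler above stops at printing the title; items 2 and 3 of the plan are still unwritten. The sketch below is one way they might look (the helper names and the blogs.json filename are illustrative). The selectors are assumptions based on the classic cnblogs post layout (a#cb_post_title_url for the title, div#cnblogs_post_body for the body, span#post_view_count for the read count), not verified against the live pages; the read count in particular may be filled in by JavaScript and therefore missing from the raw HTML.

import json
import codecs
from lxml import etree

def parsePost(html):
    # Build one record per post; string(...) concatenates all text
    # inside the first node the path matches.
    content = etree.HTML(html)
    return {
        "title": content.xpath('string(//a[@id="cb_post_title_url"])'),
        "body": content.xpath('string(//div[@id="cnblogs_post_body"])'),
        "read_count": content.xpath('string(//span[@id="post_view_count"])'),
    }

def saveItems(items, filename="blogs.json"):
    # Dump the collected records to a UTF-8 JSON file
    with codecs.open(filename, "w", encoding="utf-8") as f:
        f.write(json.dumps(items, ensure_ascii=False, indent=2))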

def blogSpider(url, beginPage, endPage):
    """
        Scheduler for the blog crawler: builds the URL of each listing page
        and hands it to loadPage.
        url : the fixed prefix of the listing-page URL
        beginPage : first page to crawl
        endPage : last page to crawl
    """
    for page in range(beginPage, endPage + 1):
        fullurl = url + str(page)
        loadPage(fullurl)

    print "Thanks for using!"

if __name__ == "__main__":
    beginPage = int(raw_input("Enter the start page: "))
    endPage = int(raw_input("Enter the end page: "))

    url = "http://www.cnblogs.com/wanglinjie/default.html?page="
    blogSpider(url, beginPage, endPage)

To be continued...
