BS4_个人博客整理

个人博客整理

源代码:

import requests
from bs4 import BeautifulSoup


def get_content(url):
    """Fetch *url* and return the decoded HTML text, or ``None`` on failure.

    Sends a desktop Firefox User-Agent header so the blog server does not
    reject the request as a bot.

    :param url: page URL to download.
    :return: the response body as text, or ``None`` if the request failed.
    """
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'
    try:
        # timeout prevents the script from hanging forever on a dead server
        response = requests.get(url, headers={'User-Agent': user_agent}, timeout=10)
        # Raise an exception for any non-2xx status code.
        response.raise_for_status()
        # Guess the encoding from the response body so response.text
        # knows how to decode it correctly.
        response.encoding = response.apparent_encoding
    except requests.RequestException as e:
        # Narrow catch: only network/HTTP errors, and report the cause
        # instead of swallowing it silently.
        print('爬取错误:', e)
        return None
    else:
        print('爬取成功')
        return response.text

def parser_content(htmlContent, blog_info=None):
    """Parse one CSDN article-list page and collect (title, url) tuples.

    Each blog entry looks like::

        <div class="article-item-box csdn-tracking-statistics" data-articleid="...">
          <h4><a href="https://blog.csdn.net/.../article/details/...">
              <span class="article-type type-1">原</span>
              socket应用场景</a></h4>
        </div>

    :param htmlContent: HTML text of the list page; ``None``/empty is a no-op
                        (``get_content`` returns ``None`` on failure).
    :param blog_info: list to append results to; defaults to the module-level
                      ``blogInfo`` accumulator for backward compatibility.
    """
    if blog_info is None:
        blog_info = blogInfo  # fall back to the global accumulator
    if not htmlContent:
        # Download failed upstream — nothing to parse.
        return
    soup = BeautifulSoup(htmlContent, 'html.parser')
    # Every blog entry is a div whose class contains "article-item-box".
    divObjs = soup.find_all('div', class_='article-item-box')
    # Skip the first div (a pinned advertisement), keep the personal posts.
    for divObj in divObjs[1:]:
        # get_text().split() yields the 原/转 (original/repost) badge first,
        # then the title tokens.  Drop the badge and rejoin the rest so
        # titles containing whitespace are not truncated (the old code kept
        # only token [1]).
        parts = divObj.h4.a.get_text().split()
        blogTitle = ' '.join(parts[1:])
        # The post URL is the href of the anchor inside the h4.
        blogUrl = divObj.h4.a.get('href')
        blog_info.append((blogTitle, blogUrl))

if __name__ == '__main__':
    import os

    blogPage = 3
    # Global accumulator: (title, url) tuples from every scraped page.
    blogInfo = []
    for page in range(1, blogPage + 1):
        url = 'https://blog.csdn.net/King15229085063/article/list/%s' % (page)
        content = get_content(url)
        # Skip pages that failed to download instead of crashing the parser.
        if content:
            parser_content(content)
        print('第%d页整理结束......' % (page))

    # Make sure the output directory exists before appending to the file.
    os.makedirs('doc', exist_ok=True)
    # Fix the encoding so Chinese titles are written correctly everywhere.
    with open('doc/myblog.md', 'a', encoding='utf-8') as f:
        # Iterate in reverse so the oldest post is numbered 1.
        for index, info in enumerate(blogInfo[::-1]):
            f.write('- 第%d篇博客:[%s](%s)\n' % (index + 1, info[0], info[1]))
    print('博客整理完成')

运行结果:
(运行结果截图略)
爬取结果(https://blog.csdn.net/King15229085063/article/details/87779841):
(爬取结果截图略)

猜你喜欢

转载自blog.csdn.net/King15229085063/article/details/87779231
BS4