爬虫——Python爬百度学术论文的标题、摘要,并保存在本地

本脚本只能爬取标题和部分摘要(以搜索结果链接页实际显示的内容为准);也可以爬取年份、来源、作者和下载链接(相关代码在下文中已被注释掉)。

import requests
from bs4 import BeautifulSoup
from urllib.request import quote

# Baidu Scholar serves UTF-8 pages, so the keyword is UTF-8-encoded before
# being percent-escaped into the query string.
search = input('请输入关键词:')
kwen = search.encode('utf-8')  # UTF-8-encode the keyword, stored in kwen

# Browser-like User-Agent so requests are not rejected by anti-bot filtering.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}

# 'with' guarantees the output file is closed even if the crawl raises midway.
with open('百度学术.txt', 'w', encoding='utf-8') as f:
    for x in range(200):  # number of result pages to crawl (10 hits per page)
        url = ('http://xueshu.baidu.com/s?wd=' + quote(kwen)
               + '&pn=' + str(x * 10)
               + '&tn=SE_baiduxueshu_c1gjeupa&ie=utf-8'
               + '&sc_f_para=sc_tasktype%3D%7BfirstSimpleSearch%7D&sc_hit=1')
        # timeout keeps a single stalled connection from hanging the whole crawl
        res = requests.get(url, headers=headers, timeout=30)
        print(res.status_code)  # quick visual check that the page was fetched
        bs1 = BeautifulSoup(res.text, 'html.parser')
        for entry in bs1.find_all('div', class_="sc_content"):
            title_tag = entry.find('h3', class_="t c_font")
            if title_tag is None:
                continue  # malformed result entry — skip rather than crash
            title = title_tag.text
            print(title)
            f.write("题目:" + title.strip())
            f.write('\n')
            # Follow the per-article link for the full(er) abstract.
            link_tag = title_tag.find('a')
            if link_tag is None or not link_tag.get('href'):
                print('该文章无链接')
                f.write('该文章无链接')
                continue
            wholelink = 'http:' + str(link_tag['href'])
            # NOTE: named 'detail', not 're', to avoid shadowing the stdlib re module.
            detail = requests.get(wholelink, headers=headers, timeout=30)
            if detail.status_code == 200:
                bs2 = BeautifulSoup(detail.text, 'html.parser')
                # Each find() may return None on an unexpected page layout,
                # so every step is guarded before the next lookup.
                main_info = bs2.find('div', class_="main-info")
                infos = main_info.find('div', class_="c_content") if main_info else None
                abstract_wr = infos.find('div', class_="abstract_wr") if infos else None
                abstract_p = abstract_wr.find('p', class_="abstract") if abstract_wr else None
                if abstract_p is not None:
                    abstract = abstract_p.text.strip()
                    print(abstract)
                    f.write("摘要: " + abstract)
                    f.write('\r\n')
                else:
                    print('该文章无摘要, 详情请查看官网:' + wholelink + '\n')
                    f.write('该文章无摘要, 详情请查看官网:' + wholelink)
                    f.write('\r\n')
            else:
                print('该文章无链接')
                f.write('该文章无链接')

代码来源如下:https://www.pythonf.cn/read/104963

我以“中医药”为关键词爬取了一部分信息如下:
(此处原文插入了爬取结果的截图,本文略)

猜你喜欢

转载自blog.csdn.net/zeshen123/article/details/109293563