爬取贴吧数据:
1.输入要爬取贴吧的名称(例如:海贼王)
2.输入起始页和终止页(1-2)
3.把每一页的内容保存到本地(例如1.html 2.html)
思路:
页数的规律
https://tieba.baidu.com/f?kw=%???&pn=0 第一页
https://tieba.baidu.com/f?kw=%???&pn=50 第二页
https://tieba.baidu.com/f?kw=%???&pn=100 第三页
pn = (page-1)*50
获取网页的内容(发起请求获取响应)
保存数据
# @ Time : 2021/2/21 16:55
# @ Author : Ellen
import urllib.request
import urllib.parse
# Request headers: send a desktop-browser User-Agent so Tieba serves the
# normal HTML page instead of rejecting the scripted request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
}
# Main flow: ask the user for the forum name and the page range to crawl.
name = input('请输入贴吧的名字:')
begin = int(input('请输入起始页:'))
end = int(input('请输入结束页:'))  # colon added for consistency with the other prompts
# URL-encode the forum name as the 'kw' query parameter.
kw = urllib.parse.urlencode({'kw': name})
# Build each page URL, fetch it, and save the HTML locally.
baseurl = 'https://tieba.baidu.com/f?'
for i in range(begin, end + 1):
    # Tieba paginates 50 threads per page: pn = (page - 1) * 50.
    pn = (i - 1) * 50
    url = baseurl + kw + '&pn=' + str(pn)
    # Fetch the page; the context manager closes the HTTP response even if
    # read()/decode() raises (the original leaked the connection).
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as res:
        html = res.read().decode('utf-8')
    # Save each page as '第N页.html' in the working directory.
    filename = '第' + str(i) + '页.html'
    with open(filename, 'w', encoding='utf-8') as f:
        print('正在爬取第%d页' % i)
        f.write(html)
通过函数方式:
# @ Time : 2021/2/21 17:47
# @ Author : Ellen
import urllib.request
import urllib.parse
# 读取页面
def readPage(url):
    """Fetch *url* and return the response body decoded as UTF-8 text.

    A desktop-browser User-Agent header is sent so Tieba does not reject
    the request as coming from a script.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
    }
    req = urllib.request.Request(url, headers=headers)
    # Context manager guarantees the HTTP response is closed even when
    # read()/decode() raises (the original left the connection open).
    with urllib.request.urlopen(req) as res:
        return res.read().decode('utf-8')
# 写入文件
def writePage(filename, html):
    """Persist the page markup *html* to *filename* as UTF-8 text."""
    with open(filename, mode='w', encoding='utf-8') as out:
        out.write(html)
    print('写入成功')
# 主函数
def main():
    """Prompt for a forum name and page range, then crawl and save each page."""
    name = input('请输入贴吧的名字:')
    begin = int(input('请输入起始页:'))
    end = int(input('请输入结束页:'))  # colon added for consistency with the other prompts
    # URL-encode the forum name as the 'kw' query parameter.
    kw = urllib.parse.urlencode({'kw': name})
    # Base URL is loop-invariant; build it once.
    baseurl = 'https://tieba.baidu.com/f?'
    for i in range(begin, end + 1):
        # Tieba paginates 50 threads per page: pn = (page - 1) * 50.
        pn = (i - 1) * 50
        url = baseurl + kw + '&pn=' + str(pn)
        html = readPage(url)
        # Save each page as '第N页.html'.
        filename = '第' + str(i) + '页.html'
        writePage(filename, html)
# Run the crawler only when this file is executed as a script.
if __name__ == '__main__':
    main()
面向对象方式
# @ Time : 2021/2/21 18:06
# @ Author : Ellen
import urllib.request
import urllib.parse
class BaiduSpider():
    """Simple Baidu Tieba crawler: fetches forum pages and saves them locally."""

    def __init__(self):
        # Request invariants shared by every fetch live on the instance.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
        }
        self.baseurl = 'https://tieba.baidu.com/f?'

    def readPage(self, url):
        """Fetch *url* and return the response body decoded as UTF-8 text."""
        req = urllib.request.Request(url, headers=self.headers)
        # Context manager closes the HTTP response even if decoding raises
        # (the original left the connection open).
        with urllib.request.urlopen(req) as res:
            return res.read().decode('utf-8')

    def writePage(self, filename, html):
        """Write *html* to *filename* as UTF-8 text."""
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)
        print('写入成功')

    def main(self):
        """Prompt for a forum name and page range, then crawl and save each page."""
        name = input('请输入贴吧的名字:')
        begin = int(input('请输入起始页:'))
        end = int(input('请输入结束页:'))  # colon added for consistency with the other prompts
        kw = urllib.parse.urlencode({'kw': name})
        for i in range(begin, end + 1):
            # Tieba paginates 50 threads per page: pn = (page - 1) * 50.
            pn = (i - 1) * 50
            url = self.baseurl + kw + '&pn=' + str(pn)
            html = self.readPage(url)
            filename = '第' + str(i) + '页.html'
            self.writePage(filename, html)
if __name__ == '__main__':
    # Calling main() requires instantiating the class first.
    spider = BaiduSpider()
    spider.main()