思路
爬虫嘛,就是批量获取目标网站上的内容。首先需要知道目标网站的url,尤其是需要获取目标网站里面子链接中的内容时,需要先批量获取所有子链接的url。
其次是从大量的信息中提取并整理自己想要的信息。
是不是很简单~~
工具
一般用 Beautiful Soup 库,它专门用来从网页中提取数据,非常适合用来写爬虫。
Beautiful Soup简介
官方文档
也可以直接用正则表达式提取。
案例
获取目标url
from bs4 import BeautifulSoup  # Beautiful Soup: extract structured data from HTML
import requests
import time

# Browser-like User-Agent so music.163.com serves the normal page
# instead of rejecting the default requests UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}

# Open the output file once for the whole crawl instead of re-opening
# it for every single row as the original did.
with open('playlist.csv', 'a+', encoding='utf-8-sig') as f:
    # Each index page lists 35 playlists, so step the offset by 35.
    for i in range(0, 1330, 35):
        print(i)
        time.sleep(2)  # throttle requests to avoid being flagged as a bot
        url = 'https://music.163.com/discover/playlist/?cat=欧美&order=hot&limit=35&offset=' + str(i)
        response = requests.get(url=url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Anchors carrying each playlist's detail-page href and title
        # ('.dec a' = <a> children of the .dec container).
        ids = soup.select('.dec a')
        # One <li> per playlist card on the index page.
        lis = soup.select('#m-pl-container li')
        print(len(lis))
        # zip() pairs the two selections safely; the original indexed
        # ids[j] for j in range(len(lis)) and could raise IndexError
        # if the lists ever differ in length.
        for anchor, li in zip(ids, lis):
            detail_url = anchor['href']           # playlist detail-page path
            title = anchor['title']               # playlist title
            play = li.select('.nb')[0].get_text()  # play count
            # Contributor name: first <a> inside the second <p> of the card.
            user = li.select('p')[1].select('a')[0].get_text()
            print(detail_url, title, play, user)
            # Swap ASCII commas in free-text fields for fullwidth ones so
            # they cannot corrupt the comma-separated line format.
            row = [detail_url, title.replace(',', ','), play, user.replace(',', ',')]
            f.write(','.join(row) + '\n')
获取想要的信息
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time

# Playlist index produced by the crawler above.
# error_bad_lines was deprecated in pandas 1.3 and removed in 2.0;
# on_bad_lines='skip' is the modern equivalent (silently drop rows
# with the wrong number of fields).
df = pd.read_csv('playlist.csv', header=None, on_bad_lines='skip',
                 names=['url', 'title', 'play', 'user'])
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
for path in df['url']:
    time.sleep(2)  # throttle requests to avoid being flagged as a bot
    url = 'https://music.163.com' + path
    response = requests.get(url=url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Playlist title; escape ASCII commas with fullwidth ones so the
    # field cannot break the comma-separated output line.
    title = soup.select('h2')[0].get_text().replace(',', ',')
    # Collect the playlist's tag labels.
    tags = [i.get_text() for i in soup.select('.u-tag i')]
    # Join with '-'; fall back to '无' when there are no tags at all
    # (the original indexed tags[0] and crashed on an empty list).
    tag = '-'.join(tags) if tags else '无'
    # Playlist description (may be absent on some pages).
    if soup.select('#album-desc-more'):
        # Strip newlines (the original had a no-op replace('', ''))
        # and escape commas so the free text stays on one CSV line.
        text = soup.select('#album-desc-more')[0].get_text().replace('\n', '').replace(',', ',')
    else:
        text = '无'
    # Favourite count, rendered as "(1234)" -> keep just the digits.
    collection = soup.select('#content-operation i')[1].get_text().replace('(', '').replace(')', '')
    play = soup.select('.s-fc6')[0].get_text()                   # play count
    songs = soup.select('#playlist-track-count')[0].get_text()   # number of tracks
    comments = soup.select('#cnt_comment_count')[0].get_text()   # comment count
    print(title, tag, text, collection, play, songs, comments)
    # Append one row per playlist; the original appended '' instead of
    # a newline, fusing every row onto a single line.
    with open('music_message.csv', 'a+', encoding='utf-8-sig') as f:
        row = [title, tag, text, collection, play, songs, comments]
        f.write(','.join(row) + '\n')
    # Track names, one per line (same missing-newline fix as above).
    with open('music_name.csv', 'a+', encoding='utf-8-sig') as f:
        for song in soup.select('.f-hide li a'):
            f.write(song.get_text() + '\n')