First, find the link to the song chart you want to download. The one used here is:
https://music.163.com/discover/toplist?id=3778678
Then change the directory the files are saved to. The folder must already exist; for example, mine is the 网易云热歌榜 folder under 360下载 on the D drive (D:/360下载/网易云热歌榜/), and with that in place the download completes normally.
If the folder has not been created beforehand, the script fails with [Errno 2] No such file or directory.
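If you would rather have the script create the folder itself, a minimal sketch along these lines (standard-library os, reusing the example path from above) avoids that error:

import os

path = "D:/360下载/网易云热歌榜/"   # example save directory from above; change to your own
os.makedirs(path, exist_ok=True)  # create the folder (and any missing parents) if it does not exist yet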
Code implementation:
from urllib import request
from bs4 import BeautifulSoup
import re
import requests
import time


class Music(object):
    def __init__(self, baseurl, path):
        # request header so the site treats the script as a normal browser
        head = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
        }
        self.baseurl = baseurl
        self.headers = head
        self.path = path

    def main(self):
        html = self.askurl()
        bs4 = self.analysis(html)
        name1 = self.matching(bs4)
        self.save(name1)

    def askurl(self):
        # fetch the chart page and return its HTML
        req = request.Request(url=self.baseurl, headers=self.headers)
        response = request.urlopen(req)
        html = response.read().decode("utf-8")
        return html

    def analysis(self, html):
        # the song data sits in a <textarea> block; grab it as one string
        soup = BeautifulSoup(html, "html.parser")
        bs4 = soup.find_all("textarea")
        bs4 = str(bs4)
        return bs4

    def matching(self, bs4):
        # first-pass filter over the JSON dumped in the <textarea>
        rule0 = re.compile(r'"name":"(.*?)","tns":[],"alias":[]')
        name0 = re.findall(rule0, bs4)
        joined = ""
        for i in name0:
            joined = joined + "," + i
        joined = joined.replace("\xa0", " ")
        # second-pass filter: the last capture group is the song id used for downloading
        rule1 = re.compile(r'jpg,(.*?),(.*?)","id":(\d*)')
        name1 = re.findall(rule1, joined)
        return name1

    def save(self, name1):
        for j in name1:
            print("Downloading: " + j[1] + " - " + j[0] + "...")
            # the outer/url endpoint redirects to the actual mp3 for this song id
            url = "http://music.163.com/song/media/outer/url?id=" + j[2]
            content = requests.get(url=url, headers=self.headers).content
            with open(self.path + j[1] + " - " + j[0] + ".mp3", "wb") as f:
                f.write(content)
            print(j[1] + " - " + j[0] + " downloaded.\n")
            time.sleep(0.5)
        return


if __name__ == "__main__":
    baseurl = "https://music.163.com/discover/toplist?id=3778678"  # hot songs chart URL to crawl
    path = "D:/360下载/网易云热歌榜/"  # directory the files are saved to
    demo0 = Music(baseurl, path)
    demo0.main()
    print("All downloads finished")
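The download step in save() relies on the outer/url redirect endpoint that appears in the code above. As a standalone illustration, here is a minimal sketch that fetches a single track; the song id and output filename are placeholders, not values from the article:

import requests

song_id = "123456"  # placeholder id; real ids are the third field matched by rule1
url = "http://music.163.com/song/media/outer/url?id=" + song_id
headers = {"user-agent": "Mozilla/5.0"}
resp = requests.get(url, headers=headers)  # the endpoint redirects to the mp3 file
with open("test.mp3", "wb") as f:          # placeholder filename
    f.write(resp.content)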
Extended content:
Python 3 in practice: a crawler that scrapes hot comments from NetEase Cloud Music
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import urllib.request
import urllib.error
import urllib.parse
import json


def get_all_hotSong():  # get the names and ids of every song on the hot songs chart
    url = 'http://music.163.com/discover/toplist?id=3778678'  # NetEase Cloud Music hot songs chart URL
    html = urllib.request.urlopen(url).read().decode('utf8')  # open the URL
    html = str(html)  # convert to str
    # NOTE: the regex patterns were stripped out of the original post; the ones below are
    # assumptions based on the <ul class="f-hide"> song list in the page source.
    pat1 = r'<ul class="f-hide"><li><a href="/song\?id=\d*?">.*</a></li></ul>'  # regex for the first filtering pass
    result = re.compile(pat1).findall(html)  # filter with the regex
    result = result[0]  # take the first element
    pat2 = r'<li><a href="/song\?id=\d*?">(.*?)</a></li>'  # regex for the second pass: song names
    hot_song_name = re.compile(pat2).findall(result)  # all hot song names
    pat3 = r'<li><a href="/song\?id=(\d*?)">.*?</a></li>'  # regex for the third pass: song ids
    hot_song_id = re.compile(pat3).findall(result)  # all hot song ids
    return hot_song_name, hot_song_id
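A quick usage sketch for the function above (assuming the reconstructed patterns and that both lists come back in chart order):

hot_song_name, hot_song_id = get_all_hotSong()
for song_id, song_name in zip(hot_song_id, hot_song_name):
    print(song_id + "  " + song_name)  # print each chart entry as "id  name"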
That is the full example code for crawling the NetEase Cloud Music hot songs chart with Python. For more material on crawling the chart with Python, please see the other related articles!