This article shows how to scrape a full novel with a Python crawler. It is quite practical, so it is shared here for your reference; hopefully you will get something out of it. Without further ado, let's take a look.

Code
import requests
import time  # not used in the loop below; handy for throttling requests (see the sketch after the code)
from tqdm import tqdm
from bs4 import BeautifulSoup

"""
Author:
    Jack Cui
Wechat:
    https://mp.weixin.qq.com/s/OCWwRVDFNslIuKyiCVUoTA
"""

def get_content(target):
    # Fetch a single chapter page and return its paragraphs as a list.
    req = requests.get(url=target)
    req.encoding = 'utf-8'
    html = req.text
    bf = BeautifulSoup(html, 'lxml')
    texts = bf.find('div', id='content')
    # Paragraphs on this site are separated by four non-breaking spaces.
    content = texts.text.strip().split('\xa0' * 4)
    return content

if __name__ == '__main__':
    server = 'https://www.xsbiquge.com'
    book_name = '诡秘之主.txt'
    target = 'https://www.xsbiquge.com/15_15338/'
    # Fetch the catalogue page that lists every chapter of the novel.
    req = requests.get(url=target)
    req.encoding = 'utf-8'
    html = req.text
    chapter_bs = BeautifulSoup(html, 'lxml')
    chapters = chapter_bs.find('div', id='list')
    chapters = chapters.find_all('a')
    # Walk the chapter links with a tqdm progress bar, appending each
    # chapter's title and text to the output file.
    for chapter in tqdm(chapters):
        chapter_name = chapter.string
        url = server + chapter.get('href')
        content = get_content(url)
        with open(book_name, 'a', encoding='utf-8') as f:
            f.write(chapter_name)
            f.write('\n')
            f.write('\n'.join(content))
            f.write('\n')
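As written, the loop fires requests back to back and the time import is never actually used. A minimal sketch of a gentler variant is shown below: it pauses briefly between chapter downloads. It assumes the same chapters, server, book_name and get_content from the script above, and the 0.5-second delay is an arbitrary choice, not something from the original article.

    import time

    # Sketch: same download loop, with a short pause between requests so the
    # scraper does not hammer the server. Assumes `chapters`, `server`,
    # `book_name` and `get_content` are defined as in the script above.
    for chapter in tqdm(chapters):
        chapter_name = chapter.string
        url = server + chapter.get('href')
        content = get_content(url)
        with open(book_name, 'a', encoding='utf-8') as f:
            f.write(chapter_name + '\n')
            f.write('\n'.join(content) + '\n')
        time.sleep(0.5)  # arbitrary delay; tune to taste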