Learning web scraping: a Qiushibaike (糗事百科) crawler

import requests
from lxml import etree

The target URL and the request headers:

url = 'https://www.qiushibaike.com/text/'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
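Sites like Qiushibaike tend to reject requests that carry the default requests User-Agent, which is why a browser-style header is set above. As a quick sanity check (a minimal sketch of my own, not part of the original script), you can confirm the list page responds before writing any parsing code:

# Quick sanity check, reusing the url and headers defined above.
resp = requests.get(url, headers=headers)
print(resp.status_code)  # expect 200 if the request got through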

Fetch a joke's title, content, and URL. The function takes the article link as a parameter so the main loop below can pass in each link it collects:

def get_page_content(url_page):
    # Request one article page and pull out its title and body text.
    response = requests.get(url_page, headers=headers)
    response.encoding = 'utf-8'
    result = etree.HTML(response.text)
    content = {}
    content['title'] = result.xpath('//*[@id="content"]/div/div[2]/h1/text()')[0].strip()
    content['url'] = url_page
    content['content'] = result.xpath('//*[@id="single-next-link"]/div/text()')[0].strip()
    return content
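To try the function on its own, pass it a single article link; the article ID below is only an example and may no longer be live:

content = get_page_content('https://www.qiushibaike.com/article/123448116')
print(content['title'])
print(content['content'])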

Fetch the links to the jokes on each list page:

def get_url_list(url):
    # Collect the article links from one list page and make them absolute.
    response = requests.get(url, headers=headers)
    result = etree.HTML(response.text)
    urls = result.xpath('//div[@id="content"]/div/div[2]/div/a/@href')
    url_lists = []
    for u in urls:
        url_lists.append('https://www.qiushibaike.com' + u)
    return url_lists
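Likewise, get_url_list can be tested on the first list page; each element of the result should be a full article URL:

page_urls = get_url_list('https://www.qiushibaike.com/text/page/1/')
print(len(page_urls))
print(page_urls[:3])  # the first few absolute article links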
if __name__ == '__main__':
    # The text channel has 13 pages in total; generate the page-turning links with a for loop.
    lists = []
    for i in range(1, 14):
        url = "https://www.qiushibaike.com/text/page/{}/".format(i)
        lists += get_url_list(url)
    for url in lists:
        print(get_page_content(url))
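Fetching 13 list pages plus every article in a tight loop is exactly the kind of traffic that gets a crawler blocked. A variant of the main block (the one-second delay and the output filename are my own choices, not from the original) pauses between requests and saves the results as JSON instead of only printing them:

import json
import time

if __name__ == '__main__':
    lists = []
    for i in range(1, 14):
        lists += get_url_list("https://www.qiushibaike.com/text/page/{}/".format(i))
        time.sleep(1)  # pause between list-page requests
    items = []
    for url in lists:
        items.append(get_page_content(url))
        time.sleep(1)  # pause between article requests
    # Dump everything to a file; 'qiushibaike.json' is just an example name.
    with open('qiushibaike.json', 'w', encoding='utf-8') as f:
        json.dump(items, f, ensure_ascii=False, indent=2)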