import requests
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import re
# Save the scraped rows to a txt file
def text_save(filename, data):  # filename: path of the output txt file; data: list of rows to write
    with open(filename, 'w', encoding='utf-8') as file:
        for item in data:
            s = str(item).replace('[', '').replace(']', '')  # strip square brackets (optional, depending on the data)
            s = s.replace('(', '').replace(')', '')          # strip parentheses
            s = s.replace("'", '').replace(',', '') + '\n'   # strip single quotes and commas, append a newline to each row
            file.write(s)
    print("File saved successfully")
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
}  # request headers that make the crawler look like a real browser
# Core scraping code
url = "http://www.fortunechina.com/fortune500/c/2020-08/10/content_372148.htm"
ret = Request(url, headers=headers)
html = urlopen(ret)
bs = BeautifulSoup(html, "html.parser")
tr = bs.find('tbody').find_all('tr')

listall = []
for j in tr:
    td = j.find_all('td')                         # cells of the current table row
    rank = td[0].get_text().strip()               # ranking
    corporate_name = td[1].get_text().strip()     # company name
    marketing_revenue = td[2].get_text().strip()  # operating revenue
    profit = td[3].get_text().strip()             # profit
    country = td[4].get_text().strip()            # country
    # pad with the full-width space chr(12288) so columns containing Chinese text stay aligned
    row = "{0:{5}<10}\t{1:{5}<20}\t{2:{5}<20}\t{3:{5}<20}\t{4:{5}<20}".format(
        rank, marketing_revenue, profit, country, corporate_name, chr(12288))
    listall.append(row)

text_save('Wealth Rankings.txt', listall)
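# The requests module imported at the top is not used in the flow above; a sketch of the same
# fetch done with requests instead of urllib could look like this (assumption: only the transport
# changes, parsing and saving stay identical):
# resp = requests.get(url, headers=headers)
# resp.encoding = resp.apparent_encoding        # let requests detect the page encoding
# bs = BeautifulSoup(resp.text, "html.parser")  # parse the same markup as before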