from bs4 import BeautifulSoup as bsp
import requests
import re
import csv
# Scrape the relevant content from each page
i = 549
titles3 = []
while i > 538:
    urls = "http://www.view.sdu.edu.cn/xyxw/" + str(i) + ".htm"
    page_response = requests.get(urls, timeout=5)
    page_content = bsp(page_response.content, "html.parser")
    for sublist in page_content.find_all(class_="sublist"):
        titles = sublist.find_all("li")
        titles3 += [title.text for title in titles]
    i -= 1
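# At this point titles3 holds one string per <li>; judging from the regex
# used below, each entry mixes the headline text with a YYYY-M-D date
# (the exact text depends on the page markup).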
titles4 = str(titles3)
titles5 = titles4.replace(",", "\n")
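# Illustration with made-up data: str(["a 2023-7-1", "b 2023-7-2"]) gives
# "['a 2023-7-1', 'b 2023-7-2']", and replacing "," with "\n" then puts each
# entry on its own line (the quotes and brackets are left behind as noise).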
temp = []
time_t = []
# Save the scraped text to a .txt file
with open("viewsdu5.txt", mode = "w", encoding = "utf-8") as f:
f.write(titles5)
f.close()
# Process the strings: step 1, pull the date out of each entry with a regular expression;
# step 2, the commas were already turned into newlines above, which splits the content into lines
with open("viewsdu5.txt", mode = "r", encoding = "utf-8") as f:
for i in f.readlines():
str = (re.search(r"(\d{4}-\d{1,2}-\d{1,2})",i))
str1 = str.group(0)
time_t.append(str1)
titles6 = i.replace(str1, " ")
temp.append(titles6)
f.close()
# Save the final result as a .csv file (a table); the data has to be grouped into rows first
with open("viewsdu5.txt", mode = "w", encoding = "utf-8") as f:
for i in temp:
f.write(i)
f.close()
headers = ["内容", "时间"]
rows = []
for (tem, tim) in zip(temp, time_t):
    rows.append([tem, tim])
# Write the list to the CSV; it is a list of n rows by 2 columns
with open("csv01.csv", "w", newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
for row in rows:
f_csv.writerow(row)
f.close()
# summary: 1. Crawl the site for the content you need.
#          2. Run string processing over all of it, i.e. the data-cleaning step.
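As a rough alternative sketch (my own rewrite, not from the original post), the intermediate .txt round-trip can be skipped by cleaning each title as it is scraped and writing all rows in one pass with csv.writerows; it assumes the same URL range and page structure as above:

from bs4 import BeautifulSoup
import requests
import re
import csv

date_re = re.compile(r"\d{4}-\d{1,2}-\d{1,2}")
rows = []
for page in range(549, 538, -1):  # pages 549 down to 539, same range as above
    url = "http://www.view.sdu.edu.cn/xyxw/" + str(page) + ".htm"
    response = requests.get(url, timeout=5)
    soup = BeautifulSoup(response.content, "html.parser")
    for sublist in soup.find_all(class_="sublist"):
        for li in sublist.find_all("li"):
            text = li.get_text(strip=True)
            match = date_re.search(text)
            if match is None:
                continue  # skip list items that carry no date
            date = match.group(0)
            rows.append([text.replace(date, "").strip(), date])

with open("csv01.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["内容", "时间"])
    writer.writerows(rows)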