diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..02bfa5d --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 yhf + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 98b807f..6f96817 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,76 @@ -# PythonCrawler: 用python编写的爬虫项目集合 -``` - ( - )\ ) ) ) ( ( -(()/( ( ( /( ( /( )\ ( ) ( ( )\ ( ( - /(_)))\ ) )\()))\()) ( ( (((_) )( ( /( )\))( ((_) ))\ )( -(_)) (()/( (_))/((_)\ )\ )\ ) )\___ (()\ )(_))((_)()\ _ /((_)(()\ -| _ \ )(_))| |_ | |(_) ((_) _(_/(((/ __| ((_)((_)_ _(()((_)| |(_)) ((_) -| _/| || || _|| ' \ / _ \| ' \))| (__ | '_|/ _` |\ V V /| |/ -_) | '_| -|_| \_, | \__||_||_|\___/|_||_| \___||_| \__,_| \_/\_/ |_|\___| |_| +```shell + ( + )\ ) ) ) ( ( + (()/( ( ( /( ( /( )\ ( ) ( ( )\ ( ( + /(_)))\ ) )\()))\()) ( ( (((_) )( ( /( )\))( ((_) ))\ )( + (_)) (()/( (_))/((_)\ )\ )\ ) )\___ (()\ )(_))((_)()\ _ /((_)(()\ + | _ \ )(_))| |_ | |(_) ((_) _(_/(((/ __| ((_)((_)_ _(()((_)| |(_)) ((_) + | _/| || || _|| ' \ / _ \| ' \))| (__ | '_|/ _` |\ V V /| |/ -_) | '_| + |_| \_, | \__||_||_|\___/|_||_| \___||_| \__,_| \_/\_/ |_|\___| |_| |__/ - —————— by yanghangfeng + —————— by yanghangfeng ``` - +#
PythonCrawler: 用 Python 编写的爬虫项目集合 :bug:(本项目代码仅作为爬虫技术学习之用,学习者务必遵循中华人民共和国法律!)
+ +# IPWO全球代理资源 | 为采集、跨境与测试项目提供支持(免费试用,爬虫使用强烈推荐!!!) +### 官网地址 +[👉 访问 IPWO 官网](https://www.ipwo.net/?code=WSESV2ONN) +### 产品简介 +* 免费试用,先体验再选择 +* 9000万+真实住宅IP,覆盖220+国家和地区 +* 支持动态住宅代理、静态住宅代理(ISP) +* 适用于数据抓取、电商、广告验证、SEO监控等场景 +* 支持HTTP/HTTPS/SOCKS5协议,兼容性强 +* 纯净IP池,实时更新,99.9%连接成功率 +* 支持指定国家城市地区访问,保护隐私 # spiderFile模块简介 -##### 1. [baidu_sy_img.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/baidu_sy_img.py): 抓取百度的‘高清摄影’图片。 - -##### 2. [baidu_wm_img.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/baidu_wm_img.py): 抓取百度图片‘唯美意境’模块。 - -##### 3. [get_photos.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/get_photos.py): 抓取百度贴吧某话题下的所有图片。 - -##### 4. [get_web_all_img.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/get_web_all_img.py): 抓取整个网站的图片。 - -##### 5. [lagou_position_spider.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/lagou_position_spider.py): 任意输入关键字,一键抓取与关键字相关的职位招聘信息,并保存到本地文件。 - -##### 6. [student_img.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/student_img.py): 基于本学校官网的url漏洞,获取所有注册学生学籍证件照。 - -##### 7. [JD_spider.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/JD_spider.py): 大批量抓取京东商品id和标签。 - -##### 8. [ECUT_pos_html.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/ECUT_pos_html.py): 抓取学校官网所有校园招聘信息,并保存为html格式,图片也会镶嵌在html中。 - -##### 9. [ECUT_get_grade.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/ECUT_get_grade.py): 模拟登陆学校官网,抓取成绩并计算平均学分绩。 - -##### 10. [github_hot.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/github_hot.py): 抓取github上面热门语言所对应的项目,并把项目简介和项目主页地址保存到本地文件。 - -##### 11.[xz_picture_spider.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/xz_picture_spider.py): 应一位知友的请求,抓取某网站上面所有的写真图片。 - -##### 12.[one_img.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/one_img.py): 抓取one文艺网站的图片。 - -##### 13.[get_baike.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/get_baike.py): 任意输入一个关键词抓取百度百科的介绍。 - -##### 14.[kantuSpider.py](https://github.com/Fenghuapiao/PythonCrawler/blob/master/spiderFile/kantuSpider.py): 抓取看图网站上的所有图片。 - +1. [baidu_sy_img.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/baidu_sy_img.py): **抓取百度的`高清摄影`图片。** +2. [baidu_wm_img.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/baidu_wm_img.py): **抓取百度图片`唯美意境`模块。** +3. [get_photos.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_photos.py): **抓取百度贴吧某话题下的所有图片。** +4. [get_web_all_img.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_web_all_img.py): **抓取整个网站的图片。** +5. [lagou_position_spider.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/lagou_position_spider.py): **任意输入关键字,一键抓取与关键字相关的职位招聘信息,并保存到本地文件。** +6. [student_img.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/student_img.py): **自动化获取自己学籍证件照。** +7. [JD_spider.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/JD_spider.py): **大批量抓取京东商品id和标签。** +8. [ECUT_pos_html.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/ECUT_pos_html.py): **抓取学校官网所有校园招聘信息,并保存为html格式,图片也会镶嵌在html中。** +9. [ECUT_get_grade.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/ECUT_get_grade.py): **模拟登陆学校官网,抓取成绩并计算平均学分绩。** +10. 
[github_hot.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/github_hot.py): **抓取github上面热门语言所对应的项目,并把项目简介和项目主页地址保存到本地文件。** +11. [xz_picture_spider.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/xz_picture_spider.py): **应一位知友的请求,抓取某网站上面所有的写真图片。** +12. [one_img.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/one_img.py): **抓取one文艺网站的图片。** +13. [get_baike.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_baike.py): **任意输入一个关键词抓取百度百科的介绍。** +14. [kantuSpider.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/kantuSpider.py): **抓取看图网站上的所有图片。** +15. [fuckCTF.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/fuckCTF.py): **通过selenium模拟登入合天网站,自动修改原始密码。** +16. [one_update.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/one_update.py): **更新抓取one文艺网站的代码,添加一句箴言的抓取。** +17. [get_history_weather.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_history_weather.py): **抓取广州市2019年第一季度的天气数据。** +18. [search_useful_camera_ip_address.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/search_useful_camera_ip_address.py): **摄像头弱密码安全科普。** +19. [get_top_sec_com.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_top_sec_com.py): **异步编程获取A股市场网络安全版块公司市值排名情况,并以图片格式保存下来。** +20. [get_tf_accident_info.py](https://github.com/yhangf/PythonCrawler/blob/master/spiderFile/get_tj_accident_info.py): **同步和异步编程结合获取天津市应急管理局所有事故信息。** --- # spiderAPI模块简介 + #### 本模块提供一些网站的API爬虫接口,功能可能不是很全因此可塑性很大智慧的你如果有兴趣可以继续改进。 + ##### 1.大众点评 + ```python from spiderAPI.dianping import * @@ -69,7 +93,7 @@ restaurantlist=restaurantList('http://www.dianping.com/search/category/2/10/p2') ``` ##### 2.获取代理IP -爬取http://proxy.ipcn.org,获取可用代理 +爬取[代理IP](http://proxy.ipcn.org) ```python from spiderAPI.proxyip import get_enableips @@ -78,7 +102,8 @@ enableips=get_enableips() ``` ##### 3.百度地图 -百度地图提供的API,对查询有一些限制,这里找出了web上查询的接口 + +百度地图提供的API,对查询有一些限制,这里找出了web上查询的接口。 ```python from spiderAPI.baidumap import * diff --git a/spiderFile/baidu_sy_img.py b/spiderFile/baidu_sy_img.py index faaf6e2..b663ea0 100644 --- a/spiderFile/baidu_sy_img.py +++ b/spiderFile/baidu_sy_img.py @@ -42,11 +42,11 @@ def get_img(page, headers): reg = re.compile('http://.*?\.jpg') imglist1 = re.findall(reg, page) imglist2 = imglist1[0: len(imglist1): 3] -# [img_url_list.append(i) for i in imglist if not i in img_url_list] + # [img_url_list.append(i) for i in imglist if not i in img_url_list] x = 0 for imgurl in imglist2: bin = requests.get(imgurl, headers=headers).content - with open('E:/Pic2/%s.jpg' % x, 'wb') as file: + with open('./%s.jpg' % x, 'wb') as file: file.write(bin) x += 1 diff --git a/spiderFile/fuckCTF.py b/spiderFile/fuckCTF.py new file mode 100644 index 0000000..76597f1 --- /dev/null +++ b/spiderFile/fuckCTF.py @@ -0,0 +1,142 @@ + +""" +author: 杨航锋 +date : 2018年8月19日 +mood : 嗯,比较无聊,甚至还有点想吃黄焖鸡米饭😋 +""" + + +import os +import random +import functools + +from PIL import Image +from selenium import webdriver + + +class fuckCTF: + + def __init__(self, username, old_password): + self.url = "http://hetianlab.com/" + self.login_url = "http://hetianlab.com/loginLab.do" + self.username = username + self.old_password = old_password + self.new_password = (yield_new_password(), "******")[0] + self.options = webdriver.FirefoxOptions() + self.options.add_argument("-headless") + self.browser = webdriver.Firefox(options=self.options) + print("init ok") + + def login_hetian(self): + 
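# 打开登录页,填写邮箱与旧密码,截图留档,点击登录按钮后返回首页 +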
self.browser.get(self.login_url) + self.browser.find_element_by_id("userEmail").clear() + self.browser.find_element_by_id("userEmail").send_keys(self.username) + self.browser.find_element_by_id("passwordIn").clear() + self.browser.find_element_by_id("passwordIn").send_keys(self.old_password) + self.browser.get_screenshot_as_file(self.username + '/' + "login.png") + self.browser.find_element_by_id("registButIn").click() + self.browser.get(self.url) + print("login_hetian running ok!") + + def get_personl_information_page(self): + grzx_btn = self.browser.find_element_by_xpath("/html/body/div[1]/div[1]/div/div/div[2]/ul/li[2]/a") + self.browser.execute_script("$(arguments[0]).click()", grzx_btn) + self.browser.get("http://hetianlab.com/getUserInfo.do") + print("get_personl_information_page running ok!") + + def get_password_setting_page(self): + mmsz_btn = self.browser.find_element_by_xpath("/html/body/div[2]/div/div[1]/ul/ul[3]/li[2]") + self.browser.execute_script("$(arguments[0]).click()", mmsz_btn) + self.browser.find_element_by_id("person").click() + self.browser.find_element_by_class_name("check") + print("get_password_setting_page running ok!") + + def setting_password(self): + self.browser.find_element_by_id("oldpwd").clear() + self.browser.find_element_by_id("oldpwd").send_keys(self.old_password) + self.browser.find_element_by_id("newpwd").clear() + self.browser.find_element_by_id("newpwd").send_keys(self.new_password) + self.browser.find_element_by_id("quepwd").clear() + self.browser.find_element_by_id("quepwd").send_keys(self.new_password) + print("setting_password running ok!") + + def get_v_code(self): + status = self.browser.get_screenshot_as_file(self.username + '/' + "v_code.png") + if status: + img = Image.open(self.username + '/' + "v_code.png") + img.show() + self.v_code = input("请输入验证码: ") + self.browser.find_element_by_class_name("code").send_keys(self.v_code) + else: + raise("截屏失败!") + print("get_v_code running ok!") + + def submit_data(self): + self.browser.find_element_by_id("submitbtn").click() + self.browser.get_screenshot_as_file(self.username + '/' + "result.png") + self.browser.quit() + print("submit_data running ok!") + + def make_portfolio(self): + if not os.path.exists(self.username): + os.makedirs(self.username) + print("make_portfolio running ok!") + + def save_success_data(self): + with open("./username_and_password_data_successed.log", "a+") as fp: + fp.write( + "username" + ": {}".format(self.username) + "\t" + "password" + ": {}".format(self.new_password) + + "\n" + ) + print("save_success_data running ok!") + + def save_failed_data(self): + with open("./username_and_password_data_failed.log", "a+") as fp: + fp.write( + "username" + ": {}".format(self.username) + "\n" + ) + print("save_failed_data running ok!") + + def main(self): + try: + self.make_portfolio() + self.login_hetian() + self.get_personl_information_page() + self.get_password_setting_page() + self.setting_password() + self.get_v_code() + self.submit_data() + self.save_success_data() + except: + self.save_failed_data() + + +def gen_decorator(gen): + @functools.wraps(gen) + def inner(*args, **kwargs): + return next(gen(*args, **kwargs)) + return inner + + +@gen_decorator +def yield_new_password(): + strings = list("abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()") + yield "".join(random.choices(strings, k=6)) + + +def yield_usernames(n): + prefix = "ctf2018_gzhu" + postfix = "@dh.com" + for num in range(n): + if num < 10: + infix = '0' + str(num) + else: + infix = str(num) + yield prefix + infix 
+ postfix + + +if __name__ == "__main__": + for username in yield_usernames(100): + ctfer = fuckCTF(username, "******") + ctfer.main() diff --git a/spiderFile/get_history_weather.py b/spiderFile/get_history_weather.py new file mode 100644 index 0000000..77176fc --- /dev/null +++ b/spiderFile/get_history_weather.py @@ -0,0 +1,31 @@ +import re +import pandas as pd +import requests as rq +from bs4 import BeautifulSoup + + +def get_data(url): + html = rq.get(url).content.decode("gbk") + soup = BeautifulSoup(html, "html.parser") + tr_list = soup.find_all("tr") + dates, conditions, temperatures = [], [], [] + for data in tr_list[1:]: + sub_data = data.text.split() + dates.append(sub_data[0]) + conditions.append("".join(sub_data[1:3])) + temperatures.append("".join(sub_data[3:6])) + _data = pd.DataFrame() + _data["日期"] = dates + _data["天气状况"] = conditions + _data["气温"] = temperatures + return _data + +# 获取广州市2019年第一季度天气状况 +data_1_month = get_data("http://www.tianqihoubao.com/lishi/guangzhou/month/201901.html") +data_2_month = get_data("http://www.tianqihoubao.com/lishi/guangzhou/month/201902.html") +data_3_month = get_data("http://www.tianqihoubao.com/lishi/guangzhou/month/201903.html") + + +data = pd.concat([data_1_month, data_2_month, data_3_month]).reset_index(drop=True) + +data.to_csv("guangzhou_history_weather_data.csv", index=False, encoding="utf-8") diff --git a/spiderFile/get_tj_accident_info.py b/spiderFile/get_tj_accident_info.py new file mode 100644 index 0000000..b8b2237 --- /dev/null +++ b/spiderFile/get_tj_accident_info.py @@ -0,0 +1,77 @@ +import re +import joblib +import asyncio +import aiohttp +import requests as rq +from bs4 import BeautifulSoup + +def yield_all_page_url(root_url, page=51): + """生成所有的页面url + @param root_url: 首页url + type root_url: str + @param page: 爬取的页面个数 + type page: int + """ + # 观察网站翻页结构可知 + page_url_list = [f"{root_url}index_{i}.html" for i in range(1, page)] + # 添加首页url + page_url_list.insert(0, root_url) + return page_url_list + +async def get_info_page_url(url, session): + regex = re.compile("') + html = rq.get(url, headers=HEADERS).content.decode("utf-8") + soup = BeautifulSoup(html) + title = re.search(title_regex, html) + content_1 = soup.find("div", class_="TRS_UEDITOR TRS_WEB") + content_2 = soup.find("div", class_="view TRS_UEDITOR trs_paper_default trs_word") + content_3 = soup.find("div", class_="view TRS_UEDITOR trs_paper_default trs_web") + if content_1: + content = content_1.text + elif content_2: + content = content_2.text + elif content_3: + content = content_3.text + else: + content = "" + return {"title": title.groups()[0], "content": content} + +def get_all_data(all_info_page_url_list): + all_data = [] + for i, url in enumerate(all_info_page_url_list): + all_data.append(get_data(url)) + print(i, url, all_data[-1]) + joblib.dump(all_data, "all_data.joblib") + + +if __name__ == "__main__": + root_url = "http://yjgl.tj.gov.cn/ZWGK6939/SGXX3106/" + agent_part_1 = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " + agent_part_2 = "(KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36" + HEADERS = {"Host": "yjgl.tj.gov.cn", + "Connection": "keep-alive", + "User-Agent": agent_part_1 + agent_part_2, + "Referer": "http://static.bshare.cn/"} + page_url_list = yield_all_page_url(root_url, page=51) + all_info_page_url_list = asyncio.run(get_all_info_page_url(root_url, page_url_list)) + joblib.dump("all_info_page_url_list", all_info_page_url_list) diff --git a/spiderFile/get_top_sec_com.py b/spiderFile/get_top_sec_com.py new file mode 
100644 index 0000000..f1fce0a --- /dev/null +++ b/spiderFile/get_top_sec_com.py @@ -0,0 +1,95 @@ +import re +import os +import time +import joblib +import asyncio +import aiohttp +import requests as rq + +import pandas as pd +import matplotlib.pyplot as plt +# import nest_asyncio +# nest_asyncio.apply() + +class getTopSecCom: + def __init__(self, top=None): + self.headers = {"Referer": "http://quote.eastmoney.com/", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"} + self.bk_url = "http://71.push2.eastmoney.com/api/qt/clist/get?cb=jQuery1124034348162124675374_1612595298605&pn=1&pz=85&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f62&fs=b:BK0655&fields=f12,f14&_=1612595298611" + self.shares_api = "https://xueqiu.com/S/" + self.top = top + if not os.path.exists("./useful_sec_com_list"): + self.useful_sec_com_list = self.get_sec_com_code() + else: + with open("./useful_sec_com_list", "rb") as fp: + self.useful_sec_com_list = joblib.load(fp) + + def get_sec_com_code(self): + html = rq.get(self.bk_url, headers=self.headers).content.decode("utf-8") + sec_com_list = eval(re.findall("\[(.*?)\]", html)[0]) + useful_sec_com_list = [[i["f12"], i["f14"]] for i in sec_com_list if "ST" not in i["f14"]] + + # 0和3开头的为深证上市股票前缀为sz,6开头的为上证上市股票前缀为sh + for sec_com in useful_sec_com_list: + if sec_com[0][0] == "6": + sec_com[0] = "sh" + sec_com[0] + else: + sec_com[0] = "sz" + sec_com[0] + with open("useful_sec_com_list", "wb") as fp: + joblib.dump(useful_sec_com_list, fp) + return useful_sec_com_list + + async def async_get_shares_details(self, sec_com, url): + async with aiohttp.ClientSession() as session: + async with session.get(url, headers=self.headers) as response: + html = await response.text() + market_value = re.search(" 总市值:(.*?)亿", html) + if market_value: + return [*sec_com, market_value.groups()[0]] + + async def async_get_all_shares(self): + tasks = [] + for sec_com in self.useful_sec_com_list: + url = self.shares_api + sec_com[0] + tasks.append( + asyncio.create_task( + self.async_get_shares_details(sec_com, url) + ) + ) + done, pendding = await asyncio.wait(tasks) + return [share.result() for share in done if share.result()] + + def get_shares_details(self): + all_shares = [] + for sec_com in self.useful_sec_com_list: + url = self.shares_api + sec_com[0] + response = rq.get(url, headers=self.headers).content.decode("utf-8") + market_value = re.search(" 总市值:(.*?)亿", response) + if market_value: + all_shares.append([*sec_com, market_value.groups()[0]]) + return all_shares + + def yield_picture(self, save_path): + # all_shares = self.get_shares_details() # 同步代码 + all_shares = asyncio.run(self.async_get_all_shares()) # 异步代码 + df = pd.DataFrame(all_shares, columns=["股票代码", "公司", "市值(亿)"]) + df["市值(亿)"] = df["市值(亿)"].astype(float) + date = time.strftime("%Y年%m月%d日", time.localtime()) + df.sort_values(by="市值(亿)", ascending=False, inplace=True) + df.index = range(1, df.shape[0]+1) + + plt.rcParams['font.sans-serif'] = ['SimHei'] + plt.rcParams['axes.unicode_minus'] = False + + + fig = plt.figure(dpi=400) + ax = fig.add_subplot(111, frame_on=False) + ax.xaxis.set_visible(False) + ax.yaxis.set_visible(False) + _ = pd.plotting.table(ax, df, loc="best", cellLoc="center") + ax.set_title(f"{date}A股网安版块公司市值排名", fontsize=10) + plt.savefig(save_path, bbox_inches="tight") + +if __name__ == "__main__": + m = getTopSecCom() + m.yield_picture("rank.png") diff --git a/spiderFile/one_update.py 
b/spiderFile/one_update.py new file mode 100644 index 0000000..d457785 --- /dev/null +++ b/spiderFile/one_update.py @@ -0,0 +1,38 @@ +import re +import requests as rq + +ROOT_URL = "http://wufazhuce.com/one/" +URL_NUM = 14 + +def yield_url(ROOT_URL, URL_NUM): + return ROOT_URL + str(URL_NUM) + +def get_html(url): + return rq.get(url).content.decode("utf-8") + +def get_data(html): + img_url_regex = re.compile('') + cite_regex = re.compile('