If you want to save the results to Excel instead, see my other crawler: Using the Scrapy framework with multiprocessing to scrape Beike data into an Excel file, including paginated data and detail-page data, for learning and reference only (爬取贝壳成交数据c端用户登录-CSDN博客).

Final data preview:
QuotesSpider: the spider program
import scrapy
import re

from weibo_top.items import WeiboTopItem


class QuotesSpider(scrapy.Spider):
    name = 'weibo_top'
    allowed_domains = ['s.weibo.com']

    def start_requests(self):
        # entry point: request the realtime hot-search summary page
        yield scrapy.Request(url='https://s.weibo.com/top/summary?cate=realtimehot')

    def parse(self, response, **kwargs):
        trs = response.css('#pl_top_realtimehot table tbody tr')
        count = 0
        for tr in trs:
            if count >= 30:  # only keep the first 30 entries
                break  # stop processing the remaining rows
            item = WeiboTopItem()
            title = tr.css('.td-02 a::text').get()
            href = tr.css('.td-02 a::attr(href)').get()
            # guard against rows without a link; concatenating None would raise a TypeError
            link = 'https://s.weibo.com/' + href if href else None
            item['title'] = title
            item['link'] = link
            if link:
                count += 1  # increment the counter
                yield scrapy.Request(url=link, callback=self.parse_detail, meta={'item': item})
            else:
                yield item

    def parse_detail(self, response, **kwargs):
        item = response.meta['item']
        list_items = response.css('div.card-wrap[action-type="feed_list_item"]')
        limit = 0
        for li in list_items:
            if limit >= 1:
                break  # only keep the first post on the detail page
            content = li.xpath('.//p[@class="txt"]/text()').getall()
            # whitelist: keep only CJK characters, ASCII letters, digits, 【】 and commas
            processed_content = [re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9【】,]', '', text) for text in content]
            processed_content = [text.strip() for text in processed_content if text.strip()]
            processed_content = ','.join(processed_content).replace('【', ',【')
            item['desc'] = processed_content
            print(processed_content)
            yield item
            limit += 1  # increment the counter
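Before moving on to the item definition, note that the cleaning logic in parse_detail is easy to sanity-check outside Scrapy. A minimal sketch, with made-up raw fragments standing in for what li.xpath(...).getall() returns:

import re

# hypothetical raw fragments, as the detail-page XPath might return them
content = ['  【热点】某话题引发讨论… ', '\n网友评论 show!\u200b ']

# same whitelist as the spider: keep CJK characters, letters, digits, 【】 and commas
cleaned = [re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9【】,]', '', text) for text in content]
cleaned = [text.strip() for text in cleaned if text.strip()]
print(','.join(cleaned).replace('【', ',【'))
# prints: ,【热点】某话题引发讨论,网友评论show

Everything outside the whitelist (whitespace, punctuation like '…' and '!', zero-width characters) is dropped in one pass, and every 【 gets a comma pushed in front of it, including a leading one.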
item: defining the data structure

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class WeiboTopItem(scrapy.Item):
    title = scrapy.Field()  # topic title
    link = scrapy.Field()   # detail-page URL
    desc = scrapy.Field()   # cleaned description text
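A WeiboTopItem behaves like a dict, which is what the pipeline relies on later via item.get(...). A quick sketch (the field values are made up):

from weibo_top.items import WeiboTopItem

item = WeiboTopItem()
item['title'] = '示例话题'                    # dict-style field access
item['link'] = 'https://s.weibo.com/example'  # only declared fields are allowed
print(item.get('desc', ''))                   # unset fields fall back to the default, here ''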
Middleware: setting Cookie, User-Agent, and Host

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from fake_useragent import UserAgent
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class WeiboTopSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WeiboTopDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def __init__(self):
        # session cookie copied from a logged-in browser; replace it with your own
        self.cookie_string = 'SUB=_2AkMS10-nf8NxqwFRmfoXyG3jaoxxygHEieKki758JRMxHRl-yT9vqhIrtRB6OVdhSYUGwRsrtuQyFPy_aLfaay7wguyu; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WhBJpfihr9Mo_TDhk.fIHFo; _s_tentry=www.baidu.com; UOR=www.baidu.com,s.weibo.com,www.baidu.com; Apache=5259811159487.941.1709629772294; SINAGLOBAL=5259811159487.941.1709629772294; ULV=1709629772313:1:1:1:5259811159487.941.1709629772294:'
        # self.referer = 'https://sh.ke.com/chengjiao/'

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # attach the cookie, a random User-Agent, and the Host header to every request
        cookie_dict = self.get_cookie()
        request.cookies = cookie_dict
        request.headers['User-Agent'] = UserAgent().random
        request.headers['Host'] = 's.weibo.com'
        # request.headers['referer'] = self.referer
        return None

    def get_cookie(self):
        # parse the raw cookie string into a dict Scrapy can use
        cookie_dict = {}
        for kv in self.cookie_string.split('; '):
            k = kv.split('=')[0]
            v = kv.split('=', 1)[1]  # split on the first '=' only, in case a value contains '='
            cookie_dict[k] = v
        return cookie_dict

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
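get_cookie is plain string splitting, so it can be checked without Scrapy at all. A minimal sketch with a shortened, made-up cookie string:

# made-up, truncated values; the real string comes from a logged-in browser session
cookie_string = 'SUB=_2AkMS10; _s_tentry=www.baidu.com; UOR=www.baidu.com,s.weibo.com'

cookie_dict = {}
for kv in cookie_string.split('; '):
    k, v = kv.split('=', 1)  # first '=' only, so values containing '=' survive intact
    cookie_dict[k] = v

print(cookie_dict)
# {'SUB': '_2AkMS10', '_s_tentry': 'www.baidu.com', 'UOR': 'www.baidu.com,s.weibo.com'}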
Pipeline: saving the data to a plain-text file

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class WeiboTopPipeline:
    def __init__(self):
        self.items = []

    def process_item(self, item, spider):
        # collect every item in memory
        self.items.append(item)
        print('\n\nitem', item)
        return item

    def close_spider(self, spider):
        # once the spider closes, write all collected items to the file
        with open('weibo_top_data.txt', 'w', encoding='utf-8') as file:
            for item in self.items:
                title = item.get('title', '')
                desc = item.get('desc', '')
                output_string = f'{title}\n{desc}\n\n'
                file.write(output_string)
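Since the intro mentions saving to Excel: for completeness, a sketch of what an Excel variant of this pipeline could look like using openpyxl. This is my assumption, not part of the project; the class name and output file name are made up, openpyxl must be installed separately, and it would replace WeiboTopPipeline in ITEM_PIPELINES:

from openpyxl import Workbook


class WeiboTopExcelPipeline:
    # hypothetical variant: writes items to an .xlsx workbook instead of a .txt file

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.append(['title', 'link', 'desc'])  # header row

    def process_item(self, item, spider):
        # one row per item, in the same column order as the header
        self.ws.append([item.get('title', ''), item.get('link', ''), item.get('desc', '')])
        return item

    def close_spider(self, spider):
        self.wb.save('weibo_top_data.xlsx')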
settings: configuring concurrency and download delays
# Scrapy settings for weibo_top project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'weibo_top'

SPIDER_MODULES = ['weibo_top.spiders']
NEWSPIDER_MODULE = 'weibo_top.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'weibo_top (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 8

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'weibo_top.middlewares.WeiboTopSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'weibo_top.middlewares.WeiboTopDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'weibo_top.pipelines.WeiboTopPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 80
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 160
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
FEED_EXPORT_ENCODING = 'utf-8'
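With everything wired up, run the crawler from the project root (the directory containing scrapy.cfg); the spider name comes from the name attribute, not the class name:

scrapy crawl weibo_top

The results land in weibo_top_data.txt next to the project. One caveat: AUTOTHROTTLE_START_DELAY and AUTOTHROTTLE_MAX_DELAY above are only honored once AUTOTHROTTLE_ENABLED = True is uncommented; as configured, pacing comes from CONCURRENT_REQUESTS = 8 alone.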