# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class BaiduSearchSpiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

class BaiduSearchSpiderDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
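These two classes are the stock Scrapy template and currently pass everything through unchanged. They only take effect once enabled in settings.py; a minimal sketch, assuming the project module is named baidu_search_spider as in the archive listing below (543 is just the priority value the template convention uses):

```python
# settings.py -- enable the middlewares above (lower number = closer to the engine).
SPIDER_MIDDLEWARES = {
    'baidu_search_spider.middlewares.BaiduSearchSpiderSpiderMiddleware': 543,
}
DOWNLOADER_MIDDLEWARES = {
    'baidu_search_spider.middlewares.BaiduSearchSpiderDownloaderMiddleware': 543,
}
```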
Using the Scrapy framework to search Baidu and persist the crawled search results
Given a keyword, the spider searches Baidu and, from each search result, extracts the title, link, description, and source, then saves them to a CSV file or a database. The steps, each sketched after this list:

1. Build the URL to crawl (the keyword is "python入门到放弃"; the Chinese characters must be URL-encoded for the Baidu link).
2. Parse the target page's fields with CSS selectors.
3. In settings.py, set the request headers, the item pipeline, and the robots.txt policy.
4. Persist the items to a CSV file in the pipeline.

Baidu search results can give you an information edge in your field, and collecting statistics on them can surface patterns worth acting on, so this small demo starts by crawling Baidu's search results.
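For steps 1 and 2, a minimal sketch of what the packaged spiders/baidu_search_demo.py might look like. The spider name and keyword come from this page, but the CSS class names (result, c-abstract, c-showurl) are assumptions about Baidu's markup, which changes often, so verify them against the live page:

```python
# spiders/baidu_search_demo.py -- a sketch, not the file from the archive.
from urllib.parse import quote

import scrapy


class BaiduSearchDemoSpider(scrapy.Spider):
    name = 'baidu_search_demo'
    keyword = 'python入门到放弃'
    # Step 1: percent-encode the Chinese keyword so the Baidu link is a valid URL.
    start_urls = ['https://www.baidu.com/s?wd=%s' % quote(keyword)]

    def parse(self, response):
        # Step 2: pull the four fields out of each result block with CSS.
        # The class names below are assumed and need checking against the live page.
        for hit in response.css('div.result'):
            yield {
                'title': ''.join(hit.css('h3 a::text').getall()).strip(),
                'link': hit.css('h3 a::attr(href)').get(),
                'description': ''.join(hit.css('div.c-abstract::text').getall()).strip(),
                'source': hit.css('a.c-showurl::text').get(),
            }
```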
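Step 3 touches three settings. A hedged sketch of the relevant settings.py lines: the User-Agent string is a placeholder (Baidu tends to refuse Scrapy's default one), and the pipeline class name is an assumption about what the archive's pipelines.py defines:

```python
# settings.py -- step 3: request headers, item pipeline, robots policy.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'  # placeholder UA

# The page says the robots protocol is configured in settings; assumed here
# to mean opting out. Weigh the terms-of-service implications before doing so.
ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {
    # Assumed class name; pipelines.py in the archive is authoritative.
    'baidu_search_spider.pipelines.BaiduSearchSpiderPipeline': 300,
}
```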
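Step 4, persisting to CSV, could look like the sketch below. The output file name baidu_search_result.csv appears in the archive listing, but the implementation itself is an assumption:

```python
# pipelines.py -- step 4 (sketch): append each scraped item to a CSV file.
import csv


class BaiduSearchSpiderPipeline(object):  # assumed name; see the archive
    def open_spider(self, spider):
        # utf-8-sig keeps the Chinese text readable when Excel opens the file.
        self.file = open('baidu_search_result.csv', 'w',
                         newline='', encoding='utf-8-sig')
        self.writer = csv.writer(self.file)
        self.writer.writerow(['title', 'link', 'description', 'source'])

    def process_item(self, item, spider):
        self.writer.writerow([item.get('title'), item.get('link'),
                              item.get('description'), item.get('source')])
        return item

    def close_spider(self, spider):
        self.file.close()
```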
Archive contents of baidu_search_spider.zip (17 files: 8 .py, 5 .xml, 1 .cfg, plus .csv, .iml, and .gitignore):

baidu_search_spider/
    baidu_search_spider/
        __init__.py (0B)
        pipelines.py (730B)
        spiders/
            __init__.py (161B)
            baidu_search_demo.py (2KB)
            __pycache__/
        items.py (407B)
        spider_main.py (129B)
        settings.py (3KB)
        __pycache__/
        baidu_search_result.csv (4KB)
        middlewares.py (4KB)
    scrapy.cfg (281B)
    .idea/
        workspace.xml (8KB)
        baidu_search_spider.iml (445B)
        misc.xml (310B)
        inspectionProfiles/
            Project_Default.xml (5KB)
            profiles_settings.xml (174B)
        modules.xml (297B)
        .gitignore (184B)