1. Install Anaconda:
2. Create a virtual environment:
3. Install PyTorch:
4. Download the source code and install the dependencies:
5. Data annotation:
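The raw images are collected with the crawler below, which queries Baidu Image search for a keyword and saves the returned pictures into a folder named after that keyword: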
import os
import re
import sys
import urllib
import json
import socket
import urllib.request
import urllib.parse
import urllib.error
# set a global socket timeout
from random import randint
import time
timeout = 5
socket.setdefaulttimeout(timeout)
class Crawler:
    # sleep interval between downloads
    __time_sleep = 0.1
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    __per_page = 30
    # fetch image URLs and their contents
    # t: interval in seconds between image downloads
    def __init__(self, t=0.1):
        self.time_sleep = t
    # get the file extension from an image URL
    @staticmethod
    def get_suffix(name):
        m = re.search(r'\.[^\.]*$', name)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpeg'
    # save images
    def save_image(self, rsp_data, word):
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        # count existing files so new names do not collide
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['data']:
            try:
                if 'replaceUrl' not in image_info or len(image_info['replaceUrl']) < 1:
                    continue
                obj_url = image_info['replaceUrl'][0]['ObjUrl']
                thumb_url = image_info['thumbURL']
                url = 'https://image.baidu.com/search/down?tn=download&ipn=dwnl&word=download&ie=utf8&fr=result&url=%s&thumburl=%s' % (
                    urllib.parse.quote(obj_url), urllib.parse.quote(thumb_url))
                time.sleep(self.time_sleep)
                suffix = self.get_suffix(obj_url)
                # set a browser User-Agent to reduce 403 responses
                opener = urllib.request.build_opener()
                opener.addheaders = [
                    ('User-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'),
                ]
                urllib.request.install_opener(opener)
                # save the image; retry up to 5 times if an empty file comes back
                filepath = './{}/PME_{}_A{}'.format(word, randint(
                    1000000, 500000000), str(self.__counter) + str(suffix))
                for _ in range(5):
                    urllib.request.urlretrieve(url, filepath)
                    if os.path.getsize(filepath) >= 5:
                        break
                if os.path.getsize(filepath) < 5:
                    print("Got an empty file, skipping!")
                    os.unlink(filepath)
                    continue
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("Unknown error, giving up on this image")
                continue
            else:
                print("+1 image, {} images so far".format(self.__counter))
                self.__counter += 1
        return
    # start fetching
    def get_images(self, word):
        search = urllib.parse.quote(word)
        # pn: int, image offset within the search results
        pn = self.__start_amount
        while pn < self.__amount:
            url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%s&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word=%s&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn=%s&rn=%d&gsm=1e&1594447993172=' % (
                search, search, str(pn), self.__per_page)
            # send the request with a browser header to avoid 403
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read()
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print("-----URLError url:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timeout:", url)
            else:
                # parse the JSON response
                try:
                    rsp_data = json.loads(rsp)
                    self.save_image(rsp_data, word)
                    # move on to the next page
                    print("Downloading the next page")
                    pn += 60
                except Exception as e:
                    print(e)
                    continue
                finally:
                    page.close()
        print("Download task finished")
        return
    def start(self, word, total_page=2, start_page=1, per_page=30):
        """
        Crawler entry point
        :param word: keyword to crawl
        :param total_page: number of result pages to fetch; total images = pages x per_page
        :param start_page: first page to fetch
        :param per_page: number of images per page
        :return:
        """
        self.__per_page = per_page
        self.__start_amount = (start_page - 1) * self.__per_page
        self.__amount = total_page * self.__per_page + self.__start_amount
        self.get_images(word)
if __name__ == '__main__':
    crawler = Crawler(0.05)  # download delay of 0.05 s
    crawler.start('玩手机')  # keyword: "playing with a phone"
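The defaults in start() fetch only two pages (about 60 images per keyword). For a larger dataset, the entry point can be changed along these lines; a minimal sketch that reuses the Crawler class above, with placeholder page counts:

if __name__ == '__main__':
    crawler = Crawler(0.05)
    # 20 pages x 30 images per page = up to about 600 images for this keyword
    crawler.start('玩手机', total_page=20, start_page=1, per_page=30)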