#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2016-10-25
@author: xue365.taobao.com
"""
import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        # Wire up the four collaborators. They live in the author's own
        # modules, which are not part of this file; minimal hypothetical
        # sketches of them follow below for reference.
        self.urls = url_manager.UrlManager()                # dedup + queue of URLs
        self.downloader = html_downloader.HtmlDownloader()  # page fetcher
        self.parser = html_parser.HtmlParser()              # link/data extractor
        self.outputer = html_outputer.HtmlOutputer()        # HTML report writer
    def craw(self, root_url):
        """
        1. Fetch the page at root_url; parsing it yields new_urls and new_data.
        2. Put new_urls into the set of URLs waiting to be crawled, and hand
           new_data to the outputer, which appends it to outputer.datas.
        3. Loop: while uncrawled URLs remain, fetch and parse them the same way.
        4. Write the collected data out as an HTML page.
        :param root_url: the URL the crawl starts from
        :return: None
        """
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d: %s' % (count, new_url))
                html_cont = self.downloader.downloader(new_url)
                # Parse the page fetched from new_url: collect the new URLs
                # found on it, plus the extracted data (entry title and summary).
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)      # queue the newly found URLs
                self.outputer.collect_data(new_data)  # buffer the parsed data
                if count == 10:
                    break  # stop the spider after 10 pages
                count += 1
            except Exception as e:
                print('craw failed: %s' % e)
        self.outputer.output_html()
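

# ---------------------------------------------------------------------------
# The four helper modules are not part of this file. The classes below are
# minimal, hypothetical sketches of what url_manager.py and html_downloader.py
# might contain, inferred purely from the calls in SpiderMain.craw above;
# the author's real modules may differ.
# ---------------------------------------------------------------------------
class UrlManager(object):
    """Tracks pending vs. crawled URLs so each page is fetched at most once."""
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        # Skip duplicates: anything already pending or already crawled.
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) > 0

    def get_new_url(self):
        url = self.new_urls.pop()  # take an arbitrary pending URL
        self.old_urls.add(url)     # remember it so it is never re-queued
        return url


class HtmlDownloader(object):
    """Fetches a URL and returns the raw HTML, or None on failure."""
    def downloader(self, url):  # name mirrors the call in SpiderMain.craw
        import urllib2          # Python 2 stdlib, matching this file's era
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()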
if __name__ == "__main__":
root_url = "http://baike.baidu.com/view/21087.htm"
obj_spider = SpiderMain()
obj_spider.craw(root_url)
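

# ---------------------------------------------------------------------------
# Likewise, hypothetical sketches of html_parser.py and html_outputer.py.
# The BeautifulSoup selectors are assumptions based on 2016-era Baidu Baike
# markup and may no longer match; 'output.html' is an assumed file name.
# ---------------------------------------------------------------------------
class HtmlParser(object):
    """Extracts same-site entry links plus the entry's title and summary."""
    def parse(self, page_url, html_cont):
        import re
        import urlparse                # Python 2 stdlib
        from bs4 import BeautifulSoup  # third-party; assumed available
        if page_url is None or html_cont is None:
            return set(), None
        soup = BeautifulSoup(html_cont, 'html.parser')
        # Links to other entries look like /view/<digits>.htm on this site.
        new_urls = set()
        for link in soup.find_all('a', href=re.compile(r'/view/\d+\.htm')):
            new_urls.add(urlparse.urljoin(page_url, link['href']))
        new_data = {'url': page_url}
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title')
        if title_node is not None and title_node.h1 is not None:
            new_data['title'] = title_node.h1.get_text()
        summary_node = soup.find('div', class_='lemma-summary')
        if summary_node is not None:
            new_data['summary'] = summary_node.get_text()
        return new_urls, new_data


class HtmlOutputer(object):
    """Buffers parsed records and writes them out as a simple HTML table."""
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is not None:
            self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write('<html><head><meta charset="utf-8"></head><body><table>')
        for data in self.datas:
            fout.write('<tr>')
            for key in ('url', 'title', 'summary'):
                fout.write('<td>%s</td>' % data.get(key, u'').encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table></body></html>')
        fout.close()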