# -*- coding: utf-8 -*-
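"""Entry point for a simple Baidu Baike crawler.

SpiderMain wires together four collaborators from the baike_spider
package: a URL manager, a page downloader, a page parser, and an HTML
report writer, then crawls up to 1000 pages starting from a root URL.
"""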
from baike_spider import (url_manager, html_downloader, html_parser,
                          html_outputer)


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()                # tracks pending vs. crawled URLs
        self.downloader = html_downloader.HtmlDownloader()  # fetches page content
        self.parser = html_parser.HtmlParser()              # extracts links and data from pages
        self.outputer = html_outputer.HtmlOutPuter()        # accumulates data and writes the report

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():  # while there are URLs not yet crawled
            try:
                new_url = self.urls.get_new_url()  # take one pending URL
                print('craw %d : %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)  # fetch the page for that URL
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # extract outgoing URLs and page data
                self.urls.add_new_urls(new_urls)       # feed the new URLs back to the manager
                self.outputer.collect_data(new_data)   # accumulate the parsed data
                if count == 1000:  # stop after 1000 pages
                    break
                count = count + 1
            except Exception as e:
                print('craw failed: %s' % e)
        self.outputer.output_html()  # write the collected data out as an HTML file
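

# The four collaborators come from the baike_spider package, which is not
# shown in this file. As a reference for the calls SpiderMain makes, here is
# a minimal sketch of what the URL manager could look like: two sets give
# O(1) dedup lookups so each URL is handed out at most once. This is an
# illustrative assumption, not the actual baike_spider.url_manager code.
class UrlManagerSketch(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already handed out

    def add_new_url(self, url):
        # ignore None and anything already seen, pending or crawled
        if url is not None and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()  # take an arbitrary pending URL
        self.old_urls.add(new_url)     # remember it so it is never re-queued
        return new_url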


if __name__ == "__main__":
    # root_url = "http://baike.baidu.com/view/21087.htm"
    root_url = "http://baike.baidu.com/view/9936357.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)