# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import os
import re
import time
import sys
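# Usage (inferred from the argv references below; the script name is
# illustrative):
#   python wenhui_article.py <page_filename> <section_dir>
# sys.argv[1] is the article page filename on wenhui.sumg.com.cn and
# sys.argv[2] is the section directory under today's output folder.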
## Strip tags from an HTML string.
# Removes CDATA sections, <script>/<style> blocks, tags and comments,
# turns <br> into newlines and decodes common character entities.
# @param htmlstr HTML string.
def filter_tags(htmlstr):
    # Strip JS-style commented CDATA sections first
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)
    re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script> blocks
    re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # <style> blocks
    re_br = re.compile(r'<br\s*/?>')  # line breaks
    re_h = re.compile(r'</?\w+[^>]*>')  # any remaining HTML tag
    re_comment = re.compile(r'<!--.*?-->', re.S)  # HTML comments (may contain '>')
    s = re_cdata.sub('', htmlstr)  # drop CDATA
    s = re_script.sub('', s)  # drop scripts
    s = re_style.sub('', s)  # drop styles
    s = re_br.sub('\n', s)  # <br> -> newline
    s = re_h.sub('', s)  # drop tags
    s = re_comment.sub('', s)  # drop comments
    # Collapse runs of blank lines
    blank_line = re.compile(r'\n+')
    s = blank_line.sub('\n', s)
    s = replaceCharEntity(s)  # decode entities
    return s
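# Illustrative example (not part of the original script):
#   filter_tags('<p>Hello<br/>World</p><!-- note -->')  # -> 'Hello\nWorld'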
## Replace common HTML character entities.
# Substitutes the plain character for each HTML entity; add new
# entries to CHAR_ENTITIES to handle more entities.
# @param htmlstr HTML string.
def replaceCharEntity(htmlstr):
    CHAR_ENTITIES = {'nbsp': ' ', '160': ' ',
                     'lt': '<', '60': '<',
                     'gt': '>', '62': '>',
                     'amp': '&', '38': '&',
                     'quot': '"', '34': '"'}
    re_charEntity = re.compile(r'&#?(?P<name>\w+);')
    sz = re_charEntity.search(htmlstr)
    while sz:
        entity = sz.group()  # full entity, e.g. '&gt;'
        key = sz.group('name')  # name without '&' and ';', e.g. 'gt'
        try:
            htmlstr = re_charEntity.sub(CHAR_ENTITIES[key], htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
        except KeyError:
            # Unknown entity: replace it with the empty string
            htmlstr = re_charEntity.sub('', htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
    return htmlstr
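# Illustrative example (not part of the original script):
#   replaceCharEntity('a &lt; b &amp;&#160;c')  # -> 'a < b & c'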
# Helper: apply a compiled regex substitution to a string (unused below).
def replace(s, re_exp, repl_string):
    return re_exp.sub(repl_string, s)
pdf_url = "http://wenhui.sumg.com.cn/images/"  # base URL for article images
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# Today's article page; sys.argv[1] is the page filename
source_url = ("http://wenhui.sumg.com.cn/html/"
              + time.strftime("%Y-%m", time.localtime()) + "/"
              + time.strftime("%d", time.localtime()) + "/" + sys.argv[1])
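# For example, on 2016-01-15 with sys.argv[1] = "content_1.htm"
# (an illustrative filename), source_url becomes:
#   http://wenhui.sumg.com.cn/html/2016-01/15/content_1.htm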
print(source_url)
home_path = "/usr/crawler/wenhui/"
disk0 = "/usr/crawler/program/"
# Output directory for one section of today's paper
date_dir = home_path + time.strftime("%Y%m%d", time.localtime()) + "/" + sys.argv[2]
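# e.g. /usr/crawler/wenhui/20160115/01 with sys.argv[2] = "01"
# (illustrative values)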
home_out = open(date_dir + "/" + sys.argv[1] + ".txt", 'w')
print("-----------------------------------------------")
# Fetch and parse the article page
news_web_response = urllib2.urlopen(source_url)
news_readable_page = news_web_response.read()
news_soup = BeautifulSoup(news_readable_page, "lxml")
home_out.write(u"标题\n".encode('utf-8'))  # section marker: "Title"
# Kicker, headline and subhead; guard each one, since not every
# article page carries all three elements.
title_soup0 = news_soup.find(name="h1", attrs={"class": "title1"})
if title_soup0 is not None:
    home_out.write(filter_tags(title_soup0.prettify().encode('utf-8')))
title_soup = news_soup.find(name="h1", attrs={"class": "title"})
if title_soup is not None:
    home_out.write(filter_tags(title_soup.prettify().encode('utf-8')))
title_soup2 = news_soup.find(name="h2", attrs={"class": "title2"})
if title_soup2 is not None:
    home_out.write(filter_tags(title_soup2.prettify().encode('utf-8')))
home_out.write(u"正文\n".encode('utf-8'))  # section marker: "Body"
content_soup = news_soup.find(name="div", attrs={"class": "articleText textLR"})
home_out.write(filter_tags(content_soup.prettify().encode('utf-8')))
# Image table, present only when the article has illustrations
img_soup2 = news_soup.find(name="table", attrs={"id": "tableInfo"})
if img_soup2 is not None:
    image_soup = img_soup2.find_all(name="img")
    if image_soup:  # find_all returns a list; test it for emptiness
        home_out.write(u"\n\n插图URL\n".encode('utf-8'))  # section marker: "Image URLs"
        for image_line in image_soup:
            image_link = image_line.get('src')
            print(image_link)
            # Strip the relative prefix so only the image filename remains
            image_link = image_link.replace("../../../images/"
                                            + time.strftime("%Y-%m", time.localtime()) + "/"
                                            + time.strftime("%d", time.localtime()) + "/", "")
            home_out.write("\n" + image_link + "\n")
            image_name = image_link[image_link.rfind('/') + 1:]
            image_dir = date_dir + "/" + sys.argv[1]
            if not os.path.exists(image_dir):
                os.makedirs(image_dir)
            # Download the image from the absolute images URL
            image = urllib2.urlopen(pdf_url
                                    + time.strftime("%Y-%m", time.localtime()) + "/"
                                    + time.strftime("%d", time.localtime()) + "/"
                                    + image_link).read()
            with open(image_dir + "/" + image_name, 'wb') as code:
                code.write(image)  # the with block closes the file
home_out.close()