Python通过解析网页实现看报程序的方法
本文所述实例可以实现基于Python的查看图片报纸《参考消息》并将当天的图片报纸自动下载到本地供查看的功能,具体实现代码如下:
# coding=gbk import urllib2 import socket import re import time import os # timeout in seconds #timeout = 10 #socket.setdefaulttimeout(timeout) timeout = 10 urllib2.socket.setdefaulttimeout(timeout) home_url = "http://www.hqck.net" home_page = "" try: home_page_context = urllib2.urlopen(home_url) home_page = home_page_context.read() print "Read home page finishd." print "-------------------------------------------------" except urllib2.URLError,e: print e.code exit() except: print e.code exit() reg_str = r'<a class="item-baozhi" href="/arc/jwbt/ckxx/\d{4}/\d{4}/\w+\.html" rel="external nofollow" ><span class.+>.+</span></a>' news_url_reg = re.compile(reg_str) today_cankao_news = news_url_reg.findall(home_page) if len(today_cankao_news) == 0: print "Cannot find today's news!" exit() my_news = today_cankao_news[0] print "Latest news link = " + my_news print url_s = my_news.find("/arc/") url_e = my_news.find(".html") url_e = url_e + 5 print "Link index = [" + str(url_s) + "," + str(url_e) + "]" my_news = my_news[url_s:url_e] print "part url = " + my_news full_news_url = home_url + my_news print "full url = " + full_news_url print image_folder = "E:\\new_folder\\" if (os.path.exists(image_folder) == False): os.makedirs(image_folder) today_num = time.strftime('%Y-%m-%d',time.localtime(time.time())) image_folder = image_folder + today_num + "\\" if (os.path.exists(image_folder) == False): os.makedirs(image_folder) print "News image folder = " + image_folder print context_uri = full_news_url[0:-5] first_page_url = context_uri + ".html" try: first_page_context = urllib2.urlopen(first_page_url) first_page = first_page_context.read() except urllib2.HTTPError, e: print e.code exit() tot_page_index = first_page.find("共") tot_page_index = tot_page_index tmp_str = first_page[tot_page_index:tot_page_index+10] end_s = tmp_str.find("页") page_num = tmp_str[2:end_s] print page_num page_count = int(page_num) print "Total " + page_num + " pages:" print page_index = 1 download_suc = 
True while page_index <= page_count: page_url = context_uri if page_index > 1: page_url = page_url + "_" + str(page_index) page_url = page_url + ".html" print "News page link = " + page_url try: news_img_page_context = urllib2.urlopen(page_url) except urllib2.URLError,e: print e.reason download_suc = False break news_img_page = news_img_page_context.read() #f = open("e:\\page.html", "w") #f.write(news_img_page) #f.close() reg_str = r'http://image\S+jpg' image_reg = re.compile(reg_str) image_results = image_reg.findall(news_img_page) if len(image_results) == 0: print "Cannot find news page" + str(page_index) + "!" download_suc = False break image_url = image_results[0] print "News image url = " + image_url news_image_context = urllib2.urlopen(image_url) image_name = image_folder + "page_" + str(page_index) + ".jpg" imgf = open(image_name, 'wb') print "Getting image..." try: while True: date = news_image_context.read(1024*10) if not date: break imgf.write(date) imgf.close() except: download_suc = False print "Save image " + str(page_index) + " failed!" print "Unexpected error: " + sys.exc_info()[0] + sys.exc_info()[1] else: print "Save image " + str(page_index) + " succeed!" print page_index = page_index + 1 if download_suc == True: print "News download succeed! Path = \"" + str(image_folder) + "\"" print "Enjoy it! ^^" else: print "news download failed!"
你可以使用现成的Python模块:BeautifulSoup。
或者最起码你得了解Python的正则,然后自己去用正则解析网页。
下面这个程序是抓取网页的一个例子,MyOpener类是为了模拟浏览器客户端,并采用随机选取User-Agent的方式,以防网站认为你是机器人。
MyFunc函数抓取你指定的url,并提取了其中的href链接,图片的获取类似,一般是<img src=xxx>这样的形式,其他的功能应该也不难,去网上搜下应该有些例子。
import re
from urllib import FancyURLopener
from random import choice
# Pool of browser User-Agent strings; MyOpener picks one at random so
# the scraper is less likely to be identified and blocked as a bot.
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
    # URL opener that masquerades as a regular browser.
    # NOTE: choice() runs once, at class-definition time, so every
    # MyOpener instance in a given run reports the same User-Agent.
    version = choice(user_agents)
def MyFunc(url):
    """Fetch *url* with a browser-like User-Agent and return the list
    of <a ...>...</a> anchor elements found in the page."""
    myopener = MyOpener()
    s = myopener.open(url).read()
    # Collapse newlines so an anchor split across lines still matches.
    ss = s.replace("\n", " ")
    # Non-greedy, case-insensitive match of complete anchor elements.
    urls = re.findall(r"<a.*?href=.*?<\/a>", ss, re.I)
    for i in urls:
        # The original article left a "do sth." placeholder here (a
        # syntax error); process each extracted link as needed.
        pass
    return urls
相关内容
- Python生成pdf文件的方法,
- 基于Python实现的扫雷游戏实例代码,python扫雷
- Python代码的打包与发布详解,python代码详解
- Python使用PyGreSQL操作PostgreSQL数据库教程,pygresqlpostgre
- python新手经常遇到的17个错误分析,python新手遇到17
- Python中apply函数的用法实例教程,pythonapply
- Python中zip()函数用法实例教程,python实例教程
- python处理文本文件实现生成指定格式文件的方法,
- Python中关键字is与==的区别简述,python简述
- 用python删除java文件头上版权信息的方法,pythonjava
评论关闭