37,743
社区成员
发帖
与我相关
我的任务
分享
#!/usr/bin/env python
# -*- coding:gb2312 -*-
"""
要实现的功能是 根据提供的目录网页(list)分析其子页面
将其子页面的链接保存在一个列表中,然后循环下载这些子页面
分析这些子页面中的图片,下载保存到本地硬盘上
"""
import os,sys,HTMLParser
import urllib,re
import httplib,urlparse
class main():
def __init__(self):
self.anatext = "111222" #保存截取出来的要分析的网页
def run(self):
global chdurllist
global picurllist
global urlstring
#先要求提供一个目录页地址:
parurl = raw_input("Please in the url of the website:")
#判断能否正常链接到该地址:
runn = self.httpExists(parurl)
#如果地址能够访问,进行解析,提取网页中的子网页地址
if runn == True:
from urlparse import urlparse
a = urlparse(parurl)
urlstring = a[0]+'://'+a[1]+'/'
"""
在这里控制整个程序的运行!
"""
parstartstr = "<DIV class=list>" #list页采集开始处的代码
parendstr = "<DIV class=page>" #list页采集结束处的代码
chdstartstr = "<DIV class=\"center margintop border clear main\">" #图片页采集开始处的代码
chdendstr = "</DIV></A></div>" #图片页采集结束处的代码
self.paranalyze(parurl,parstartstr,parendstr)
lar = parselinks()
lar.feed(self.anatext)#____问题出现在这里____提供的参数不符合规格!
print "1"
for url in chdurllist:
self.paranalyze(url,chdstartstr,chdendstr)
lar.feed(self.anatext)
self.downpic()
lar.close
else:
print "The url you input can not link!\nPlease input another url:"
def paranalyze(self,url,anastart,anaend):
#开始解析网页了!先获取网页内容
webpage = urllib.urlopen(url)
webtext = webpage.read()
#print len(webtext)
#开始解析网页内容
a = webtext.find(anastart)
b = webtext.find(anaend)
#print webtext[a:b]
ab = webtext[a:b]
self.anatext = ab.decode("cp936")
webpage.close()
#开始下载图片urlretrieve
def downpic(self):
global chdurllist
global picurllist
fildir = "c:\\123\\dmm\\"
a = len(picurllist[1])
filename = picurllist[1][a-4:]
i = 10
print '2'
if picurllist[0]!=picurllist[1]:
print '3'
for url in picurllist:
if httpExists(url):
print '4'
i += 1
urllib.urlretrieve(url,fildir+str(i)+filename)
else:
return False
else:
return False
def httpExists(self,url):
host , path = urlparse.urlsplit(url)[1:3]
isok = False
try:
conn = httplib.HTTPConnection(host)
conn.request("HEAD",path)
resp = conn.getresponse()
if resp.status == 200:
isok = True
else:
isok = False
except Exception, e:
print e.__class__, e, url
return isok
#提取网页中文字链接的方法
# Parser that harvests text links and image sources from a page.
class parselinks(HTMLParser.HTMLParser):
    """Collects <a href> targets into chdurllist and <img src> targets
    into picurllist (both module-level globals)."""
    def handle_starttag(self, tag, attrs):
        global chdurllist
        global picurllist
        global urlstring
        if tag == 'a':
            for name, value in attrs:
                if name == 'href':
                    # BUG FIX: original did urlstring + value, which
                    # produces broken URLs for absolute hrefs; urljoin
                    # resolves relative hrefs identically and passes
                    # absolute ones through unchanged.
                    chdurllist.append(urlparse.urljoin(urlstring, value))
        if tag == 'img':
            for name, value in attrs:
                if name == 'src':
                    picurllist.append(value)
if __name__ == '__main__':
    chdurllist = []  # sub-page link URLs harvested from the index page
    picurllist = []  # image URLs harvested from the sub-pages
    urlstring = ""   # site root, filled in by main.run()
    # (removed a commented-out parselinks smoke test that was dead code)
    app = main()
    app.run()
#!/usr/bin/env python
#coding=utf-8
from BeautifulSoup import BeautifulSoup
import urllib2
page = urllib2.urlopen('http://www.baidu.com')
soup = BeautifulSoup(page)
for s in soup('a'):
b = str(s)
print b.decode('utf-8').encode("gb2312")