import os
import urllib.request
import urllib.parse
from lxml import etree

# URL-encode the tag so it can be embedded in the request URL
beacon = urllib.parse.quote('蔚蓝档案')

def create_request(page):
    if page == 1:
        # url = 'https://sc.chinaz.com/tupian/dongman.html'
        url = 'https://www.vilipix.com/tags/' + beacon + '/illusts'
    else:
        # url = 'https://sc.chinaz.com/tupian/dongman_' + str(page) + '.html'
        url = 'https://www.vilipix.com/tags/' + beacon + '/illusts?p=' + str(page)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
    }
    request = urllib.request.Request(url=url, headers=headers)
    return request

def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content

def down_load(content):
    # Download the images
    # urllib.request.urlretrieve('image url', 'file name')
    tree = etree.HTML(content)
    # name_list = tree.xpath('//div/img[@class="lazy"]/@alt')
    name_list = tree.xpath('//div/img[@class="el-image__inner"]/@alt')
    # src_list = tree.xpath('//div/img[@class="lazy"]/@data-original')
    src_list = tree.xpath('//div/img[@class="el-image__inner"]/@src')
    # Make sure the output directory exists before urlretrieve writes into it
    os.makedirs('./站长素材--cartoon', exist_ok=True)
    for i in range(len(name_list)):
        name = name_list[i]
        src = src_list[i]
        # url = 'https:' + src
        # url = src
        print(name, src)
        # print(len(name_list), len(src_list))
        urllib.request.urlretrieve(url=src, filename='./站长素材--cartoon/' + name + '.jpg')

if __name__ == '__main__':
    start_page = int(input('Enter the start page: '))
    end_page = int(input('Enter the end page: '))
    for page in range(start_page, end_page + 1):
        # (1) Build the request object
        request = create_request(page)
        # (2) Fetch the page source
        content = get_content(request)
        # (3) Download the images
        down_load(content)
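
Note: even with the URLs built correctly, the image grid on www.vilipix.com looks like it is rendered client-side (el-image__inner is the class that Element UI's image component uses), so the static HTML that urllib receives may not contain those img nodes at all. Below is a minimal diagnostic sketch, assuming only the helpers defined above (inspect_page and debug_page.html are made-up names), that saves the raw page and counts how many nodes the XPath actually matches:

    # Diagnostic sketch: does the XPath match anything in the raw HTML at all?
    # Assumes the create_request()/get_content() helpers defined above.
    from lxml import etree

    def inspect_page(page):
        content = get_content(create_request(page))
        # Save the raw HTML so it can be compared with what the browser's
        # element inspector shows after JavaScript has run
        with open('debug_page.html', 'w', encoding='utf-8') as f:
            f.write(content)
        tree = etree.HTML(content)
        nodes = tree.xpath('//img[@class="el-image__inner"]')
        print('el-image__inner nodes in the static HTML:', len(nodes))

    inspect_page(1)

If the count is zero, the illustrations are injected by JavaScript after the page loads, and the image list would have to come from whatever request the browser itself makes (visible in the dev tools Network tab) rather than from the tag page's HTML.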
Please save this kid: the code just can't scrape anything out of that site no matter what.