爬虫代码问题查询

lyw2073327 2020-11-28 01:25:25
想通过爬虫获取这个店铺所有商品的名称、单价和销量,然后写了如下代码,可一直报错,请问能帮忙看看是哪里出问题了吗,谢谢

import requests
import re


def getHtmlText(url):
    """Fetch *url* with browser-like headers and return the response body.

    Returns the decoded response text on success, or "" on any request
    failure (connection error, timeout, non-2xx status).

    NOTE(review): the cookie below is a captured login session and WILL
    expire; the request then returns a login page instead of shop data.
    """
    try:
        # Headers copied from a real browser session so Taobao's
        # anti-bot check accepts the request.
        head_new = {
            'authority': 'me-too1980.taobao.com',
            'method': 'GET',
            'path': '/i/asynSearch.htm?_ksTS=1606466671243_136&callback=jsonp137&mid=w-22507069265-0&wid=22507069265&path=/search.htm&search=y&spm=a1z10.1-c-s.0.0.2ba78a4bjfYUkQ',
            'scheme': 'https',
            'accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'cookie': 'cna=L3qgF0z1fzYCAXOudfa9Bs9A; t=b0ff490425c6e9789cd7b1f4355d2c82; sgcookie=E100Vt88QJkcmkvNRBieAvzEL8KntYgf5606%2B7MJneqtizObXIp%2BBN4j6osTugMSg1CojdzEJ'
                      'aA4Rv7wBC5jfaY96g%3D%3D; uc3=nk2=D9rlm5djaUbfsA%3D%3D&vt3=F8dCufwnjBZ3KgJvp3g%3D&lg2=VT5L2FSpMGV7TQ%3D%3D&id2=VyyX76rnQ%2BA%3D; lgc=lyw2073327; uc4=nk4=0'
                      '%40DfmlAp2lINszlDOyHRpgmvZNLbTe&id4=0%40VXtYgyhn5No6YbkaAOxXkcpnHA%3D%3D; tracknick=lyw2073327; _cc_=UIHiLt3xSw%3D%3D; enc=Ngu0DnS8%2BPHIrgXMJWHtbd%2Bo2Wk3'
                      'AZii4jNaSwBjhCYDzDbDv5DZLVqszwhIrMtUB7%2FzoOuNG82LNPOimqiCkA%3D%3D; mt=ci=82_1; thw=cn; hng=CN%7Czh-CN%7CCNY%7C156; xlly_s=1; _samesite_flag_=true; '
                      'cookie2=19458b4b63d3c65bcb564ced611db1b6; _tb_token_=ee559db67bee1; v=0; _m_h5_tk=6b865df9243e0245a83a1020ffde836c_1606472572267; '
                      '_m_h5_tk_enc=c78a604959ad955b3cab2c8ba50b5e5d; uc1=cookie14=Uoe0azJUmbQGew%3D%3D; pnm_cku822=098%23E1hvNQvUvbpvUvCkvvvvvjiWP2dp0jlbRssptjYHPmPOzjYWP2Ly6jD8RL'
                      'MZAj18RvhvCvvvvvvRvpvhvv2MMg9CvvOUvvVvJh%2FIvpvUvvmvR6kIPjTgvpvIvvvvvhCvvvvvvvjlphvUoQvvvQCvpvACvvv2vhCv2RvvvvWvphvWgv9CvhQvpeWvCluQD7zhVutKjrcnI4mODVQEfwClYb'
                      '8rJm7g%2BX7t%2BsIICExrzj7JRAYVyO2v%2Bb8raoF6D7zvd3ODN%2BClYW9XV7QEfaClY80KvvhvC9vhvvCvp8OCvvpvvUmm; tfstk=cbcRBvG77nxlUUax7YpcdaSUpH8cZNQ8xaZd9FlOe7G1Bo5dinhi6'
                      '4UDFyzpqEC..; l=eBaxkpAPOSzOUC-XBOfanurza77OSIRYouPzaNbMiOCP9Z1p502hWZRYJn89C3GVhsMDR3rEk3ObBeYBqIfXNmyn1xaaZ7Dmn; isg=BFJSCPliWS0EuKUgSuu1ZWRgoxg0Y1b9mzmYjxyr'
                      'foXwL_IpBPOmDVhJn4sTWc6V',
            'referer': 'https://me-too1980.taobao.com/search.htm?spm=a1z10.1-c-s.0.0.2ba78a4bjfYUkQ&search=y',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.47',
            'x-requested-with': 'XMLHttpRequest'
        }
        # timeout so a stalled connection cannot hang the crawler forever.
        r = requests.get(url, headers=head_new, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures are
        # expected here; programming errors should still surface.
        print("爬取失败")
        return ""


def parsePage(ilist, html):
    """Extract goods records from shop-page HTML and append them to *ilist*.

    Each appended record is ``[name, price, sale_count]`` where *price* is a
    float and *sale_count* an int, scraped with the same three regexes the
    page markup is assumed to use (img alt text, "c-price" span,
    "sale-num" span).

    Parameters
    ----------
    ilist : list
        Accumulator mutated in place.
    html : str
        Raw page HTML (may be "" — then nothing is appended).
    """
    try:
        goods_name = re.findall(r'<img alt=".*?"', html)
        goods_price = re.findall(r'c-price">\d+\.\d* <', html)
        goods_sale_count = re.findall(r'sale-num">\d+<', html)
        # zip() instead of indexing by range(len(goods_name)): if the three
        # regexes match different numbers of items, indexing would raise
        # IndexError; zip simply stops at the shortest list.
        for name_tag, price_tag, sale_tag in zip(goods_name, goods_price,
                                                 goods_sale_count):
            # float()/int() replace eval(): running eval on text scraped
            # from a remote page is a code-injection risk.
            price = float(re.split(r'[>| ]', price_tag)[1])
            sale_count = int(re.split(r'[>|<]', sale_tag)[1])
            name = name_tag.split('"')[1]
            ilist.append([name, price, sale_count])
    except (ValueError, IndexError) as exc:
        # Narrowed from a bare except, and the cause is now reported.
        print("解析出错", exc)


def printGoodsList(ilist):
    """Pretty-print the scraped goods as a numbered table.

    *ilist* is a list of 3-element records ``[name, price, sale_count]``
    as produced by parsePage().

    BUG FIX: the original passed ``g[3]`` as the fourth format argument,
    but each record has only three elements, so the first printed row
    raised IndexError — this was the crash the author reported.
    """
    print("=====================================================================================================")
    tplt = "{0:<3}\t{1:<70}\t{2:<6}\t{3:<6}"
    print(tplt.format("序号", "商品名称", "单价", "销量"))
    # enumerate replaces the manual `count += 1` counter.
    for count, g in enumerate(ilist, start=1):
        print(tplt.format(count, g[0], g[1], g[2]))
    print("=====================================================================================================")


def main():
    """Crawl `depth` listing pages of the shop and print the parsed goods.

    NOTE(review): `start_url` ends with a dangling "_ksTS=" parameter, so
    the final URL looks like "...?_ksTS=&pageNo=1" — Taobao may reject it.
    The author later confirmed a URL carrying the real mid/wid parameters
    works better; kept as-is here to preserve the original request target.
    """
    # Pages 1..depth are fetched (depth=2 means pages 1 and 2).
    depth = 2
    start_url = "https://me-too1980.taobao.com/i/asynSearch.htm?_ksTS="
    infoList = []
    for page in range(1, depth + 1):
        try:
            url = start_url + '&pageNo=' + str(page)
            html = getHtmlText(url)
            parsePage(infoList, html)
        except Exception:
            # A single failed page should not abort the whole crawl;
            # narrowed from a bare except so KeyboardInterrupt still works.
            continue

    printGoodsList(infoList)


# Guarded entry point: the script no longer runs on import.
if __name__ == "__main__":
    main()
...全文
236 2 打赏 收藏 转发到动态 举报
写回复
用AI写文章
2 条回复
切换为时间正序
请发表友善的回复…
发表回复
lyw2073327 2020-11-30
  • 打赏
  • 举报
回复
把start url 给重新调整了,"爬取失败"和"解析出错"倒是没有弹出了,但发现不知道为什么正则表达式始终抓取不了,把html里面的信息单独拿出来做抓取测试倒是可以,但直接在HTML里面还是不行,哪位大神能帮忙看一看原因不,多谢 def main(): depth = 2 start_url = "https://me-too1980.taobao.com/i/asynSearch.htm?&callback=jsonp137&mid=w-22507069265-0&wid=22507069265" infoList = [] for i in range(depth): try: url = start_url + '&pageNo=' + str(1 + i) html = getHtmlText(url) parsePage(infoList, html) except: continue
mklpo147 2020-11-28
  • 打赏
  • 举报
回复
淘宝官方有店铺工具的,没必要费功夫弄这个吧

37,719

社区成员

发帖
与我相关
我的任务
社区描述
JavaScript,VBScript,AngleScript,ActionScript,Shell,Perl,Ruby,Lua,Tcl,Scala,MaxScript 等脚本语言交流。
社区管理员
  • 脚本语言(Perl/Python)社区
  • IT.BOB
加入社区
  • 近7日
  • 近30日
  • 至今

试试用AI创作助手写篇文章吧