# 淘宝商品价格定向爬虫 — targeted crawler for Taobao product prices
import re


def getHTMLText(url):
    """Fetch *url* and return the decoded page text, or '' on any request failure.

    The hard-coded cookie header is required because Taobao's search pages
    redirect anonymous (cookie-less) clients to a login page instead of
    returning results.
    """
    # Imported lazily so the pure-parsing helpers below remain usable even
    # when the `requests` package is not installed.
    import requests
    headers = {
        'cookie': 'thw=cn; v=0; t=ab66dffdedcb481f77fd563809639584; cookie2=1f14e41c704ef58f8b66ff509d0d122e; _tb_token_=5e6bed8635536; cna=fGOnFZvieDECAXWIVi96eKju; unb=1864721683; sg=下3f; _l_g_=Ug==; skt=83871ef3b7a49a0f; cookie1=BqeGegkL+LUif2jpoUcc6t6Ogy0RFtJuYXR4VHB7W0A=; csg=3f233d33; uc3=vt3=F8dBy3/50cpZbAursCI=&id2=UondEBnuqeCnfA==&nk2=u/5wdRaOPk21wDx/&lg2=VFC/uZ9ayeYq2g==; existShop=MTU2MjUyMzkyMw==; tracknick=\u4E36\u541B\u4E34\u4E3F\u5929\u4E0B; lgc=\u4E36\u541B\u4E34\u4E3F\u5929\u4E0B; _cc_=WqG3DMC9EA==; dnk=\u4E36\u541B\u4E34\u4E3F\u5929\u4E0B; _nk_=\u4E36\u541B\u4E34\u4E3F\u5929\u4E0B; cookie17=UondEBnuqeCnfA==; tg=0; enc=2GbbFv3joWCJmxVZNFLPuxUUDA7QTpES2D5NF0D6T1EIvSUqKbx15CNrsn7nR9g/z8gPUYbZEI95bhHG8M9pwA==; hng=CN|zh-CN|CNY|156; mt=ci=32_1; alitrackid=www.taobao.com; lastalitrackid=www.taobao.com; swfstore=97213; x=e=1&p=*&s=0&c=0&f=0&g=0&t=0&__ll=-1&_ato=0; uc1=cookie16=UtASsssmPlP/f1IHDsDaPRu+Pw==&cookie21=UIHiLt3xThH8t7YQouiW&cookie15=URm48syIIVrSKA==&existShop=false&pas=0&cookie14=UoTaGqj/cX1yKw==&tag=8&lng=zh_CN; JSESSIONID=A502D8EDDCE7B58F15F170380A767027; isg=BMnJJFqj8FrUHowu4yKyNXcd2PXjvpa98f4aQWs-RbDvsunEs2bNGLfj8BYE6lWA; l=cBTDZx2mqxnxDRr0BOCanurza77OSIRYYuPzaNbMi_5dd6T114_OkmrjfF96VjWdO2LB4G2npwJ9-etkZ1QoqpJRWkvP.; whl=-1&0&0&1562528831082',
        'user-agent': 'Mozilla/5.0',
    }
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()
        # Response headers often lie about the charset; trust content sniffing.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Best-effort download: callers treat '' as "page unavailable".
        return ""


def parsePage(ilt, html):
    """Extract price/title pairs from a Taobao search-result page.

    Appends ``[price, title]`` lists (both strings) to *ilt*.  A page that
    does not match the expected markup contributes nothing; parsing errors
    are reported with an empty print, preserving the original best-effort
    behavior.
    """
    try:
        plt = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html)
        tlt = re.findall(r'\"raw_title\"\:\".*?\"', html)
        for price_raw, title_raw in zip(plt, tlt):
            # Each match looks like "view_price":"12.30" — take the value
            # after the colon and strip the surrounding quotes.  (The
            # original used eval() here, which executes scraped text and
            # is a code-injection risk.)
            price = price_raw.split(':')[1].strip('"')
            title = title_raw.split(':')[1].strip('"')
            ilt.append([price, title])
    except Exception:
        print("")
def printGoodsList(ilt):
    """Pretty-print the ``[price, title]`` records in *ilt* as a numbered table.

    The header row ("序号/价格/商品名称" — index/price/title) uses the same
    tab-separated template as the data rows.
    """
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名称"))
    # enumerate(..., 1) replaces the original's manual counter.
    for count, g in enumerate(ilt, 1):
        print(tplt.format(count, g[0], g[1]))
def main(goods='ipad', depth=2):
    """Crawl *depth* result pages for the query *goods* and print a price table.

    Parameters (new, with defaults matching the original hard-coded values):
        goods: search keyword passed as Taobao's ``q=`` parameter.
        depth: number of result pages to fetch; each page holds 44 items,
               hence the ``s=44*i`` pagination offset.
    """
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:
            # Best-effort crawl: a page that fails to download or parse is
            # simply skipped.
            continue
    printGoodsList(infoList)


if __name__ == '__main__':
    main()