Using handlers in urllib
import urllib.request
url = "http://www.baidu.com"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'
}
# Build the customized request
request_url = urllib.request.Request(url, headers=headers)
# Get a handler object
handler = urllib.request.HTTPHandler()
# Build an opener object from the handler
opener = urllib.request.build_opener(handler)
# Call the opener's open method
response = opener.open(request_url)
# Store the response body in content
content = response.read().decode('utf-8')
print(content)
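For a plain GET like the one above, urlopen would do; the handler/opener mechanism pays off when you need extra behavior, most commonly routing traffic through a proxy. A minimal sketch, assuming a placeholder proxy address (127.0.0.1:7890 is illustrative, not a real proxy):
import urllib.request

url = "http://www.baidu.com"
headers = {'User-Agent': 'Mozilla/5.0'}
request = urllib.request.Request(url, headers=headers)
# ProxyHandler takes a dict mapping scheme to proxy address;
# the address below is a placeholder, substitute your own proxy
proxy_handler = urllib.request.ProxyHandler({'http': 'http://127.0.0.1:7890'})
opener = urllib.request.build_opener(proxy_handler)
response = opener.open(request)
print(response.read().decode('utf-8'))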
jsonpath usage example: Taopiaopiao (淘票票)
import urllib.request

tao_url = "https://www.taopiaopiao.com/cityAction.json?activityId&_ksTS=1723703101104_132&jsoncallback=jsonp133&action=cityAction&n_s=new&event_submit_doGetAllRegion=true"
headers = {
    'accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
    'accept-language': 'zh-CN,zh;q=0.9',
    'bx-v': '2.5.14',
    'cookie': 'cna=so9EH9DIXigCAdoXf5vkIDJp; xlly_s=1; isg=BL6-wd-dVNTlioDwOYJ00GT1D9QA_4J5P--IlmjHsIH8C17l0I38ibDlg9fHM3qR',
    'priority': 'u=1, i',
    'referer': "https://www.taopiaopiao.com/",
    'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
    'x-requested-with': 'XMLHttpRequest',
}
request = urllib.request.Request(url=tao_url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
# Data processing: strip the JSONP wrapper jsonp133(...) to get the bare JSON.
# Slicing between the first '(' and the last ')' is safer than split(),
# which would truncate the data at the first ')' inside the JSON body.
json_str = content[content.find('(') + 1:content.rfind(')')]
# Save to a local file
with open('15_jsonpath_淘票票.json', 'w', encoding='utf-8') as fp:
    fp.write(json_str)
# Get all the region names (regionName)
import jsonpath
import json
obj = json.load(open('15_jsonpath_淘票票.json','r',encoding='utf-8'))
city_list = jsonpath.jsonpath(obj,'$..regionName')
print(city_list)
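The recursive-descent expression $..regionName is only one corner of the JSONPath syntax. A minimal self-contained sketch of a few other expressions, using made-up bookstore data rather than the Taopiaopiao response (filter support follows Goessner's JSONPath and can vary between ports of the library):
import jsonpath

# Toy data, purely for illustration
store = {
    'store': {
        'book': [
            {'title': 'Python', 'price': 30},
            {'title': 'Crawler', 'price': 50},
        ]
    }
}
# $..title matches every title anywhere in the document
print(jsonpath.jsonpath(store, '$..title'))               # ['Python', 'Crawler']
# An absolute path picks out one specific field
print(jsonpath.jsonpath(store, '$.store.book[0].price'))  # [30]
# A filter expression: books cheaper than 40
print(jsonpath.jsonpath(store, '$..book[?(@.price<40)]'))
# Note: jsonpath.jsonpath returns False (not []) when nothing matches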
Basic usage of BeautifulSoup
from bs4 import BeautifulSoup
soup = BeautifulSoup(open('16_beautifulSoup.html','r',encoding='utf-8'),'lxml')
# print(soup)
# Get the first li tag
# print(soup.li)
# Get the tag's attributes
# print(soup.li.attrs)

# Some bs4 functions
# find(): returns the first li tag; specific conditions can be passed as arguments
# print(soup.find('li'))
# print(soup.find('li', title="6"))
# print(soup.find('li', class_="box"))

# find_all(): returns all li tags (findAll is the legacy alias of find_all)
print(soup.find_all('li'))
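Besides find() and find_all(), BeautifulSoup also supports CSS selectors through select(). A minimal self-contained sketch; the inline HTML below stands in for 16_beautifulSoup.html, whose contents are not shown here:
from bs4 import BeautifulSoup

# Inline HTML as a stand-in for the local file used above
html = """
<ul class="box">
    <li id="first" title="6">Beijing</li>
    <li class="hot">Shanghai</li>
    <li>Shenzhen</li>
</ul>
"""
soup = BeautifulSoup(html, 'lxml')

print(soup.select('li'))          # all li tags
print(soup.select('.hot'))        # select by class
print(soup.select('#first'))      # select by id
print(soup.select('ul > li')[0].get_text())  # text of the first child li: Beijing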