# Download

## urllib
def urlretrieve_download(url, filename):
    """Save the data at *url* to the local file *filename* using urllib.

    Thin wrapper over ``urllib.request.urlretrieve`` — the simplest stdlib
    way to download a resource to disk.  Note ``urlretrieve`` is a legacy
    interface and may be deprecated in future Python versions.
    """
    # Local import: the page's bare "urllib" heading above is not a real
    # import statement, so bring the module into scope here.
    import urllib.request
    urllib.request.urlretrieve(url, filename)

## requests
def requests_download(url, filepath, headers=None):
    """Save the data at *url* to the file *filepath* using requests.

    Downloads the whole response body into memory, then writes it out.
    Fine for small files; use :func:`requests_download_streaming` for
    large ones.
    """
    res = requests.get(url, headers=headers, timeout=20)
    with open(filepath, 'wb') as f:
        f.write(res.content)


def requests_download_streaming(url, filepath, headers=None, chunk_size=32):
    """Save *url* to *filepath*, writing the body in chunks.

    ``stream=True`` is required for ``iter_content`` to actually stream;
    without it requests buffers the entire body first and the chunked
    write gains nothing.  *chunk_size* defaults to the original
    snippet's 32 bytes — a much larger value (e.g. 8192) is usually
    better in practice.
    """
    res = requests.get(url, headers=headers, timeout=20, stream=True)
    with open(filepath, 'wb') as f:
        for chunk in res.iter_content(chunk_size=chunk_size):
            f.write(chunk)


def requests_download_checked(url, path, headers=None):
    """Save *url* to *path* only when the server answers 200 OK.

    Uses a ``with`` block so the file handle is closed deterministically
    (the original ``open(path, 'wb').write(...)`` relied on the garbage
    collector to close it).
    """
    res = requests.get(url, headers=headers, timeout=20)
    if res.status_code == 200:
        with open(path, 'wb') as f:
            f.write(res.content)

## scrapy
## telethon

Last updated