Preface

These are the basics I learned on 安全牛; this post organizes them.
The requests library

requests is a simple, easy-to-use HTTP library implemented in Python, and it is much more concise to work with than urllib. It is a third-party library, so install it from the command line first:

```
pip install requests
```
(Figure: installing the requests library)
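To make the "more concise than urllib" claim concrete, here is a minimal sketch fetching the same page both ways (the URL is just an example):

```python
import requests
import urllib.request

# requests: one call, text decoding handled for you
print(requests.get("http://www.baidu.com").text)

# urllib: open, read, and decode the bytes yourself
with urllib.request.urlopen("http://www.baidu.com") as resp:
    print(resp.read().decode("utf-8"))
```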
See the comments in each snippet for the details.
Sending a GET request ($_GET-style)

```python
import requests

url = "http://baidu.com"

# Route traffic through a local proxy (e.g. Burp on 127.0.0.1:8080)
proxies = {
    "http": "http://127.0.0.1:8080",
    "https": "http://127.0.0.1:8080",
}

# Custom request headers
headers = {'user-agent': 'my-hahaha/0.0.1', 'lala': 'hello world'}

# Cookies to send with the request
cookies = dict(cookies_are='working')

# verify=False skips TLS certificate verification (needed behind an intercepting proxy)
r = requests.get(url, verify=False, proxies=proxies, headers=headers, cookies=cookies)

print(r.text)      # body decoded as text
print(r.content)   # raw body bytes

# Save the raw response to a file
f = open("C:/Users/17295/Desktop/py.txt", "wb+")
f.write(r.content)
f.close()

print(r.request.headers)   # headers we sent
print(r.headers)           # headers the server returned
print(r.cookies)           # cookies the server set
print(r.encoding)          # encoding requests guessed
r.encoding = "UTF-8"       # override the guess
print(r.status_code)       # HTTP status code
```
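The snippet above hard-codes the URL; for a GET request with query parameters, requests can also build the query string from a dict via the params argument. A minimal sketch (httpbin.org is just a demo endpoint, and the parameter names are made up):

```python
import requests

params = {"wd": "python", "page": 1}

# requests URL-encodes the dict and appends it to the URL
r = requests.get("http://httpbin.org/get", params=params)
print(r.url)   # http://httpbin.org/get?wd=python&page=1
```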
Sending a POST request ($_POST-style)

```python
import requests

url = "https://account.tophant.com/login.html?response_type=code&client_id=b611bfe4ef417dbc&state=2e509c092de6ba1bf39a6fff76dd9a76"

proxies = {
    "http": "http://127.0.0.1:8080",
    "https": "http://127.0.0.1:8080",
}
headers = {'user-agent': 'my-hahaha/0.0.1', 'lala': 'hello world'}
cookies = dict(cookies_are='working')

# Form fields sent in the request body
payload = {'name': 'loop', 'age': 12}

r = requests.post(url, verify=False, data=payload, proxies=proxies, headers=headers, cookies=cookies)
print(r.content)
```
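data= sends the payload form-encoded. If an API expects a JSON body instead, requests can serialize the dict itself through the json argument; a minimal sketch against httpbin (a demo endpoint, not the login URL above):

```python
import requests

payload = {'name': 'loop', 'age': 12}

# json= serializes payload and sets Content-Type: application/json
r = requests.post("http://httpbin.org/post", json=payload)
print(r.json()["json"])   # httpbin echoes the body back: {'name': 'loop', 'age': 12}
```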
session()

```python
import requests

# A Session keeps cookies (and connection pooling) across requests
conn = requests.session()

url = "http://www.baidu.com"

r = conn.get(url)
print(r.request.headers)   # first request: no Cookie header yet

r = conn.get(url)
print(r.request.headers)   # second request: carries cookies set by the first response
```
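The value of a session is that cookies set by one response are sent automatically with the next request. A small sketch using httpbin's cookie endpoints (demo URLs, not part of the original example) makes this visible:

```python
import requests

s = requests.session()

# First request: the server sets a cookie
s.get("http://httpbin.org/cookies/set/token/abc123")

# Second request: the session sends the cookie back automatically
r = s.get("http://httpbin.org/cookies")
print(r.json())   # {'cookies': {'token': 'abc123'}}
```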
json()

```python
import json

_dic = {"name": "loop", "age": "12"}
print(_dic)
print(type(_dic))   # <class 'dict'>

# dumps: serialize a dict into a JSON string
s = json.dumps(_dic, indent=2)
print(s)
print(type(s))      # <class 'str'>

# loads (not load) parses a JSON string; load reads from a file object
d = json.loads(s)
print(d)
print(type(d))      # <class 'dict'>
```
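dumps/loads work on strings; their counterparts dump/load do the same against file objects, which is where json.load (the call the original snippet reached for) actually applies. A minimal sketch, with an arbitrary filename:

```python
import json

_dic = {"name": "loop", "age": "12"}

# dump: write JSON straight into a file object
with open("data.json", "w") as f:
    json.dump(_dic, f, indent=2)

# load: parse JSON from a file object
with open("data.json") as f:
    d = json.load(f)
print(d)   # {'name': 'loop', 'age': '12'}
```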
Putting it into practice

Search for information through the GitHub code-search API.
```python
import requests
import json

if __name__ == "__main__":
    key = "baidu"
    url = "https://api.github.com/search/code?q=%s" % key
    # GitHub personal access token for authenticated API calls
    TOKEN = 'a9275dcdf30cc646fcf7df7569375b260105a059'
    headers = {"Authorization": "token %s" % TOKEN}
    params = {"per_page": 10, "page": 0}
    r = requests.get(url, headers=headers, params=params)
    d = r.json()
    print(json.dumps(d, indent=4))
```
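Since r.json() returns a plain dict, individual fields can be pulled out instead of dumping the whole document. A sketch, assuming the documented total_count and items fields of GitHub's code-search response (the token placeholder is hypothetical):

```python
import requests

key = "baidu"
headers = {"Authorization": "token <your-token>"}   # hypothetical placeholder
params = {"per_page": 10, "page": 0}
r = requests.get("https://api.github.com/search/code?q=%s" % key,
                 headers=headers, params=params)
d = r.json()

print("total_count:", d.get("total_count"))
for item in d.get("items", []):
    # each item describes one matching file
    print(item["repository"]["full_name"], "-", item["path"])
```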
multiprocessing

process_1

```python
import multiprocessing
import time

def worker(interval):
    n = 5
    while n > 0:
        print("The time is {0}".format(time.ctime()))
        time.sleep(interval)
        n -= 1

if __name__ == "__main__":
    p = multiprocessing.Process(target=worker, args=(3,))
    p.start()
    print("p.pid", p.pid)
    print("p.name", p.name)
    print("p.is_alive", p.is_alive())   # is_alive is a method, so call it
```
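Note that is_alive() usually prints True here, because the parent races ahead while the child sleeps. To wait for the child, call join(); a minimal sketch with the same worker shape:

```python
import multiprocessing
import time

def worker(interval):
    time.sleep(interval)
    print("worker done")

if __name__ == "__main__":
    p = multiprocessing.Process(target=worker, args=(3,))
    p.start()
    p.join()                             # block until the child exits
    print("p.is_alive", p.is_alive())    # now False
```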
process_2

```python
import multiprocessing
import time

def worker_1(interval):
    print("worker_1")
    time.sleep(interval)
    print("end worker_1")

def worker_2(interval):
    print("worker_2")
    time.sleep(interval)
    print("end worker_2")

def worker_3(interval):
    print("worker_3")
    time.sleep(interval)
    print("end worker_3")

if __name__ == "__main__":
    p1 = multiprocessing.Process(target=worker_1, args=(2,))
    p2 = multiprocessing.Process(target=worker_2, args=(3,))
    p3 = multiprocessing.Process(target=worker_3, args=(4,))
    p1.start()
    p2.start()
    p3.start()
    print("The number of cpu is:" + str(multiprocessing.cpu_count()))
    for p in multiprocessing.active_children():
        print("child p.name:" + p.name + "\tp.pid:" + str(p.pid))
    print("END!!!!!!!!!!!!!")
```
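In the output of this example, END!!! appears before the workers finish, because the parent never waits for its children. If the intent is to print it last (an assumption), joining each child first does the job; a compact variant:

```python
import multiprocessing
import time

def worker(name, interval):
    print(name)
    time.sleep(interval)
    print("end " + name)

if __name__ == "__main__":
    procs = [multiprocessing.Process(target=worker, args=("worker_%d" % (i + 1), i + 2))
             for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()   # wait for every child before printing END
    print("END!!!!!!!!!!!!!")
```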