线程Thread
1.基本使用
(1) 一个进程里包含了多个线程,线程之间是异步并发
# Basic usage: one process hosts several threads running concurrently.
from threading import Thread
from multiprocessing import Process
import os, time, random


def func(i):
    """Sleep a random moment, then report the current pid and the argument."""
    time.sleep(random.uniform(0.1, 0.9))
    print("当前进程号:{}".format(os.getpid()), i)


if __name__ == "__main__":
    # Spawn ten threads; all of them live inside this same process.
    for idx in range(10):
        worker = Thread(target=func, args=(idx,))
        worker.start()
    print(os.getpid())
(2) 并发场景下,创建和启动多线程比多进程的开销小得多,因此速度更快
# Compare creation/startup cost: 10,000 threads vs 10,000 processes.
def func(i):
    print("当前进程号:{} , 参数是{} ".format(os.getpid(), i))


if __name__ == "__main__":
    # --- threads: finish in roughly 1.9 s on the reference machine ---
    handles = []
    startime = time.time()
    for i in range(10000):
        worker = Thread(target=func, args=(i,))
        worker.start()
        handles.append(worker)
    for worker in handles:
        worker.join()
    endtime = time.time()
    print("运行的时间是{}".format(endtime - startime))

if __name__ == "__main__":
    # --- processes: roughly 101.7 s — process creation is far heavier ---
    handles = []
    startime = time.time()
    for i in range(10000):
        worker = Process(target=func, args=(i,))
        worker.start()
        handles.append(worker)
    for worker in handles:
        worker.join()
    endtime = time.time()
    print("运行的时间是{}".format(endtime - startime))
(3) 多线程之间,数据共享
# Threads of one process share module-level data: all 100 decrements
# land on the same `num`, so it reaches 0.
num = 100
lst = []


def func():
    global num
    num -= 1


for _ in range(100):
    worker = Thread(target=func)
    worker.start()
    lst.append(worker)
for worker in lst:
    worker.join()
print(num)
2.用类定义线程
(1)线程的相关属性
""" # 线程.is_alive() 检测线程是否仍然存在 # 线程.setName() 设置线程名字 # 线程.getName() 获取线程名字 # 1.currentThread().ident 查看线程id号 # 2.enumerate() 返回目前正在运行的线程列表 # 3.activeCount() 返回目前正在运行的线程数量 """ def func(): time.sleep(1) if __name__ == "__main__": t = Thread(target=func) t.start() # 检测线程是否仍然存在 print( t.is_alive() ) # 线程.getName() 获取线程名字 print(t.getName()) # 设置线程名字 t.setName("抓API接口") print(t.getName()) from threading import currentThread from threading import enumerate from threading import activeCount def func(): time.sleep(0.1) print("当前子线程号id是{},进程号{}".format( currentThread().ident ,os.getpid()) ) if __name__ == "__main__": t = Thread(target=func) t.start() print("当前主线程号id是{},进程号{}".format( currentThread().ident ,os.getpid()) ) for i in range(5): t = Thread(target=func) t.start() # 返回目前正在运行的线程列表 lst = enumerate() print(lst,len(lst)) # 返回目前正在运行的线程数量 (了解) print(activeCount())
(2)必须继承父类Thread,来自定义线程类
class MyThread(Thread):
    """Custom thread class: subclass Thread and override run()."""

    def __init__(self, name):
        # Initialise the Thread machinery first, then attach our own state.
        super().__init__()
        self.name = name

    def run(self):
        # Executed in the new thread when start() is called.
        print("当前进程号{},name={}".format(os.getpid(), self.name))


if __name__ == "__main__":
    t = MyThread("我是线程")
    t.start()
    print("当前进程号{}".format(os.getpid()))
3.守护线程
# ### Daemon thread: it is terminated automatically as soon as every
# NON-daemon thread has finished — it never outlives them.
# Here func1 loops forever, but as a daemon it is killed once the main
# thread plus func2/func3 are done (after ~6 s, when func3 ends).
from threading import Thread
import time


def func1():
    while True:
        time.sleep(1)
        print("我是函数func1")


def func2():
    print("我是func2 start ... ")
    time.sleep(3)
    print("我是func2 end ... ")


def func3():
    print("我是func3 start ... ")
    time.sleep(6)
    print("我是func3 end ... ")


if __name__ == "__main__":
    t = Thread(target=func1)
    t2 = Thread(target=func2)
    t3 = Thread(target=func3)
    # Mark as daemon BEFORE start(); setDaemon() is deprecated since 3.10.
    t.daemon = True
    t.start()
    t2.start()
    t3.start()
    print("主线程执行结束.... ")
4.线程中的数据安全问题
from threading import Thread, Lock
import time

n = 0


def func1(lock):
    """Add 1,000,000 to the shared counter while holding the lock."""
    global n
    lock.acquire()
    for _ in range(1000000):
        n += 1
    lock.release()


def func2(lock):
    """Subtract 1,000,000; `with` performs acquire + release automatically."""
    global n
    with lock:
        for _ in range(1000000):
            n -= 1


if __name__ == "__main__":
    workers = []
    lock = Lock()
    start = time.time()
    for _ in range(10):
        t1 = Thread(target=func1, args=(lock,))
        t1.start()
        t2 = Thread(target=func2, args=(lock,))
        t2.start()
        workers.append(t1)
        workers.append(t2)
    for worker in workers:
        worker.join()
    end = time.time()
    print("主线程执行结束... 当前n结果为{} ,用时{}".format(n, end - start))
5.信号量 (Semaphore)
"""同一时间对多个线程上多把锁""" from threading import Thread,Semaphore import time , random def func(i,sem): time.sleep(random.uniform(0.1,0.7)) # with语法自动实现上锁 + 解锁 with sem: print("我在电影院拉屎 .... 我是{}号".format(i)) if __name__ == "__main__": sem = Semaphore(5) for i in range(30): Thread(target=func,args=(i,sem)).start() print(1) """ 创建线程是异步的, 上锁的过程会导致程序变成同步; """
6.互斥锁 死锁 递归锁
(1)语法上的死锁
from threading import Thread, Lock, RLock
import time

# "Syntactic" deadlock: acquiring the SAME (non-reentrant) Lock twice in
# a row, without releasing in between, blocks the thread forever.
"""语法上的死锁: 是连续上锁不解锁"""
"""
lock = Lock()
lock.acquire()
# lock.acquire()  error
print("代码执行中 ... 1")
lock.release()
lock.release()
"""
# Two completely independent locks may be nested without danger.
"""是两把完全不同的锁"""
lock1 = Lock()
lock2 = Lock()
lock1.acquire()
lock2.acquire()
print("代码执行中 ... 2")
lock2.release()
lock1.release()
(2)逻辑上的死锁
# Logical deadlock demo: eat1 grabs noodles->kuaizi while eat2 grabs
# kuaizi->noodles; each side holds one lock while waiting for the other,
# so the four threads end up frozen. This is INTENTIONALLY broken.
noodles_lock = Lock()
kuaizi_lock = Lock()


def eat1(name):
    noodles_lock.acquire()
    print("{}抢到面条了 ... ".format(name))
    kuaizi_lock.acquire()
    print("{}抢到筷子了 ... ".format(name))
    print("开始享受香菇青菜面 ... ")
    time.sleep(0.5)
    kuaizi_lock.release()
    print("{}吃完了,满意的放下了筷子".format(name))
    noodles_lock.release()
    print("{}吃完了,满意的放下了面条".format(name))


def eat2(name):
    kuaizi_lock.acquire()
    print("{}抢到筷子了 ... ".format(name))
    noodles_lock.acquire()
    print("{}抢到面条了 ... ".format(name))
    print("开始享受香菇青菜面 ... ")
    time.sleep(0.5)
    noodles_lock.release()
    print("{}吃完了,满意的放下了面条".format(name))
    # kuaizi_lock.release()
    print("{}吃完了,满意的放下了筷子".format(name))


if __name__ == "__main__":
    lst1 = ["小明", "小红"]
    lst2 = ["小强", "小智"]
    for name in lst1:
        Thread(target=eat1, args=(name,)).start()
    for name in lst2:
        Thread(target=eat2, args=(name,)).start()
(3) 使用递归锁
# Prefer solving the problem with ONE lock — nested locks invite the
# logical deadlock shown above. RLock (recursive/reentrant lock) may be
# acquired repeatedly by the thread that already owns it, so binding
# BOTH names to a single RLock makes the nested acquires harmless and
# dissolves the deadlock; it is released only when every acquire has
# been matched by a release.
noodles_lock = kuaizi_lock = RLock()


def eat1(name):
    noodles_lock.acquire()
    print("{}抢到面条了 ... ".format(name))
    kuaizi_lock.acquire()
    print("{}抢到筷子了 ... ".format(name))
    print("开始享受香菇青菜面 ... ")
    time.sleep(0.5)
    kuaizi_lock.release()
    print("{}吃完了,满意的放下了筷子".format(name))
    noodles_lock.release()
    print("{}吃完了,满意的放下了面条".format(name))


def eat2(name):
    kuaizi_lock.acquire()
    print("{}抢到筷子了 ... ".format(name))
    noodles_lock.acquire()
    print("{}抢到面条了 ... ".format(name))
    print("开始享受香菇青菜面 ... ")
    time.sleep(0.5)
    noodles_lock.release()
    # Fixed: this message previously said 筷子 (chopsticks) although the
    # noodles lock was just released; now consistent with eat1.
    print("{}吃完了,满意的放下了面条".format(name))
    kuaizi_lock.release()
    print("{}吃完了,满意的放下了筷子".format(name))


if __name__ == "__main__":
    lst1 = ["康裕康", "张宇"]
    lst2 = ["张保张", "赵沈阳"]
    for name in lst1:
        Thread(target=eat1, args=(name,)).start()
    for name in lst2:
        Thread(target=eat2, args=(name,)).start()
7.事件 (Event)
(1)基本知识
""" wait : 动态加阻塞 (True => 放行 False => 阻塞) is_set : 获取内部成员属性值是True 还是 False set : 把False -> True clear : 把True -> False """
(2)模拟连接远程数据库
from threading import Thread, Event
import time, random

# Simulate connecting to a remote database: at most three attempts,
# raise TimeoutError if all of them fail.


def check(e):
    """Simulate credential checking, then signal success via the event."""
    print("目前正在检测您的账号和密码 .... ")
    # Random delay of 1 ~ 6 seconds stands in for network latency.
    time.sleep(random.randrange(1, 7))
    # Flip the internal flag False -> True.
    e.set()


def connect(e):
    connected = False
    for attempt in range(1, 4):
        # Block at most one second waiting for the flag.
        e.wait(1)
        if e.is_set():
            print("数据库连接成功 ... ")
            connected = True
            break
        print("尝试连接数据库第{}次失败了...".format(attempt))
    # All three attempts failed -> raise a timeout error.
    if not connected:
        raise TimeoutError


if __name__ == "__main__":
    e = Event()
    t1 = Thread(target=check, args=(e,))
    t1.start()
    t2 = Thread(target=connect, args=(e,))
    t2.start()
8.线程队列(Queue)
# ### Thread queues
from queue import Queue

# put        : blocks when the queue is full
# get        : blocks when the queue is empty
# put_nowait : raises queue.Full instead of blocking
# get_nowait : raises queue.Empty instead of blocking

# (1) Queue — first in, first out
q = Queue()
q.put(100)
q.put(200)
print(q.get())
# print(q.get())
# print(q.get())          blocks forever (queue empty)
# print(q.get_nowait())
# print(q.get_nowait())   raises queue.Empty

# Queue(3) caps the queue at three elements.
q2 = Queue(3)
q2.put(1000)
q2.put(2000)
# q2.put(3000)
# q2.put(4000)            blocks (queue full)
q2.put_nowait(6000)
# q2.put_nowait(4000)     raises queue.Full

# (2) LifoQueue — last in, first out (stack behaviour)
from queue import LifoQueue
lq = LifoQueue()
lq.put(110)
lq.put(120)
lq.put(119)
print(lq.get())
print(lq.get())
print(lq.get())

# (3) PriorityQueue — smallest element comes out first.
# All items in one priority queue must be mutually comparable.
from queue import PriorityQueue
pq = PriorityQueue()
# 1. numbers
pq.put(100)
pq.put(19)
pq.put(-90)
pq.put(88)
print(pq.get())
print(pq.get())
print(pq.get())
print(pq.get())
# 2. strings (compared by Unicode code points)
pq.put("fjd")
pq.put("fa")
pq.put('agag')
pq.put("小强")
pq.put("小红")
pq.put('小明')
print(pq.get())
print(pq.get())
print(pq.get())
print(pq.get())
print(pq.get())
print(pq.get())
# 3. containers (tuples compare element by element)
pq.put((22, "fdh"))
pq.put((67, "afd"))
pq.put((3, "vdsa"))
pq.put((3, "lfh"))
print(pq.get())
print(pq.get())
print(pq.get())
print(pq.get())
# 4. Caution: mixing incomparable types raises TypeError inside put()
# (heap insertion compares the new item against existing ones), so the
# offending calls are commented out like the other erroring examples.
pq.put(100)
# pq.put("nihao")      TypeError: '<' not supported between str and int
# pq.put((1, 2, 3))    TypeError
9.进程池 和 线程池
(1)进程池
from concurrent.futures import ProcessPoolExecutor
import os, time, random

# print(os.cpu_count())  # number of logical processors
# A fixed pool of pre-spawned processes gives true multi-core parallelism.


def func(i):
    # print(i)
    time.sleep(random.uniform(0.1, 0.8))
    print(" 任务执行中 ... start ... 进程号{}".format(os.getpid()), i)
    print(" 任务执行中 ... end ... 进程号{}".format(os.getpid()))
    return i


if __name__ == "__main__":
    futures = []
    # (1) Create the pool; default worker count is the machine's logical
    # CPU count (os.cpu_count()).
    pool = ProcessPoolExecutor()
    # (2) submit(task, arg1, arg2, ...) is asynchronous. If one worker can
    # finish tasks quickly enough, the pool will not spin up extra
    # processes — saving system resources.
    for i in range(10):
        fut = pool.submit(func, i)
        # print(fut)
        # Calling fut.result() here would serialise the loop (it blocks).
        futures.append(fut)
    # (3) Collect each task's return value.
    for fut in futures:
        print(fut.result(), ">===获取返回值===?")
    # (4) Block until every pooled process has finished.
    pool.shutdown()
    print("进程池结束 ... ")
(2)线程池
from concurrent.futures import ThreadPoolExecutor
import os, time, random
from threading import current_thread as ct


def func(i):
    print(" 任务执行中 ... start ... 线程号{}".format(ct().ident), i)
    time.sleep(1)
    print(" 任务执行中 ... end ... 线程号{}".format(os.getpid()))
    # Return the worker thread's id so the caller can count distinct workers.
    return ct().ident


if __name__ == "__main__":
    futures = []
    setvar = set()
    # (1) Create the pool. The default worker count is derived from the
    # CPU count (historically cpu_count() * 5; min(32, cpu_count() + 4)
    # on modern Python).
    t = ThreadPoolExecutor()
    # (2) Submit tasks asynchronously. If one thread can finish tasks
    # quickly enough, the pool won't use extra threads — saving resources.
    for i in range(100):
        futures.append(t.submit(func, i))
    # (3) Collect return values; the set ends up holding one id per
    # distinct worker thread that was actually used.
    for fut in futures:
        setvar.add(fut.result())
    # (4) Block until every pooled thread has finished.
    t.shutdown()
    print("主线程执行结束 ... ")
    print(setvar, len(setvar))
(3) 线程池 map
from threading import current_thread as ct
# Fixed: Iterator/Iterable must come from collections.abc — the aliases
# in `collections` were removed in Python 3.10.
from collections.abc import Iterator, Iterable


def func(i):
    time.sleep(random.uniform(0.1, 0.7))
    print("thread ... 线程号{}".format(ct().ident), i)
    return "*" * i


if __name__ == "__main__":
    t = ThreadPoolExecutor()
    # map() submits every element and returns the results lazily,
    # as an iterator.
    it = t.map(func, range(100))
    print(isinstance(it, Iterator))
    # Wait for every pooled thread to finish before continuing.
    t.shutdown()
    # Drain the iterator to obtain the return values.
    for res in it:
        print(res)

# Summary: whether process pool or thread pool, ALL tasks are executed by
# the fixed set of workers — the system never creates extra processes or
# threads beyond the pool size.
10.回调函数
(1)原型
class Ceshi():
    """Prototype of the callback pattern: do the work, then 'call back'."""

    def add_done_callback(self, func):
        # The "system" performs its own operations first ...
        print("系统执行操作1 ... ")
        print("系统执行操作2 ... ")
        # ... and then calls the user-supplied function with itself,
        # letting the caller fetch the final result.
        func(self)

    def result(self):
        return 112233


def call_back(obj):
    print(obj.result())


obj = Ceshi()
obj.add_done_callback(call_back)
(2)回调函数
""" 回调函数: 回头调用一下函数获取最后结果 微信支付宝付款成功后, 获取付款金额 微信支付宝退款成功后, 获取退款金额 一般用在获取最后的状态值时,使用回调 通过add_done_callback最后调用一下自定义的回调函数; """ from concurrent.futures import ProcessPoolExecutor , ThreadPoolExecutor from threading import currentThread as ct import os,time,random """进程任务""" def func1(i): time.sleep(random.uniform(0.1,0.9)) print(" 进程任务执行中 ... start ... 进程号{}".format(os.getpid()) , i ) print(" 进程任务执行中 ... end ... 进程号{}".format(os.getpid()) ) return i def call_back1(obj): print( "<==回调函数的进程号{}==>".format(os.getpid()) ) print(obj.result()) """线程任务""" def func2(i): time.sleep(random.uniform(0.1,0.9)) print(" 线程任务执行中 ... start ... 线程号{}".format(ct().ident) , i ) print(" 线程任务执行中 ... end ... 线程号{}".format( ct().ident) ) return i def call_back2(obj): print( "<==回调函数的线程号{}==>".format( ct().ident) ) print(obj.result()) if __name__ == "__main__": """ # (1)进程池 结果:(进程池的回调函数由主进程执行) p = ProcessPoolExecutor() # os.cpu_count() => 4 for i in range(1,11): obj = p.submit(func1 , i ) # 使用add_done_callback在获取最后返回值的时候,可以异步并行 obj.add_done_callback(call_back1) # 直接使用result获取返回值的时候,会变成同步程序,速度慢; # obj.result() p.shutdown() print( "主进程执行结束...进程号:" , os.getpid() ) """ print("<==============================================>") # (2)线程池 结果:(线程池的回调函数由子线程执行) t = ThreadPoolExecutor() for i in range(1,11): obj = t.submit(func2 , i ) # 使用add_done_callback在获取最后返回值的时候,可以异步并发 obj.add_done_callback(call_back2) # 直接使用result获取返回值的时候,会变成同步程序,速度慢; # obj.result() t.shutdown() print("主线程执行结束 .... 线程号{}".format(ct().ident))

浙公网安备 33010602011771号