day39

(1)验证GIL锁
(2)IO密集型和计算密集型
(3)死锁和递归锁
(4)信号量
(5)Event事件
(6)线程queue
(7)线程池和进程池
# 验证GIL锁
'''
from threading import Thread
from multiprocessing import Process


def task():
    # Busy-wait forever: a pure CPU spin used to show that, under the GIL,
    # four such threads cannot saturate four cores, while four processes can.
    while True:
        pass


if __name__ == '__main__':
    # Spawn 4 busy-loop workers and watch CPU utilization.
    for i in range(4):
        t = Thread(target=task)       # Because of the GIL, only one thread executes bytecode at a time, so CPU usage cannot max out all cores
        # t = Process(target=task)        # With processes, each one is scheduled on its own core, so utilization can reach 100% on all 4 CPUs
        t.start()
'''

# IO密集型和计算密集型
'''
# 针对于cpython解释器:
# 在单核情况下:开线程
# 在多核情况下:
# (1)IO密集型:开多线程,CPU遇到IO会切换到其他线程执行
# (2)计算密集型:开多进程,能被多个CPU调度执行
from multiprocessing import Process
from threading import Thread
import time


# (1)IO密集型
# def task():
#     time.sleep(2)
#
#
# if __name__ == '__main__':
#     start_time = time.time()
#     l = []
#     for i in range(200):
#         t = Thread(target=task)         # 多线程  2.049150228500366
#         # t = Process(target=task)      # 多进程   10.938572883605957
#         t.start()
#         l.append(t)
#
#     for t in l:
#         t.join()
#
#     end_time = time.time()
#     print(end_time - start_time)

# (2)计算密集型
def task():
    """CPU-bound workload: sum the integers 0..9,999,999 in a Python-level loop."""
    acc = 0
    for value in range(10000000):
        acc = acc + value


if __name__ == '__main__':
    # Time 10 CPU-bound tasks to compare threads vs. processes.
    start_time = time.time()
    l = []
    for i in range(10):
        # t = Thread(target=task)         # threads:   5.83294939994812 s (GIL serializes CPU work)
        t = Process(target=task)      # processes: 2.2345707416534424 s (true multi-core parallelism)
        t.start()
        l.append(t)

    # Wait for every worker so the elapsed time covers all of them.
    for t in l:
        t.join()

    end_time = time.time()
    print(end_time - start_time)
'''

# 死锁、递归锁
'''
# 死锁:是指两个或两个以上的进程或线程在执行过程中,因争夺资源而造成的一种互相等待的现象,
# 若无外力作用,它们都将无法推进下去。此时称系统处于死锁状态或系统产生了死锁,这些永远在互相等待的进程称为死锁进程
# from threading import Thread, Lock
# import time
# 
# mutexA = Lock()
# mutexB = Lock()
# 
# 
# def taskA(name):
#     mutexA.acquire()
#     print(f'{name}获取了A锁')
#     mutexB.acquire()
#     print(f'{name}获取了B锁')
#     print('taskA running...')
#     mutexB.release()
#     print(f'{name}释放了B锁')
#     mutexA.release()
#     print(f'{name}释放了A锁')
# 
# 
# def taskB(name):
#     mutexB.acquire()
#     print(f'{name}获取了B锁')
#     time.sleep(2)               # IO操作,CPU调度其他线程获取A锁,IO操作结束后,形成死锁
#     mutexA.acquire()
#     print(f'{name}获取了A锁')
#     print('taskB running...')
#     mutexA.release()
#     print(f'{name}释放了A锁')
#     mutexB.release()
#     print(f'{name}释放了B锁')
# 
# 
# if __name__ == '__main__':
#     l = ['X', 'Y', 'Z']
#     for name in l:
#         t1 = Thread(target=taskA, args=(name,))
#         t2 = Thread(target=taskB, args=(name,))
#         t1.start()
#         t2.start()


# 解决方法:递归锁,在Python中为了支持在同一线程中多次请求相同资源,python提供了可重入锁RLock。
# 这个RLock内部维护着一个锁和一个counter变量,counter记录了acquire的次数,从而使其资源可以被多次要求。
# 每acquire一次,内部计数器加1,每release一次,内部计数器减一
# 直到一个线程所有的acquire都被释放,其他的线程才能获得资源。上面的例子如果使用RLock代替Lock,则不会发生死锁。
from threading import Thread, RLock
import time

# Fix: use ONE reentrant lock (RLock) bound to both names, so a thread that
# already holds it can acquire it again without deadlocking.
# mutexA = RLock()
# mutexB = mutexA
mutexA = mutexB = RLock()       # both names refer to the SAME RLock object
print(mutexB is mutexA)         # prints True: identity, not just equality


def taskA(name):
    """Acquire lock A then lock B (the same underlying RLock), do the work,
    and release them in reverse order, logging every step."""
    with mutexA:
        print(f'{name}获取了A锁')
        with mutexB:
            print(f'{name}获取了B锁')
            print('taskA running...')
        print(f'{name}释放了B锁')
    print(f'{name}释放了A锁')


def taskB(name):
    """Acquire lock B then lock A (reverse order of taskA); the sleep yields
    the CPU mid-sequence so other threads get scheduled in between."""
    with mutexB:
        print(f'{name}获取了B锁')
        time.sleep(2)  # simulated I/O; with a plain Lock this window causes the deadlock
        with mutexA:
            print(f'{name}获取了A锁')
            print('taskB running...')
        print(f'{name}释放了A锁')
    print(f'{name}释放了B锁')


if __name__ == '__main__':
    # Launch paired A->B and B->A lock takers; since mutexA and mutexB are
    # the same RLock, this ordering no longer deadlocks.
    l = ['X', 'Y', 'Z']
    for name in l:
        t1 = Thread(target=taskA, args=(name,))
        t2 = Thread(target=taskB, args=(name,))
        t1.start()
        t2.start()
'''

# 信号量
'''
# Semaphore:信号量可理解为多把锁,允许同时有多个线程来操作数据
from threading import Thread, Semaphore
import time
import random

sm = Semaphore(3)       # 参数为可同时操作的线程数量


def task(name):
    """Run under the shared semaphore `sm`: at most 3 threads inside at once."""
    with sm:
        print(f'No.{name} is running...')
        time.sleep(random.randint(1, 3))
        print(f'No.{name} is over...')


if __name__ == '__main__':
    # 20 workers contend for the 3 semaphore slots.
    for i in range(20):
        t = Thread(target=task, args=(i,))
        t.start()
'''

# Event事件
'''
# 一些线程需要等待其他线程执行完成之后才能执行,类似于发射信号
from threading import Thread, Event
import os

event = Event()  # 创建事件对象
size = os.path.getsize('a.txt')  # 获取文件字节数量


def read_half():
    # Print the first half of the file (by byte count), then signal the
    # waiting reader thread through the Event.
    # NOTE(review): size // 2 is a BYTE offset; if it lands inside a
    # multi-byte UTF-8 sequence, decode('utf-8') will raise — confirm
    # a.txt splits cleanly at that boundary (e.g. ASCII content).
    half = size // 2
    with open('a.txt', 'rb') as f:
        print(f.read(half).decode('utf-8'))
        print('读完一半,发送信号')
        event.set()     # send the signal: the second half may now be read


def read_left():
    # Wait for the signal, then print the remaining half of the file.
    event.wait()        # block until the Event is set (would wait forever otherwise)
    with open('a.txt', 'rb') as f:
        f.seek(size // 2, 0)  # absolute seek to the midpoint byte
        print(f.read().decode('utf-8'))


if __name__ == '__main__':
    # Start both readers; read_left blocks on the Event until read_half sets it.
    t1 = Thread(target=read_half)
    t1.start()
    t2 = Thread(target=read_left)
    t2.start()
'''

# 线程queue
'''
# 进程queue和线程queue不是同一个
from multiprocessing import Queue  # 进程
from queue import Queue, LifoQueue, PriorityQueue  # 线程

# 线程间通信,因为共享变量会出现数据不安全问题,用线程queue通信,不需要加锁,内部自带
# 三种线程Queue
# (1)Queue:队列,先进先出
# (2)LifoQueue:栈,后进先出
# (3)PriorityQueue:优先级队列,谁小谁先出

q = Queue(5)        # FIFO queue, capacity 5: items come out in insertion order
q.put("铁蛋")
q.put("钢弹")
q.put("金蛋")
print(q.get())
print(q.get())
print(q.get())

q = LifoQueue(5)        # stack semantics: last in, first out
q.put("铁蛋")
q.put("钢弹")
print(q.get())
print(q.get())

q = PriorityQueue(3)       # smaller number = higher priority, dequeued first
q.put((-10, '金蛋'))
q.put((100, '银蛋'))
q.put((99, '铁蛋'))

print(q.get())
print(q.get())
print(q.get())
'''

# 线程池和进程池
'''
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from threading import Thread
import time
import random

# 进程池与线程池用法完全相同
pool = ThreadPoolExecutor(5)        # 参数为线程池的容量
# pool = ProcessPoolExecutor(5)        # 参数为进程池的容量


def task(name):
    # Simulated job: 1-3 seconds of random "work", then return a result string
    # that the completion callback will print.
    print('begin...')
    time.sleep(random.randint(1,3))
    print('end...')
    return f'{name} 已返回。'


def call_back(f):
    """Completion callback: print the finished future's return value.

    Registered via add_done_callback, so `f` is the completed Future.
    """
    outcome = f.result()
    print(outcome)


if __name__ == '__main__':
    # l = []
    # for i in range(10):
    #     # t = Thread(target=task,args=(i,))
    #     # t.start()
    #     # pool.submit(task, i)
    #     res = pool.submit(task, i)
    #     # print(res.result())         # result() blocks like join, which would make the pool run serially
    #     l.append(res)
    #
    # for i in l:
    #     print(i.result())

    for i in range(10):
        # Submit a task to the pool; when it completes, call_back is invoked
        # automatically with the finished Future.
        pool.submit(task, i).add_done_callback(call_back)
#
'''

 

posted @ 2020-08-27 00:52  板鸭没有腿  阅读(89)  评论(0)    收藏  举报