Python7
Python1-环境配置
Python2-基础认识
Python3-数据类型
Python4-面向对象
Python5-闭包和装饰器
Python6-IO模块
Python7-进程线程协程
Python8-网络编程
Python爬虫
进程线程协程之间的关系
进程 : 是资源单位
线程 : 是执行单位
协程 : 不存在,是人为创造出来的一个概念
一个进程可以有多个线程,一个线程可以分成多个协程
进程之间相互通信需要借助管道或队列,
线程之间共享所在进程的内存,可以直接通信,也可以通过管道或队列
进程
创建进程
from multiprocessing import Process
import time


def func(name):
    """Simulate a task: announce start, work 5s, announce completion."""
    print(f'{name}任务开始')
    time.sleep(5)
    print(f'{name}任务执行完毕')


if __name__ == "__main__":
    # start() submits the task asynchronously: the parent does not wait
    # for the child and runs the following statements immediately
    p = Process(target=func, args=('写讲话稿',))
    p.start()
    print('主进程')
面向对象编程方法
from multiprocessing import Process
import time


class MyProcess(Process):
    """Process subclass: the task body lives in run()."""

    def __init__(self, name) -> None:
        super().__init__()
        # stored under task_name so Process's own .name is not clobbered
        self.task_name = name

    def run(self) -> None:
        print(f'{self.task_name}任务开始')
        time.sleep(5)
        print('任务结束')


if __name__ == "__main__":
    p = MyProcess('约会')
    p.start()
    print('主进程')
run方法重写
import multiprocessing
import time


# overriding run(): the countdown below executes inside the child process
class MyProcess(multiprocessing.Process):
    def run(self) -> None:
        n = 5
        while n > 0:
            print(n)
            time.sleep(1)
            n -= 1


if __name__ == '__main__':
    p = MyProcess()
    p.start()
    p.join()
join等待
from multiprocessing import Process
import time


def func(name):
    """Simulate a task: announce start, work 5s, announce completion."""
    print(f'{name}任务开始')
    time.sleep(5)
    print(f'{name}任务执行完毕')


if __name__ == "__main__":
    p = Process(target=func, args=('写讲话稿',))
    p.start()
    p.join()  # the parent blocks here until the child process finishes
    print('主进程')
join等待2
from multiprocessing import Process
import time


def func(name, n):
    """Simulate a task that takes n seconds."""
    print(f'{name}任务开始')
    time.sleep(n)
    print(f'{name}任务执行完毕')


if __name__ == "__main__":
    # start all children first, then join: total elapsed time is roughly
    # the slowest task, not the sum of all three
    start = time.time()
    l = []
    for i in range(1, 4):
        p = Process(target=func, args=(f'写讲话搞{i}', i))
        p.start()
        l.append(p)
    for p in l:
        p.join()
    end = time.time()
    print('主进程', end - start)
进程间数据隔离与通信
隔离
from multiprocessing import Process

age = 18


def func():
    # runs in the child process: the rebinding only affects the
    # child's copy of the module globals
    global age
    age = 16


if __name__ == "__main__":
    p = Process(target=func)
    p.start()
    p.join()
    print(age)  # still 18 — process memory is isolated from the child
通信
from multiprocessing import Process, Queue


def task1(q):
    """Producer: hand one item to the shared queue."""
    q.put('宫保鸡丁')


def task2(q):
    """Consumer: blocks on get() until the producer's item arrives."""
    print(q.get())


if __name__ == "__main__":
    q = Queue()  # a queue both child processes can reach
    p1 = Process(target=task1, args=(q,))
    p2 = Process(target=task2, args=(q,))
    p1.start()
    p2.start()
'''
# 补充知识
消息队列:管道
'''
import queue
queue.Queue()  # thread-safe queue for threads within one process

from multiprocessing import queues, get_context
# fix: multiprocessing.queues.Queue requires an explicit context argument;
# calling it with no arguments raises TypeError
queues.Queue(ctx=get_context())

from multiprocessing import Queue
Queue()  # the usual process-safe queue (context handled internally)

from multiprocessing import Queue

# create a queue with room for six items
q = Queue(6)
q.put('a')
q.put('b')
q.put('c')
q.put('d')
q.put('e')
q.put('f')
# q.put_nowait('g')      # would raise queue.Full immediately
# q.put('g', timeout=3)  # would wait 3s, then raise queue.Full
print(q.empty())
print(q.full())
v1 = q.get()
v2 = q.get()
v3 = q.get()
v4 = q.get()
v5 = q.get()
v6 = q.get()
# q.get_nowait()           # would raise queue.Empty immediately
# v7 = q.get(timeout=3)    # (fix: original comment misspelled 'tiemout')
print(v1, v2, v3, v4, v5, v6)
进程号
from multiprocessing import Process, current_process
import os


def task():
    """Print the three ids: own pid twice (two APIs) and the parent's pid."""
    print(f'任务{current_process().pid}执行中')
    print(f'任务{os.getpid()}执行中')
    print(f'任务的父进程{os.getppid()}执行中')


if __name__ == "__main__":
    p = Process(target=task)
    p.start()
    p.terminate()  # kill the child; takes a moment to take effect
    print(p.is_alive())  # may still be True right after terminate()
    print(f'主进程{current_process().pid}')
    task()  # run once in the parent for comparison


'''
pid就是进程号
'''
僵尸进程和孤儿进程
-
僵尸进程
-
子进程死后,还会有一些资源占用(进程号,进程运行状态,运行时间等),等待父进程通过系统调用回收(尸体)
-
除了init进程之外,所有的进程,最后都会步入僵尸进程
-
危害:
- 子进程退出之后,父进程没有即时处理,僵尸进程会一直占用计算机的资源
- 如果产生了大量的僵尸进程,资源会被过度占用,系统没有可用的进程号,导致系统不能产生新的进程
-
-
孤儿进程
- 子进程处于存活状态,但是父进程意外死亡
- 操作系统会开设一个“孤儿院”(init进程),用来管理孤儿进程,回收孤儿进程的相关资源
守护进程
一般使用的守护进程是守护 操作系统的进程
from multiprocessing import Process
import time


def task(name):
    print(name, '还活着')
    time.sleep(3)
    print(name, '正常死亡')


if __name__ == "__main__":
    # kwargs form shown; args=('纣王',) would work the same
    p = Process(target=task, kwargs={'name': '纣王'})
    p.daemon = True  # daemon: the child dies the moment the parent exits
    p.start()
    time.sleep(1)
    print('纣王被杀')  # parent ends here, so '正常死亡' never prints
互斥锁
多个进程操作同一份数据的时候,会出现数据错乱的问题,
解决的方法就是加锁处理
把并发变成串行,虽然牺牲了运行效率,但保证了数据的安全
from multiprocessing import Process, Lock
import time
import json
import random


# check remaining tickets (read-only, so no lock is needed)
def search_ticket(name):
    with open(r'python并发编程/tickets.txt', 'r', encoding='utf-8') as f:
        dic = json.load(f)
    print(f'用户{name}查询余票:{dic.get("tickets_num")}')


# buy one ticket: a read-modify-write, so it must run under the lock
def buy_ticket(name):
    with open(r'python并发编程/tickets.txt', 'r', encoding='utf-8') as f:
        dic = json.load(f)
    # simulated network latency between reading and writing back
    time.sleep(random.randint(1, 5))
    if dic.get('tickets_num') > 0:
        dic['tickets_num'] -= 1
        with open(r'python并发编程/tickets.txt', 'w+', encoding='utf-8') as f:
            json.dump(dic, f)
        print(f'用户{name}买票成功')
    else:
        print(f"余票不足,用户{name}买票失败")


def task(name, mutex):
    search_ticket(name)
    # `with mutex` serializes the buyers, and — unlike the bare
    # acquire()/release() pair — also releases the lock if
    # buy_ticket raises, so one crash cannot deadlock the rest
    with mutex:
        buy_ticket(name)


if __name__ == "__main__":
    mutex = Lock()
    for i in range(1, 9):
        p = Process(target=task, args=(i, mutex))
        p.start()
生产者消费者模型
JoinableQueue
在Queue的基础上增加了一个计数器功能,每put一个数据,计数器加一
每调用一个task_done,计数器就减一
当计数器为0时,就会走q.join()后面的代码
from multiprocessing import Process, Queue, JoinableQueue
import time
import random


# NOTE(review): 'profucer' is a typo for 'producer'; the name is kept
# so any existing references keep working
def profucer(name, food, q):
    """Produce eight portions of `food`, one every 1-3 seconds."""
    for i in range(8):
        time.sleep(random.randint(1, 3))
        print(f'{name}生产了{food}{i}')
        q.put(f'{food}{i}')


def consumer(name, q):
    """Consume forever; runs as a daemon so it dies with the main process."""
    while True:
        food = q.get()
        time.sleep(random.randint(1, 3))
        print(f'{name}吃了{food}')
        q.task_done()  # tell the queue one item was fetched AND fully processed


if __name__ == "__main__":
    q = JoinableQueue()
    p1 = Process(target=profucer, args=('中华小当家', '黄金炒饭', q))
    p2 = Process(target=profucer, args=('神厨小福贵', '佛跳墙', q))
    c1 = Process(target=consumer, args=('八戒', q))
    c2 = Process(target=consumer, args=('悟空', q))
    p1.start()
    p2.start()
    # consumers never return, so make them daemons
    c1.daemon = True
    c2.daemon = True
    c1.start()
    c2.start()
    p1.join()
    p2.join()
    q.join()  # block until every produced item has been task_done()'d
进程池
池是在保证计算机硬件安全的前提下,最大限度地利用计算机资源;它牺牲了一些程序运行效率,但保证了计算机硬件的安全
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
import os

# default worker count is the machine's CPU count when no argument is given
pool = ProcessPoolExecutor(3)


def task(name):
    print(name, os.getpid())
    time.sleep(3)
    return name + 10


if __name__ == "__main__":
    f_list = []
    for i in range(50):
        future = pool.submit(task, i)  # returns a Future immediately
        f_list.append(future)
    pool.shutdown()  # wait for every queued task to finish, then close the pool
    for f in f_list:
        print('任务结果:', f.result())
进程池异步回调机制
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
import os

# default worker count is the machine's CPU count when no argument is given
pool = ProcessPoolExecutor(3)


def task(name):
    print(name, os.getpid())
    time.sleep(3)
    return name + 10


def call_back(res):
    # res is the finished Future; its result() is ready, so this never blocks
    print('call_back', res.result())


if __name__ == "__main__":
    for i in range(50):
        # fix: add_done_callback() returns None, so chaining it onto
        # submit() lost the Future — bind the Future first, then attach
        future = pool.submit(task, i)
        future.add_done_callback(call_back)
线程
创建线程
from collections.abc import Callable, Iterable, Mapping  # NOTE(review): unused here
from threading import Thread
import time
from typing import Any  # NOTE(review): unused here


def task(name):
    print(f'{name}任务开始')
    time.sleep(3)
    print(f'{name}任务结束')


if __name__ == "__main__":
    t = Thread(target=task, args=('悟空',))
    t.start()
    print('主线程')
面向对象编程方法
from threading import Thread
import time


class MyThread(Thread):
    """Thread subclass: the task body lives in run()."""

    def __init__(self, name) -> None:
        super().__init__()
        # NOTE(review): this overwrites Thread's own .name attribute —
        # harmless here, but a distinct attribute name would be safer
        self.name = name

    def run(self):
        print(f'{self.name}任务开始')
        time.sleep(3)
        print(f'{self.name}任务结束')


if __name__ == "__main__":
    t = MyThread('悟空')
    t.start()
    print('主线程')
join等待
from threading import Thread
import time


def task(name):
    print(f'{name}任务开始')
    time.sleep(3)
    print(f'{name}任务结束')


if __name__ == "__main__":
    t = Thread(target=task, args=('悟空',))
    t.start()
    t.join()  # the main thread waits for the child thread to finish
    print('主线程')
查询线程的进程对象
from threading import Thread
import os

age = 18


def task():
    # threads share the process's memory: this rebinding IS visible to
    # the main thread (unlike the separate-process version above)
    global age
    age = 16
    print('子线程', os.getpid())


if __name__ == "__main__":
    t = Thread(target=task)
    t.start()
    print(f'{age}主线程', os.getpid())  # same pid: both run in one process
查询当前活跃的线程数和线程名
from threading import Thread, current_thread, active_count
import time


def task():
    # current_thread().name identifies the thread executing this code
    print(current_thread().name)
    time.sleep(1)


if __name__ == "__main__":
    t = Thread(target=task)
    t2 = Thread(target=task)
    t.start()
    t2.start()
    print(current_thread().name)  # MainThread
    print('活跃的线程数量', active_count())
守护线程
主线程运行完毕后不会立即结束
要等待所有的子线程结束之后才会结束
因为主线程结束,意味着主线程所在的进程结束了
子线程想要取数据就无法正常读取
from threading import Thread
import time


def task(name):
    print(f'{name}还活着')
    time.sleep(3)
    print(f'{name}正常死亡')


if __name__ == "__main__":
    t = Thread(target=task, args=('妲己',))
    t.daemon = True  # daemon thread: killed as soon as the main thread exits
    t.start()
    print('纣王驾崩了')  # main thread ends here, so '正常死亡' never prints
互斥锁
from threading import Thread, Lock
import time

num = 180
mutex = Lock()


def task():
    global num
    # `with mutex` wraps the whole read-modify-write, so the decrement
    # is atomic across threads even with the sleep in the middle
    with mutex:
        temp = num
        time.sleep(0.05)
        num = temp - 1


if __name__ == "__main__":
    l = []
    for i in range(180):
        t = Thread(target=task)
        t.start()
        l.append(t)
    for t in l:
        t.join()
    print(num)  # 0: no lost updates
GIL
GIL是一把互斥锁,用来阻止同一个进程下的多线程同时执行,也就是说在同一个进程下的多线程,他们没办法并行执行,
有多个cpu都不能并行,一次只有一个cpu来执行。是CPython特有的
因为CPython的内存管理不是线程安全的
内存管理(垃圾回收机制)
引用计数
标记清除
分代回收
GC巡逻
python解释器版本
- CPython
- Jython
- PyPy
python多线程无用了么?
分情况:
-
单核
- 10个任务(计算密集型/IO密集型)
-
多核
- 10个任务(计算密集型/IO密集型)
-
多核计算密集型
- 多线程100+
- 多进程10+
-
多核IO密集型
- 多线程,节省资源
- 多进程,浪费资源
感知GIL
from threading import Thread

num = 180


def task():
    # unlocked read-modify-write: with any pause between the read and the
    # write, threads can overwrite each other's decrement — the GIL alone
    # does not make this sequence atomic
    global num
    temp = num
    num = temp - 1


if __name__ == "__main__":
    l = []
    for i in range(180):
        t = Thread(target=task)
        t.start()
        l.append(t)
    for t in l:
        t.join()
    print(num)
死锁现象
from threading import Thread, Lock, current_thread
import time

mutex1 = Lock()
mutex2 = Lock()


def task():
    # intentional deadlock demo: phase 1 takes lock1 then lock2, phase 2
    # takes lock2 then lock1 — two threads interleaving the two phases
    # end up each holding the lock the other needs
    mutex1.acquire()
    print(current_thread().name, '抢到锁1')
    mutex2.acquire()
    print(current_thread().name, '抢到锁2')
    mutex2.release()
    mutex1.release()
    mutex2.acquire()
    print(current_thread().name, '抢到锁2')
    time.sleep(1)  # widen the window so the deadlock reliably appears
    mutex1.acquire()
    print(current_thread().name, '抢到锁1')
    mutex1.release()
    mutex2.release()


if __name__ == "__main__":
    for i in range(8):
        t = Thread(target=task)
        t.start()
递归锁
解决部分死锁问题
递归锁内有一个计数器,每acquire一次计数器+1,每release一次计数器-1,
只要计数器不为0,其他人都不能抢到这把锁
from threading import Thread, RLock, current_thread
import time

# one reentrant lock behind both names: the owning thread may re-acquire
# it freely (each acquire bumps a counter, each release decrements it;
# other threads only get in when the counter hits 0)
mutex2 = mutex1 = RLock()


def task():
    mutex1.acquire()
    print(current_thread().name, '抢到锁1')
    mutex2.acquire()  # same RLock: the owner just increments the counter
    print(current_thread().name, '抢到锁2')
    mutex2.release()
    mutex1.release()  # counter back to 0: other threads may now acquire
    mutex2.acquire()
    print(current_thread().name, '抢到锁2')
    time.sleep(1)
    mutex1.acquire()
    print(current_thread().name, '抢到锁1')
    mutex1.release()
    mutex2.release()


if __name__ == "__main__":
    for i in range(8):
        t = Thread(target=task)
        t.start()
信号量
锁是黑名单
信号量是白名单
from threading import Thread, Semaphore
import time
import random

sp = Semaphore(5)  # five "parking spots" may be held at once


def task(name):
    sp.acquire()  # blocks while all five slots are taken
    print(f'{name},抢到车位')
    time.sleep(random.randint(3, 5))
    sp.release()  # free a slot for the next waiting thread


if __name__ == "__main__":
    for i in range(25):
        t = Thread(target=task, args=(f'宝马{i+1}号',))
        t.start()
线程间通信
event
通知另一个线程
from threading import Thread, Event
import time

event = Event()


def bus():
    print('公交车即将到站')
    time.sleep(3)
    print('公交车到站')
    event.set()  # wake every thread blocked in event.wait()


def passenger(name):
    print(name, '正在等车')
    event.wait()  # block until the bus thread calls set()
    print(name, '上车出发')


if __name__ == "__main__":
    t = Thread(target=bus)
    t.start()
    for i in range(10):
        t = Thread(target=passenger, args=(f'乘客{i}',))
        t.start()
Queue
import queue

# FIFO: first in, first out
q = queue.Queue()
q.put('x')  # enqueue (fix: the original called q.put() with no argument,
            # which raises TypeError)
print(q.get())  # dequeue -> 'x'
# q.get(timeout=3)   # wait up to 3s, then raise queue.Empty
# q.get_nowait()     # raise queue.Empty immediately if the queue is empty
# q.put_nowait('y')  # raise queue.Full immediately if the queue is full
print(q.full())  # whether the queue has reached its maxsize

# LIFO: last in, first out (a stack)
q = queue.LifoQueue()
q.put('a')
q.put('b')
q.put('c')
print(q.get())  # 'c'

# priority queue: the smallest priority number comes out first
q = queue.PriorityQueue()
q.put((18, 'a'))
q.put((69, 'b'))
q.put((36, 'c'))
q.put((-1, 'd'))
print(q.get())  # (-1, 'd')
print(q.get())  # (18, 'a')
线程池
池是在保证计算机硬件安全的前提下,最大限度地利用计算机资源;它牺牲了一些程序运行效率,但保证了计算机硬件的安全
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time

# default worker count is cpu_count * 5 when no argument is given
pool = ThreadPoolExecutor(10)


def task(name):
    print(name)
    time.sleep(3)
    return name + 10


if __name__ == "__main__":
    # fix: guard the submissions so importing this module does not launch
    # 50 tasks (matches the structure of the process-pool example)
    f_list = []
    for i in range(50):
        future = pool.submit(task, i)
        f_list.append(future)
    pool.shutdown()  # wait for every queued task to finish, then close the pool
    for f in f_list:
        print('任务结果:', f.result())
多进程VS多线程
计算密集型
from multiprocessing import Process
from threading import Thread
import time


def task():
    """Pure CPU work: sum the first ten million integers."""
    res = 0
    for i in range(10000000):
        res += i


if __name__ == "__main__":
    l = []
    starttime = time.time()
    for i in range(8):  # one worker per core
        # p = Process(target=task)  # elapsed: 1.8048515319824219
        p = Thread(target=task)  # elapsed: 4.411757469177246
        p.start()
        l.append(p)
    for p in l:
        p.join()
    endtime = time.time()
    print('花费时间:', endtime - starttime)
IO密集型
from multiprocessing import Process
from threading import Thread
import time


def task():
    """Pure IO wait: sleep one second."""
    time.sleep(1)


if __name__ == "__main__":
    l = []
    starttime = time.time()
    for i in range(80):
        # p = Process(target=task)  # elapsed: 2.4550466537475586
        p = Thread(target=task)  # elapsed: 1.0191149711608887
        p.start()
        l.append(p)
    for p in l:
        p.join()
    endtime = time.time()
    print('花费时间:', endtime - starttime)
协程
创建协程
人为创造出来的,也称为微线程,是用户态内的上下文切换技术
简单说就是在单线程下实现并发效果
当程序遇到IO操作时,通过写的代码,让代码自动完成切换
用代码监听IO,一旦遇到IO,就在代码层面上切换,欺骗cpu
import time
from gevent import monkey
monkey.patch_all()  # patch blocking stdlib calls so gevent can switch on IO
from gevent import spawn


# CPU-bound
# serial: ~0.105s
def f1():
    n = 0
    for i in range(1000000):
        n += i


def f2():
    n = 0
    for i in range(1000000):
        n += i


start = time.time()
f1()
f2()
end = time.time()
print(end - start)


# generator switching: ~0.206s — switching only adds overhead for CPU work
def f1():
    n = 0
    for i in range(1000000):
        n += i
        yield


def f2():
    g = f1()
    n = 0
    for i in range(1000000):
        n += i
        next(g)  # hand control to f1's generator each iteration


start = time.time()
f2()
end = time.time()
print(end - start)


# IO-bound
# serial: ~12.04s
def da():
    for _ in range(3):
        print('哒')
        time.sleep(2)


def mie():
    for _ in range(3):
        print('咩')
        time.sleep(2)


start = time.time()
da()
mie()
end = time.time()
print(end - start)


# gevent switching: spawn alone ~0.0009s; with join ~6.04s —
# total time becomes the longest single task, not the sum
def da():
    for _ in range(3):
        print('哒')
        time.sleep(2)  # monkey-patched: gevent switches here instead of blocking


def mie():
    for _ in range(3):
        print('咩')
        time.sleep(2)


start = time.time()
g1 = spawn(da)
g2 = spawn(mie)
g1.join()
g2.join()
end = time.time()
print(end - start)
非阻塞IO
import socket

server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow port reuse
server.bind(('127.0.0.1', 8080))
server.listen(5)  # backlog size
server.setblocking(False)  # every socket call now raises instead of waiting

c_list = []  # connections waiting to be served
d_lis = []   # connections to drop after this sweep

while True:
    try:  # on Windows, accept() with nothing pending raises BlockingIOError
        conn, addr = server.accept()
        c_list.append(conn)
    except BlockingIOError:
        for conn in c_list:
            try:
                data = conn.recv(1024)
                if not data:
                    # peer closed: close and schedule for removal, and skip
                    # the send (fix: the original fell through and called
                    # send() on the just-closed socket)
                    conn.close()
                    d_lis.append(conn)
                    continue
                conn.send(data.upper())  # echo back, uppercased
            except BlockingIOError:
                pass  # nothing to read on this connection yet
            except ConnectionResetError:
                conn.close()
                d_lis.append(conn)
        for conn in d_lis:
            c_list.remove(conn)  # purge dead connections
        d_lis.clear()
IO多路复用
import socket
import selectors


def accept(server):
    """Accept a new connection and watch it for readable data."""
    conn, addr = server.accept()
    sel.register(conn, selectors.EVENT_READ, read)


def read(conn):
    """Echo back uppercased data; unregister and close on EOF or reset."""
    try:
        data = conn.recv(1024)
        if not data:
            conn.close()
            sel.unregister(conn)
            return
        conn.send(data.upper())
    except ConnectionResetError:
        conn.close()
        sel.unregister(conn)
        return


server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow port reuse
server.bind(('127.0.0.1', 8080))
server.listen(5)  # backlog size
server.setblocking(False)  # all socket calls become non-blocking
sel = selectors.DefaultSelector()  # picks the best multiplexer (epoll/kqueue/...)
sel.register(server, selectors.EVENT_READ, accept)

while True:
    events = sel.select()  # block until some registered fd is ready
    for key, mask in events:
        callback = key.data  # the handler registered above (accept / read)
        callback(key.fileobj)
异步IO
效率最高,使用最多
python 的异步框架
- asyncio
- tornado
- fastapi
- django
- sanic
- vibora
- quart
- twisted
- aiohttp
事件循环自动检测并执行我们添加给它的任务
import asyncio
import socket
from threading import current_thread
import time


# Python 3.4 style: generator-based coroutines.
# NOTE(review): @asyncio.coroutine and passing bare coroutines to
# asyncio.wait() were removed in Python 3.11 — this section only runs
# on older interpreters and is kept for historical comparison.
@asyncio.coroutine
def f1():
    print('f1 start', current_thread())
    yield from asyncio.sleep(1)  # awaitable (coroutine / Task / Future)
    print('f1 end', current_thread())


@asyncio.coroutine
def f2():
    print('f2 start', current_thread())
    yield from asyncio.sleep(1)
    print('f2 end', current_thread())


tasks = [f1(), f2()]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
# Python 3.5 style: async/await syntax
async def f1():
    print('f1 start', current_thread())
    await asyncio.sleep(1)  # awaitable (coroutine / Task / Future)
    print('f1 end', current_thread())


async def f2():
    print('f2 start', current_thread())
    await asyncio.sleep(1)
    print('f2 end', current_thread())


tasks = [f1(), f2()]
# NOTE(review): get_event_loop() outside a running loop is deprecated since
# 3.10, and asyncio.wait() rejects bare coroutines on 3.11+ — kept as a
# period-accurate 3.5 demo
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
# Python 3.7 style: asyncio.run()
async def f1():
    print('f1 start', current_thread())
    await asyncio.sleep(1)  # awaitable (coroutine / Task / Future)
    print('f1 end', current_thread())


async def f2():
    print('f2 start', current_thread())
    await asyncio.sleep(1)
    print('f2 end', current_thread())


async def _main():
    # fix: asyncio.wait() requires Task objects on Python 3.11+;
    # bare coroutines are rejected with TypeError
    await asyncio.wait([asyncio.create_task(f1()), asyncio.create_task(f2())])


asyncio.run(_main())
# awaitables (coroutine objects)
# blocking calls must be replaced with the async library's equivalents:
#   time.sleep()    --> asyncio.sleep()
#   server.accept() --> loop.sock_accept()
#   conn.recv()     --> loop.sock_recv()
# third-party: aiohttp, aiomysql
async def recv():
    print('进入IO')
    # time.sleep() would block the whole loop; asyncio.sleep yields control
    await asyncio.sleep(3)
    print('结束IO')
    return 'hello'


async def f1():
    print('f1 start', current_thread())
    data = await recv()  # awaitable (coroutine / Task / Future)
    print(data)
    print('f1 end', current_thread())


async def f2():
    print('f2 start', current_thread())
    data = await recv()
    print(data)
    print('f2 end', current_thread())


async def _main():
    # fix: wrap in Tasks — asyncio.wait() rejects bare coroutines on 3.11+
    await asyncio.wait([asyncio.create_task(f1()), asyncio.create_task(f2())])


asyncio.run(_main())
# awaitables (Task objects)
async def nested():
    print('进入IO')
    await asyncio.sleep(3)
    print('退出IO')
    return 42


async def main(name):
    print(name, 'start')
    task = asyncio.create_task(nested())  # schedule the coroutine as a Task
    res = await task
    print(res)


async def main2(name):
    print(name, 'start')
    task = asyncio.create_task(nested())
    task.cancel()  # cancel before it completes
    try:
        res = await task  # no result will ever arrive
    except asyncio.CancelledError:
        # fix: awaiting a cancelled task raises CancelledError; handle it
        # locally so the cancellation does not propagate (matches the
        # original intent: the following print must not run)
        return
    print(res)


async def main3(name):
    print(name, 'start')
    task_list = [
        asyncio.create_task(nested()),
        asyncio.create_task(nested()),
    ]
    # done = finished tasks, pending = unfinished ones (pass timeout=1
    # to stop waiting early and leave stragglers in pending)
    done, pending = await asyncio.wait(task_list)
    print(done)
    print(pending)
    for task in done:
        task.result()


async def _main():
    # fix: asyncio.wait() requires Task objects on Python 3.11+
    await asyncio.wait([
        asyncio.create_task(main('任务1')),
        asyncio.create_task(main2('任务2')),
        asyncio.create_task(main3('任务3')),
    ])


asyncio.run(_main())
# Python 3.8: Tasks can be given names
async def nested():
    print('进入IO')
    await asyncio.sleep(3)
    print('退出IO')
    return 42


async def main(name):
    print(name, 'start')
    task_list = [
        asyncio.create_task(nested(), name='a'),
        asyncio.create_task(nested(), name='b'),
    ]
    # done = finished tasks, pending = unfinished (add timeout=1 to
    # stop waiting early)
    done, pending = await asyncio.wait(task_list)
    print(done)
    print(pending)
    for task in done:
        task.result()


asyncio.run(main('任务3'))
# invocation without a main function
async def nested():
    print('进入IO')
    await asyncio.sleep(3)
    print('退出IO')
    return 42


task_list = [
    nested(),
    nested(),
]
# NOTE(review): passing bare coroutines to asyncio.wait() raises TypeError
# on Python 3.11+; on older versions wait() wrapped them in Tasks itself
done, pending = asyncio.run(asyncio.wait(task_list))
print(done)
# awaitables (Future objects)
# rarely used directly; a Future is a low-level "result that arrives later"
async def f1(future):
    await asyncio.sleep(3)
    future.set_result('hello')  # completing the future wakes its awaiter


async def main():
    loop = asyncio.get_running_loop()
    future = loop.create_future()
    loop.create_task(f1(future))
    res = await future  # suspends here until set_result() is called
    print(res)


asyncio.run(main())
# using async IO with code that does not support it:
# wrap the blocking call in a thread pool and await the resulting future.
# heavier than native async, but it keeps the event loop responsive.
def f1():
    time.sleep(3)  # genuinely blocking work
    return 'hello'


async def main():
    loop = asyncio.get_running_loop()
    # None -> the loop's default ThreadPoolExecutor
    future = loop.run_in_executor(None, f1)
    res = await future
    print(res)


asyncio.run(main())
迭代器
# iterator
class MyRange(object):
    """range-like iterator: MyRange(end) or MyRange(start, end)."""

    def __init__(self, start, end=None) -> None:
        # fix: test `end is not None` so an explicit end of 0 is honoured
        # (the original `if end:` silently treated MyRange(n, 0) as MyRange(n))
        if end is not None:
            self.count = start - 1
            self.end = end
        else:
            self.count = -1
            self.end = start

    def add_count(self):
        """Advance and return the next value, or None when exhausted."""
        self.count += 1
        # fix: >= (not ==) so start > end yields nothing instead of
        # incrementing past end and looping forever
        if self.count >= self.end:
            return None
        return self.count

    def __iter__(self):
        return self

    def __next__(self):
        value = self.add_count()
        if value is None:
            raise StopIteration
        return value


for i in MyRange(10):
    print(i)
# async iterator
class MyRange(object):
    """Async range-like iterator: one value per second."""

    def __init__(self, start, end=None) -> None:
        # fix: test `end is not None` so an explicit end of 0 is honoured
        if end is not None:
            self.count = start - 1
            self.end = end
        else:
            self.count = -1
            self.end = start

    async def add_count(self):
        await asyncio.sleep(1)  # simulated async IO before producing a value
        self.count += 1
        # fix: >= (not ==) so start > end terminates instead of looping forever
        if self.count >= self.end:
            return None
        return self.count

    def __aiter__(self):
        return self

    async def __anext__(self):
        value = await self.add_count()
        if value is None:
            raise StopAsyncIteration
        return value


async def main():
    async for i in MyRange(10):
        print(i)


asyncio.run(main())
上下文管理器
# context manager
class Client(object):
    """Blocking TCP client usable via `with`: connects on enter, closes on exit."""

    def __init__(self, ip, port) -> None:
        self.ip = ip
        self.port = port

    def recv(self):
        pass  # placeholder — no real read implemented

    def __enter__(self):  # runs only when used in a `with` statement
        self.c = socket.socket()
        self.c.connect((self.ip, self.port))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.c.close()  # socket is closed even if the body raised


with Client('127.0.0.1', 8080) as f:
    f.recv()
# async context manager
class Client(object):
    """Async TCP client usable via `async with`; must be built inside a loop."""

    def __init__(self, ip, port) -> None:
        self.ip = ip
        self.port = port
        # get_running_loop() requires an active event loop at construction
        self.loop = asyncio.get_running_loop()

    async def recv(self):
        # fix: await the sock_recv coroutine — the original returned the
        # un-awaited coroutine object instead of the received bytes
        data = await self.loop.sock_recv(self.c, 1024)
        return data

    async def send(self, data):
        await self.loop.sock_sendall(self.c, data.encode('utf-8'))

    async def __aenter__(self):  # runs only via `async with`
        self.c = socket.socket()
        # non-blocking connect to the server
        await self.loop.sock_connect(self.c, (self.ip, self.port))
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self.c.close()


async def main():
    async with Client('127.0.0.1', 8080) as f:
        await f.send('abc')
        data = await f.recv()
        print(data)


asyncio.run(main())

浙公网安备 33010602011771号