Python Processes, Part 4 (Sharing and Communication)

Preface: each process has its own independent memory space, so processes do not affect or interfere with one another.

So how can processes share resources or communicate? Python provides several mechanisms; the code examples are below.

  1) Shared queue (Queue)

# Process synchronization 1: implemented with a Queue
# Producer-consumer pattern
from multiprocessing import Process, Queue
import time, random


def write(q):
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())


def read(q):
    time.sleep(1)
    while not q.empty():
        print('Get %s from queue.' % q.get(True))
        time.sleep(1)


if __name__=='__main__':
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    pw.start()
    pr.start()
    pw.join()
    pr.join()
    print('Done!')

  2) JoinableQueue

import multiprocessing
import time

# A subclass of Process must implement run(), because that is what start() invokes.
# multiprocessing.JoinableQueue() is also a cross-process queue (first in, first out);
# here every item taken from it is a task object (a function or a callable object).
# Whenever a task from this queue has been executed, the queue must be told by calling task_done().
# Once every item has been processed and task_done() has been called for each one,
# the queue considers all of its work finished (this is what join() waits for).
# Finally, put one None into the queue per consumer process, as an end-of-work marker.


class Consumer(multiprocessing.Process):  # consumer class: arg 1 is the task queue, arg 2 is the result queue
    # a Process subclass
    def __init__(self, task_queue, result_queue):
        super().__init__()
        # multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue

    # Override the run method of the parent Process class
    def run(self):
        proc_name = self.name  # get the process name
        while True:
            next_task = self.task_queue.get()  # fetch the next task object from the task queue
            if next_task is None:
                # Poison pill means shutdown
                print(('%s: Exiting' % proc_name))
                self.task_queue.task_done()
                break
            print(('%s: %s' % (proc_name, next_task)))
            answer = next_task()  # __call__()
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return


class Task(object):  # the task a process executes; returns the expression as a string

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __call__(self):
        time.sleep(0.1)  # pretend to take some time to do the work
        return '%s * %s = %s' % (self.a, self.b, self.a * self.b)

    def __str__(self):
        return '%s * %s' % (self.a, self.b)


if __name__ == '__main__':
    # create the task queue
    tasks = multiprocessing.JoinableQueue()
    # an ordinary cross-process queue for the results
    results = multiprocessing.Queue()
    # number of CPU cores on this machine
    num_consumers = multiprocessing.cpu_count()
    print(('Creating %d consumers' % num_consumers))
    # create one consumer subprocess per CPU core
    consumers = [Consumer(tasks, results) for i in range(num_consumers)]
    # start the subprocesses one by one
    for w in consumers:
        w.start()
    num_jobs = 10
    for i in range(num_jobs):
        tasks.put(Task(i, i))
    # poison pills: put one None per consumer process
    # each consumer loops in run() and only breaks out (and exits) when it gets a None
    for i in range(num_consumers):
        tasks.put(None)
    # wait until every task has been processed
    tasks.join()
    # print the task results from the result queue
    while num_jobs:
        result = results.get()
        print('Result: %s' % result)
        num_jobs -= 1
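
The task_done()/join() handshake described in the comments above can also be seen in a much smaller, function-based form. The sketch below is only illustrative (the worker function and item count are not part of the original example):

import multiprocessing


def worker(q):
    while True:
        item = q.get()
        if item is None:  # poison pill: one per worker
            q.task_done()
            break
        print('processing', item)
        q.task_done()  # tell the queue this item is finished


if __name__ == '__main__':
    q = multiprocessing.JoinableQueue()
    workers = [multiprocessing.Process(target=worker, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for i in range(5):
        q.put(i)
    for _ in workers:  # one None per worker
        q.put(None)
    q.join()  # blocks until task_done() has been called for every item that was put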

 

  3) Signaling between processes with Event

import multiprocessing
import time


def wait_for_event(e):
    print('wait_for_event: starting')
    e.wait()  # block until the event is set; waits indefinitely if it is never set
    print('wait_for_event: e.is_set()->', e.is_set())


def wait_for_event_timeout(e, t):
    print('wait_for_event_timeout: starting')
    e.wait(t)  # wait at most t seconds; if the event is still not set when the timeout expires, continue anyway
    print('wait_for_event_timeout: e.is_set()->', e.is_set())
    e.set()  # set the internal flag to true (this is what eventually wakes wait_for_event)


if __name__ == '__main__':
    e = multiprocessing.Event()  # create the event
    print("begin,e.is_set()", e.is_set())
    w1 = multiprocessing.Process(name='block', target=wait_for_event, args=(e,))
    w1.start()
    w2 = multiprocessing.Process(name='nonblock', target=wait_for_event_timeout, args=(e, 2))  # try changing 2 to 5 and compare the result
    w2.start()
    print('main: waiting before calling Event.set()')
    time.sleep(3)
    # e.set()  # comment/uncomment this line to compare the behaviour
    print('main: event is set')
    
Output:
begin,e.is_set() False
main: waiting before calling Event.set()
wait_for_event_timeout: starting
wait_for_event: starting
wait_for_event_timeout: e.is_set()-> False
wait_for_event: e.is_set()-> True
main: event is set

 

  4) Pipes (Pipe)

# Passing messages between processes with a Pipe. A pipe connects exactly two processes (one-to-one only, no one-to-many or many-to-one), and each send() is paired with one recv().
import multiprocessing as mp


def proc_1(pipe):
    pipe.send('hello')
    print('proc_1 received: %s' %pipe.recv())
    pipe.send("what is your name?")
    print('proc_1 received: %s' %pipe.recv())


def proc_2(pipe):
    print('proc_2 received: %s' %pipe.recv())
    pipe.send('hello, too')
    print('proc_2 received: %s' %pipe.recv())
    pipe.send("I don't tell you!")


if __name__ == '__main__':
    # create a pipe; Pipe() returns a pair of connection objects
    pipe = mp.Pipe()
    print(len(pipe))
    print(type(pipe))
    # give the first connection to process 1
    p1 = mp.Process(target=proc_1, args=(pipe[0], ))
    # give the second connection to process 2
    p2 = mp.Process(target=proc_2, args=(pipe[1], ))
    p2.start()
    p1.start()
    p2.join()
    p1.join()
    
Output:
2
<class 'tuple'>
proc_2 received: hello
proc_1 received: hello, too
proc_2 received: what is your name?
proc_1 received: I don't tell you!
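
As the comment above notes, a pipe always connects exactly two endpoints. When traffic only needs to flow in one direction, the pipe can be created with duplex=False, in which case the first connection can only receive and the second can only send. A minimal sketch (not part of the original post):

import multiprocessing as mp


def sender(conn):
    conn.send('one-way message')
    conn.close()


if __name__ == '__main__':
    recv_conn, send_conn = mp.Pipe(duplex=False)  # recv_conn: receive only, send_conn: send only
    p = mp.Process(target=sender, args=(send_conn,))
    p.start()
    print(recv_conn.recv())
    p.join()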

  5) Condition

# Inter-process communication using Condition (notify_all and wait). This can be one-to-many: a single notify_all can wake several waiting processes.

import multiprocessing as mp
import time


def consumer(cond):  # consumer function
    with cond:  # acquire the underlying lock; wait(), notify() and notify_all() may only be called while it is held
        print("consumer before wait")
        cond.wait()  # wait until notified
        print("consumer after wait")


def producer(cond):  # producer function
    with cond:
        print("producer before notifyAll")
        cond.notify_all()  # notify the consumers that they can proceed
        print("producer after notifyAll")


if __name__ == '__main__':
    condition = mp.Condition()
    p1 = mp.Process(name="p1", target=consumer, args=(condition,))
    p2 = mp.Process(name="p2", target=consumer, args=(condition,))
    p3 = mp.Process(name="p3", target=producer, args=(condition,))
    p1.start()
    time.sleep(2)
    p2.start()
    time.sleep(2)
    p3.start()
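
Note that the example relies on the time.sleep() calls: both consumers must already be waiting when the producer calls notify_all(), because a notification sent while nobody is waiting is simply lost. A common safeguard is to pair the Condition with a piece of shared state and re-check it in a loop. A minimal sketch (the ready flag is an illustrative addition, not part of the original example):

import multiprocessing as mp
import time


def consumer(cond, ready):
    with cond:
        while ready.value == 0:  # re-check the shared state; protects against a missed notification
            cond.wait()
        print("consumer: resource is ready")


def producer(cond, ready):
    with cond:
        ready.value = 1  # update the state first ...
        cond.notify_all()  # ... then wake any waiting consumers


if __name__ == '__main__':
    cond = mp.Condition()
    ready = mp.Value('i', 0)
    c1 = mp.Process(target=consumer, args=(cond, ready))
    c2 = mp.Process(target=consumer, args=(cond, ready))
    p = mp.Process(target=producer, args=(cond, ready))
    c1.start()
    c2.start()
    time.sleep(1)
    p.start()
    for proc in (c1, c2, p):
        proc.join()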

  6) Shared numbers and arrays (Value, Array)

from multiprocessing import Process, Value, Array


def f(n, a):
    n.value = 3.1415927
    print('n changed in child process:', n.value)
    for i in range(len(a)):
        a[i] = -a[i]
        print('element changed in child process:', a[i])


if __name__ == '__main__':
    num = Value('d', 0.0)  # a number shared between processes, initial value 0.0; 'd' means double-precision float
    arr = Array('i', range(10))  # an array shared between processes, initialised from range(10); 'i' means signed int
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    print(num.value)  # read the value of the shared variable num
    print(arr[:])
Output:
n changed in child process: 3.1415927
element changed in child process: 0
element changed in child process: -1
element changed in child process: -2
element changed in child process: -3
element changed in child process: -4
element changed in child process: -5
element changed in child process: -6
element changed in child process: -7
element changed in child process: -8
element changed in child process: -9
3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

# Conclusion: Value and Array make numbers and sequences shareable between processes.
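
One caveat: Value and Array only make the memory shared; if several processes update the same Value concurrently, the update itself still has to be protected, because n.value += 1 is a read-modify-write and is not atomic. A minimal sketch using the lock that comes with the Value (illustrative, not part of the original post):

from multiprocessing import Process, Value


def add_100(counter):
    for _ in range(100):
        with counter.get_lock():  # 'counter.value += 1' alone is not atomic
            counter.value += 1


if __name__ == '__main__':
    counter = Value('i', 0)
    ps = [Process(target=add_100, args=(counter,)) for _ in range(4)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()
    print(counter.value)  # expected: 400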

  7) Manager-based sharing

# Sharing a string
from multiprocessing import Process, Manager
from ctypes import c_char_p


def greet(shareStr):
    shareStr.value = shareStr.value + ", World!"


if __name__ == '__main__':
    manager = Manager()
    shareStr = manager.Value(c_char_p, "Hello")  # shared string
    process = Process(target=greet, args=(shareStr,))
    process.start()
    process.join()
    print(shareStr.value)
    
Output:
Hello, World!

# Sharing a list and a dict with Manager

from multiprocessing import Process, Manager


def f(shareDict, shareList):
    shareDict[1] = '1'
    shareDict['2'] = 2
    shareDict[0.25] = None
    shareList.reverse()  # reverse the list in place


if __name__ == '__main__':
    manager = Manager()
    shareDict = manager.dict()  # create a shared dict
    shareList = manager.list(range(10))  # create a shared list
    p = Process(target=f, args=(shareDict, shareList))
    p.start()
    p.join()
    print(shareDict)
    print(shareList)
    
Output:
{1: '1', '2': 2, 0.25: None}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]


# A queue shared across a process pool, implemented with Manager.
Note: a queue imported via "from multiprocessing import Queue" cannot be shared with the workers of a Pool; it can only be shared between processes created directly with the Process class (a sketch of the failing case follows the example below).

from multiprocessing import Pool, Manager


def func(q):
    print("*"*10)
    q.put("12346")


if __name__ == "__main__":
    manager = Manager()
    q = manager.Queue()
    pool = Pool(processes=4)
    for i in range(5):
        pool.apply_async(func,(q,))
    pool.close()
    pool.join()
    print(q.qsize())
    
Output:
**********
**********
**********
**********
**********
5
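
To illustrate the note above, the sketch below (illustrative, not from the original post) passes a plain multiprocessing.Queue to a Pool worker instead of a manager queue; fetching the result then typically fails with a RuntimeError saying that Queue objects should only be shared between processes through inheritance:

from multiprocessing import Pool, Queue


def func(q):
    q.put("12346")


if __name__ == "__main__":
    q = Queue()  # a plain queue, not a manager queue
    pool = Pool(processes=4)
    res = pool.apply_async(func, (q,))
    try:
        res.get()  # pickling the queue for the worker fails here
    except RuntimeError as e:
        print("failed:", e)
    pool.close()
    pool.join()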

  8) Sharing class instances

# Sharing a class instance between processes
import time, os
from multiprocessing import Pool, Lock
from multiprocessing.managers import BaseManager


class MyManager(BaseManager):  # a subclass of BaseManager
    pass


def Manager():  # create and start a MyManager instance
    m = MyManager()
    m.start()
    return m


class Counter:  # the class whose instances will be shared between processes
    def __init__(self, value):
        self.val = value
        self.lock = Lock()

    def increment(self):  # add 1 while holding the lock
        with self.lock:
            self.val += 1

    def value(self):  # read the value while holding the lock
        with self.lock:
            return self.val


MyManager.register('Counter', Counter)  # register the Counter class with MyManager


def long_time_task(name, counter):  # the task function run by each process
    time.sleep(0.2)
    print('Run task %s (%s)...\n' % (name, os.getpid()))
    start = time.time()
    for i in range(50):
        time.sleep(0.01)
        counter.increment()
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))


if __name__ == '__main__':
    manager = Manager()
    counter = manager.Counter(0)  # create the globally shared Counter instance, with initial value 0
    print('Parent process %s.' % os.getpid())
    p = Pool()
    for i in range(5):
        p.apply_async(long_time_task, args=(str(i), counter))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
    print(counter.value())
    
Output:
Parent process 93864.
Waiting for all subprocesses done...
Run task 0 (94144)...

Run task 1 (50392)...

Run task 3 (87032)...

Run task 2 (91040)...

Run task 4 (88856)...

Task 0 runs 0.54 seconds.
Task 3 runs 0.54 seconds.
Task 1 runs 0.55 seconds.
Task 4 runs 0.54 seconds.
Task 2 runs 0.54 seconds.
All subprocesses done.
250

 
