常用脚本

1. 获取两个月前的第一天

import datetime

def get_deadline():
    '''
    Return the first day of the month two months before today.

    :return: date string formatted as 'YYYY-MM-DD'
    '''
    # Read today once so year and month cannot come from different days
    # (the original called datetime.date.today() twice).
    today = datetime.date.today()
    # Months are 1-based; borrow a year when the subtraction underflows.
    month = today.month - 2
    year = today.year
    if month < 1:
        month += 12
        year -= 1
    return datetime.date(year, month, 1).strftime('%Y-%m-%d')


print(get_deadline())

 

2. 通过双重 fork 创建守护进程(daemon),在后台执行任务

import sys
import logging

def daemon():
    """
    Daemonize the current process using the classic UNIX double-fork recipe.

    Returns the child's pid (> 0) in each parent process, -1 when a fork
    fails, and 0 in the final daemon process, which ends up with its working
    directory set to /opt/pbx, a fresh session, umask 0, and stdio redirected
    to /dev/null.
    """
    import os
    # First fork: the original parent returns immediately with the child pid.
    try:
        pid = os.fork()
        if pid > 0:
            return pid
    except OSError as error:
        logging.error('fork #1 failed: %d (%s)' % (error.errno, error.strerror))
        return -1
    # Detach from the parent environment: new cwd, new session, clear umask.
    os.chdir('/opt/pbx')
    os.setsid()
    os.umask(0)
    # Second fork: the session leader exits so the daemon can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            return pid
    except OSError as error:
        logging.error('fork #2 failed: %d (%s)' % (error.errno, error.strerror))
        return -1
    # Redirect stdio to /dev/null so the daemon cannot touch the old terminal.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open("/dev/null", 'r')
    so = open("/dev/null", 'ab')
    se = open("/dev/null", 'ab', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return 0


def main():
    """Daemonize; in the parent (or on failure) return the pid / -1."""
    result = daemon()
    if result:
        return result


if __name__ == "__main__":
    main()

 

3.定时器(指定时间执行,指定时间结束)

import datetime
import time
import threading
import logging

START_HOUR = 0  # 时
START_MINUTE = 00  # 分


def run():
    """Print 'run' when invoked at 06:00 or later; otherwise do nothing."""
    now = datetime.datetime.now()
    if now.hour >= 6:
        print('run')


def main():
    '''
    Schedule run() at the next occurrence of START_HOUR:START_MINUTE
    using a one-shot threading.Timer.
    :return:
    '''
    now = datetime.datetime.now()
    target_str = '%s %s:%s' % (time.strftime('%Y-%m-%d'), START_HOUR, START_MINUTE)
    target_ts = time.mktime(time.strptime(target_str, '%Y-%m-%d %H:%M'))

    # Today's slot already passed -> aim for the same time tomorrow.
    passed = (now.hour > START_HOUR) or (now.hour == START_HOUR and now.minute >= START_MINUTE)
    if passed:
        target_ts += 24 * 60 * 60

    wait_time = target_ts - time.time()
    logging.info('wait time is[%s]', wait_time)

    timer = threading.Timer(wait_time, run)
    timer.start()


if __name__ == '__main__':
    main()

  

4.设置日志打印到指定文件

import logging


def main():
    """Emit a single INFO record to exercise the logging configuration."""
    logging.info('run...')


if __name__ == '__main__':
    logging.basicConfig(filename="run.log", level=logging.INFO,
                        format="%(asctime)s  [%(filename)s:%(lineno)d] %(message)s",
                        datefmt="%m/%d/%Y %H:%M:%S [%A]")
    main()

Code

  

5.单进程下设置日志打印到指定文件,并且当文件大于20M的时候,自动创建新文件

from logging.handlers import RotatingFileHandler
import logging
import os

def set_log():
    """Attach a rotating file handler (20 MB per file, 200 backups) to the root logger."""
    handler = RotatingFileHandler('run/run.log', "a", 20 * 1024 * 1024, 200)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] [process:%(process)s] [%(filename)s:%(lineno)d]  %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)


def main():
    """Create the log directory if needed, install the handler, log startup."""
    log_dir = 'run'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    set_log()
    logging.info('start...')


if __name__ == '__main__':
    main()

  

6.多进程下设置日志打印到指定文件,并且当文件大于20M的时候,自动创建新文件

from cloghandler import ConcurrentRotatingFileHandler
import logging
import os

def set_log():
    """Attach a multiprocess-safe rotating handler (20 MB, 200 backups) to the root logger."""
    handler = ConcurrentRotatingFileHandler('run/run.log', "a", 20 * 1024 * 1024, 200)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] [process:%(process)s] [%(filename)s:%(lineno)d]  %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)


def main():
    """Create the log directory if needed, install the handler, log startup."""
    log_dir = 'run'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    set_log()
    logging.info('start...')


if __name__ == '__main__':
    main()

  

7.连接数据库pymysql

#!/usr/bin/env python
# -*- coding:utf-8 -*-


import threading
import pymysql
import traceback
import logging



class BaseDB(object):
    '''
    Small retrying wrapper around a pymysql connection.

    find/query run SELECTs; update/insert/delete are resolved dynamically by
    __getattr__ to the shared __execute helper. Every operation retries up to
    3 times, reconnecting on failure.
    '''
    def __init__(self):
        # Connection is opened lazily on first use.
        self.conn = None
        # BUG FIX: this was misspelled `Db_CONFIG`, while every access site
        # reads `DB_CONFIG`; __getattr__ silently resolved the missing name
        # to None, so connect() always failed with a logged TypeError.
        self.DB_CONFIG = {
            "host": "192.168.90.123",
            "port": 3306,
            "db": "test1",
            "user": "root",
            "passwd": "123456",
        }

    def connect(self):
        '''
        Open the pymysql connection (DictCursor rows, autocommit enabled).
        Failures are logged and leave self.conn unchanged.
        :return: None
        '''
        try:
            self.conn = pymysql.connect(host=self.DB_CONFIG['host'],
                                        port=self.DB_CONFIG['port'],
                                        db=self.DB_CONFIG['db'],
                                        user=self.DB_CONFIG['user'],
                                        password=self.DB_CONFIG['passwd'],
                                        cursorclass=pymysql.cursors.DictCursor,  # rows as dicts
                                        autocommit=True,                         # commit automatically
                                        charset='utf8')
        except Exception:
            logging.error(traceback.format_exc())

    def reconnect(self):
        '''
        Close the current connection (best effort) and open a new one.
        :return: None
        '''
        try:
            self.conn.close()
        except Exception:
            pass
        finally:
            self.connect()

    def find(self, sql, *args):
        '''
        Return every row matching *sql*, retrying up to 3 times.
        :param sql: SQL with %s placeholders
        :param args: placeholder values
        :return: list[dict] (or the empty initial value when all retries fail)
        '''
        database = []
        count = 0
        while count < 3:
            try:
                if not self.conn:
                    self.connect()
                with self.conn.cursor() as cursor:
                    cursor.execute(sql, args)
                    database = cursor.fetchall()
                break
            except Exception as e:
                self.reconnect()
                count += 1
                logging.error('error[%s],sql[%s],*args[%s]', e, sql, args)
                continue
        return database

    def query(self, sql, *args):
        '''
        Return the first row matching *sql*, retrying up to 3 times.
        :param sql: SQL with %s placeholders
        :param args: placeholder values
        :return: dict or None
        '''
        database = None
        count = 0
        while count < 3:
            try:
                if not self.conn:
                    self.connect()
                with self.conn.cursor() as cursor:
                    cursor.execute(sql, args)
                    database = cursor.fetchone()
                break
            except Exception as e:
                self.reconnect()
                count += 1
                logging.error('error[%s],sql[%s],*args[%s]', e, sql, args)
                continue
        return database

    def __getattr__(self, item):
        # Lazily alias update/insert/delete to the shared __execute helper
        # and cache the bound method on the instance.
        if item in ('update', 'insert', 'delete'):
            setattr(self, item, self.__execute)
            return getattr(self, item)
        # BUG FIX: previously fell through and returned None for every other
        # missing attribute, hiding typos (including the DB_CONFIG one).
        raise AttributeError(item)

    def __execute(self, sql, *args):
        '''
        Run an UPDATE/INSERT/DELETE statement, retrying up to 3 times.
        :param sql: SQL with %s placeholders
        :param args: placeholder values
        :return: None
        '''
        count = 0
        while count < 3:
            try:
                if not self.conn:
                    self.connect()
                with self.conn.cursor() as cursor:
                    cursor.execute(sql, args)
                break
            except Exception as e:
                self.reconnect()
                count += 1
                logging.error('error[%s],sql[%s],*args[%s]', e, sql, args)
                continue
        return

  

8.找到指定目录下名字匹配日期格式(如 2018-03-04)的子目录的绝对路径

import traceback
import datetime
import logging
import os
import re


def execute(root, deadline):
    '''
    Print the absolute path of every date-named (YYYY-MM-DD style) directory
    under *root* whose name sorts strictly before *deadline*.

    Only runs during the night window: aborts once the clock reaches 06:00.

    :param root: directory tree to scan
    :param deadline: 'YYYY-MM-DD' cutoff string (lexicographic comparison)
    :return: None
    '''
    try:
        now = datetime.datetime.now()
        # The scan is only meant to run before 06:00.
        if now.hour >= 6:
            logging.info('now time is [%s] stop', now)
            return

        # Hoist the date pattern out of the walk loop.
        date_re = re.compile(r'^(\d{4})-(0?[1-9]|1[012])-(0?[1-9]|[12][0-9]|3[01])$')
        for dirpath, dirnames, filenames in os.walk(root):
            for dirname in dirnames:
                work_path = os.path.join(dirpath, dirname)
                logging.info('working path is[%s]', work_path)

                # BUG FIX: the original logged file_q.qsize(), but file_q is
                # never defined in this standalone snippet, so every matching
                # directory raised NameError (swallowed by the except below).
                if date_re.match(dirname) and dirname < deadline:
                    logging.info('put path is [%s]', work_path)
                    print(work_path)

    except Exception:
        logging.error(traceback.format_exc())


execute('/home/recode', '2018-06-01')

  

完善的一些脚本

1.删除目录下的指定文件

  用到的相关内容

    1.开启定时器,指定时间执行,指定时间结束

    2.开启子进程处理程序

    3.日志文件达到指定大小,自动生成新文件

    4.日期处理

    5.正则匹配(2018-03-14)

    6.获取目录下的文件绝对路径

 

#!/usr/bin/env python
# -*- coding:utf-8 -*-

from logging.handlers import RotatingFileHandler
from multiprocessing import Queue
import traceback
import datetime
import time
import threading
import logging
import sys
import os
import re

START_HOUR = 0  # 时
START_MINUTE = 00  # 分


def daemon():
    """
    Daemonize the current process using the classic UNIX double-fork recipe.

    Returns the child's pid (> 0) in each parent process, -1 when a fork
    fails, and 0 in the final daemon process, which ends up with its working
    directory set to /opt/pbx, a fresh session, umask 0, and stdio redirected
    to /dev/null.
    """
    import os
    # First fork: the original parent returns immediately with the child pid.
    try:
        pid = os.fork()
        if pid > 0:
            return pid
    except OSError as error:
        logging.error('fork #1 failed: %d (%s)' % (error.errno, error.strerror))
        return -1
    # Detach from the parent environment: new cwd, new session, clear umask.
    os.chdir('/opt/pbx')
    os.setsid()
    os.umask(0)
    # Second fork: the session leader exits so the daemon can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            return pid
    except OSError as error:
        logging.error('fork #2 failed: %d (%s)' % (error.errno, error.strerror))
        return -1
    # Redirect stdio to /dev/null so the daemon cannot touch the old terminal.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open("/dev/null", 'r')
    so = open("/dev/null", 'ab')
    se = open("/dev/null", 'ab', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return 0


def set_log():
    """Attach a rotating file handler (20 MB per file, 200 backups) to the root logger."""
    handler = RotatingFileHandler('delrecord/delrecord.log', "a", 20 * 1024 * 1024, 200)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] [process:%(process)s] [%(filename)s:%(lineno)d]  %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)


def get_deadline():
    '''
    Return the cleanup cutoff date: the first day of the month two months
    before today.

    :return: date string formatted as 'YYYY-MM-DD'
    '''
    # Read today once so year and month cannot come from different days
    # (the original called datetime.date.today() twice).
    today = datetime.date.today()
    # Months are 1-based; borrow a year when the subtraction underflows.
    month = today.month - 2
    year = today.year
    if month < 1:
        month += 12
        year -= 1
    return datetime.date(year, month, 1).strftime('%Y-%m-%d')


def get_file(dir):
    '''
    List the names of all entries (files and subdirectories) directly
    under *dir*.
    :param dir: directory to list
    :return: list of entry names
    '''
    return os.listdir(dir)


def delete_file():
    '''
    Consume directory paths from the module-level queue *file_q* forever,
    deleting each directory's contents and then, once emptied, the directory
    itself. Only returns if an exception escapes the loop.
    :return: None
    '''
    try:
        while True:
            # Blocks until execute() enqueues another path.
            del_path = file_q.get()
            logging.info('get path is[%s] queue[%s]', del_path, file_q.qsize())
            if del_path:
                if os.path.exists(del_path):
                    files = get_file(del_path)
                    for file in files:
                        del_file = os.path.join(del_path, file)
                        logging.info('delete file is [%s]', del_file)
                        if os.path.isdir(del_file):
                            # NOTE(review): os.rmdir only removes EMPTY dirs —
                            # a non-empty subdirectory raises OSError here and
                            # aborts the whole consumer loop. Confirm intended.
                            os.rmdir(del_file)
                        else:
                            os.remove(del_file)

                    # Remove the directory itself once it is empty.
                    end_file = get_file(del_path)
                    if not end_file:
                        logging.info('delete path is [%s]', del_path)
                        os.rmdir(del_path)

    except Exception as e:
        exe = traceback.format_exc()
        logging.error(exe)


def execute(root, deadline):
    '''
    Walk *root* and enqueue onto the module-level *file_q* every directory
    whose name is a date (YYYY-MM-DD style) sorting strictly before
    *deadline*; then always reschedule the next pass via run().
    Aborts without scanning once the clock reaches 06:00.
    :param root: directory tree to scan
    :param deadline: 'YYYY-MM-DD' cutoff string (lexicographic comparison)
    :return: None
    '''
    try:
        _date = datetime.datetime.now()
        # Only run during the nightly maintenance window (before 06:00).
        if _date.hour >= 6:
            logging.info('now time is [%s] stop', _date)
            return

        for dirpath, dirnames, filenames in os.walk(root):
            for dirname in dirnames:
                work_path = os.path.join(dirpath, dirname)
                logging.info('working path is[%s]', work_path)

                # Directory names like 2018-3-4 or 2018-03-04.
                if re.match(r'^(\d{4})\-(0?[1-9]|[1][012])\-(0?[1-9]|[12][0-9]|3[01])$', dirname):
                    # NOTE(review): string comparison — only reliable when the
                    # directory names are zero-padded like the deadline.
                    if dirname < deadline:
                        logging.info('put path is [%s] queue[%s]', work_path, file_q.qsize())
                        file_q.put(work_path)


    except Exception as e:
        exe = traceback.format_exc()
        logging.error(exe)

    finally:
        # Always schedule the next day's pass, even after an error.
        run()


def run():
    '''
    Schedule the cleanup pass (execute) at the next occurrence of
    START_HOUR:START_MINUTE via a one-shot Timer thread.
    :return:
    '''
    now = datetime.datetime.now()
    target_str = '%s %s:%s' % (time.strftime('%Y-%m-%d'), START_HOUR, START_MINUTE)
    target_ts = time.mktime(time.strptime(target_str, '%Y-%m-%d %H:%M'))

    # Today's slot already passed -> aim for the same time tomorrow.
    if (now.hour > START_HOUR) or (now.hour == START_HOUR and now.minute >= START_MINUTE):
        target_ts += 24 * 60 * 60

    wait_time = target_ts - time.time()
    deadline = get_deadline()
    logging.info('current deadline is[%s] wait time is[%s]', deadline, wait_time)

    threading.Timer(wait_time, execute, args=(root, deadline)).start()


def main():
    """Prepare logging, daemonize, then start the scheduler and deletion loop."""
    log_dir = 'delrecord'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    set_log()

    pid = daemon()
    if pid:
        return pid  # parent process or fork failure: nothing more to do here

    logging.info('start delete file...  working directory is [%s]', root)
    run()
    delete_file()


if __name__ == "__main__":
    # Module-level globals shared by execute()/run()/delete_file().
    root = '/home/records'   # directory tree to clean
    file_q = Queue(2000)     # bounded queue of paths pending deletion

    main()

# 完善问题;获取目录下所有文件,一次匹配

  

2.redis测试(简单版)

from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import time
import random
import redis
import traceback
import logging


REDIS_CONFIG = {
    "host": "127.0.0.1",
    # "host": "192.168.90.123",
    "port": 6379,
}

def setLog():
    """Configure root logging: append DEBUG records to /tests/test2.log."""
    options = dict(
        filename='/tests/test2.log',
        filemode='a',
        level=logging.DEBUG,
        format='[%(asctime)s-%(filename)s-%(levelname)s:%(message)s]',
        datefmt='%Y-%m-%d %I:%M:%S %p',
    )
    logging.basicConfig(**options)


class Redis(object):
    '''Retrying wrapper around a redis connection.'''

    def __init__(self):
        # Connection object, created lazily on first use.
        self.db = None

    def connect(self):
        '''
        Open the redis connection. Failures are logged and leave
        self.db unchanged.
        :return: None
        '''
        try:
            self.db = redis.Redis(host=REDIS_CONFIG["host"],
                                  port=REDIS_CONFIG["port"],
                                  decode_responses=True
                                  )
        except Exception:
            logging.error(traceback.format_exc())

    def reconnect(self):
        '''
        Drop the current connection (best effort) and open a new one.
        :return: None
        '''
        try:
            self.db.close()
        except Exception:
            pass
        finally:
            self.connect()

    def set(self, name, value):
        '''
        Set *name* to *value*, retrying up to 3 times on failure.
        :param name: key
        :param value: value to store
        :return: redis reply, or 0 when every attempt failed
        '''
        database = 0
        count = 0
        while count < 3:
            try:
                if not self.db:
                    self.connect()
                database = self.db.set(name, value)
                break
            except Exception as e:
                self.reconnect()
                count += 1
                logging.error('error[%s],name[%s] key[%s]', e, name, value)
                continue
        return database

    def get(self, name):
        '''
        Get the value stored at *name*, retrying up to 3 times on failure.
        :param name: key
        :return: stored value, or {} when every attempt failed
        '''
        database = {}
        count = 0
        while count < 3:
            try:
                if not self.db:
                    self.connect()

                database = self.db.get(name)
                break

            except Exception as e:
                self.reconnect()
                count += 1
                # BUG FIX: the format string had three placeholders but only
                # two arguments, so this call itself raised a logging error.
                logging.error('error[%s],name[%s]', e, name)
                continue

        return database


if __name__ == '__main__':
    # Latency benchmark: repeatedly GET random phone-number-like keys through
    # a single worker thread and log the running average latency.
    setLog()
    conn = Redis()
    res_l = []  # per-call latencies, accumulated across ALL rounds
    l1 = []     # running average recorded after each round
    executor = ThreadPoolExecutor()

    for n in range(10000):
        for i in range(1):
            start = time.time()
            future = executor.submit(conn.get, str(random.randint(18200000000, 18224000000)))
            res = future.result()  # wait for the single in-flight GET
            end = time.time()
            res_l.append(end - start)

        # NOTE(review): res_l is never reset, so this logs the cumulative
        # average over all rounds so far, not this round's average alone.
        logging.info('每一次查[%s] 1 个线程 平均值', float(sum(res_l)) / len(res_l))
        l1.append(float(sum(res_l)) / len(res_l))

        time.sleep(1)

    executor.shutdown(wait=True)
    # NOTE(review): the loop above runs 10000 iterations, not 1,000,000
    # as the message text suggests.
    logging.info('100万次查[%s] 1 个线程 平均值', float(sum(l1)) / len(l1))

  

aiohttp相关

   client端

#!/usr/bin/env python
# -*- coding:utf-8 -*-

import json
import aiohttp
import asyncio


async def get(url):
    """Issue a GET with a sample query string, dump status/body, return the body text."""
    async with aiohttp.ClientSession() as session:
        # Send the GET request with a demo query parameter.
        async with session.get(url, params={'key': 'value'}, headers={'Content-Type': 'application/x-www-form-urlencoded'}, timeout=1) as response:
            print(response.status)
            body = await response.text()
            print(body, type(body))
            print(await response.content.read())
            return await response.text()

async def post(url):
    """Issue a JSON POST, dump status and body, and return the body text."""
    async with aiohttp.ClientSession() as session:
        # Send the POST request with a JSON payload.
        async with session.post(url, data=json.dumps({'key': 'value'}), headers={'Content-Type': 'application/json'}) as response:
            print(response.status)
            print(await response.text())
            return await response.text()


# Grab the current event loop.
loop = asyncio.get_event_loop()
# Drive the GET demo coroutine to completion.
loop.run_until_complete(get('http://127.0.0.1:8008/'))
# loop.run_until_complete(post('http://127.0.0.1:8008/'))

   server端

#!/usr/bin/env python
# -*- coding:utf-8 -*-

import time
import json
import asyncio
import logging
import traceback
from aiohttp import web


WEB_CONFIG = {'host':'127.0.0.1', 'port': '8008'}


class Request(object):
    """Request handlers for the demo aiohttp routes."""

    async def get(self, request):
        """Echo the 'key' query parameter and the body, reply with a JSON marker."""
        key_param = request.query.get('key')
        body_text = await request.text()
        print(key_param)
        print(body_text)

        return web.Response(text=json.dumps({'msg': 'get'}), content_type='application/json')

    async def post(self, request):
        """Dump the form data plus the body (text and bytes), reply 404 with JSON."""
        form = await request.post()
        body_text = await request.text()
        body_bytes = await request.read()
        print(form)
        print(body_text)
        print(body_bytes)
        return web.Response(text=json.dumps('post'), content_type='application/json', status=404)


class Application(object):
    """Builds and runs the demo aiohttp web application."""

    def __init__(self):
        # Bind address comes from the module-level WEB_CONFIG.
        self.host = WEB_CONFIG['host']
        self.port = WEB_CONFIG['port']

    async def app_factory(self, loop=None):
        """Create the app, wire up the routes, and stash the loop on it."""
        self.routes = [
            web.get('/', Request().get),
            web.post('/', Request().post)
        ]
        # Build the application and register the routes.
        app = web.Application()
        app.add_routes(self.routes)
        # Keep the loop reachable from handlers via app['loop'].
        app['loop'] = loop
        return app

    def run_forever(self, loop=None):
        """Blocking entry point: serve the application until interrupted."""
        web.run_app(self.app_factory(loop), host=self.host, port=self.port)



loop = asyncio.get_event_loop()
Application().run_forever(loop)

   

aiomysql相关

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import asyncio
import aiomysql
import logging
import traceback


class AsyncDB(object):
    '''
    Async MySQL access through a lazily created aiomysql connection pool.
    '''

    def __init__(self):
        # Pool is created lazily on the first query.
        self.pool = None
        self.DB_CONFIG = {
            "host": "127.0.0.1",
            "port": 3306,
            "user": "root",
            "passwd": "123456",
            # BUG FIX: connection() reads DB_CONFIG["db"], which was missing
            # and raised KeyError on the first connect. "test" matches the
            # schema used by the demo query (test.tx_table) — confirm.
            "db": "test",
        }

    async def get(self, sql, args=None):
        '''Run *sql* and return the first row as a dict (or the exception on failure).'''
        try:
            if not self.pool:
                await self.connection()
            async with self.pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    await cur.execute(sql, args)
                    value = await cur.fetchone()
                    return value
        except Exception as e:
            logging.error(traceback.format_exc())
            return e

    async def query(self, sql, args=None):
        '''Run *sql* and return all rows as a list of dicts (or the exception on failure).'''
        try:
            if not self.pool:
                await self.connection()
            async with self.pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    await cur.execute(sql, args)
                    value = await cur.fetchall()
                    return value
        except Exception as e:
            logging.error(traceback.format_exc())
            return e

    async def insert(self, sql, args=None):
        '''Run an INSERT; return lastrowid on success, -1 when no row was
        affected, or the exception args on failure.'''
        try:
            if not self.pool:
                await self.connection()
            async with self.pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    ret = await cur.execute(sql, args)
                    if ret > 0:
                        return cur.lastrowid
                    return -1
        except Exception as e:
            logging.error(traceback.format_exc())
            # NOTE(review): returns e.args here but the sibling methods
            # return e itself — kept for backward compatibility.
            return e.args

    async def execute(self, sql, args=None):
        '''Run an arbitrary statement; return the affected-row count (or the exception).'''
        try:
            if not self.pool:
                await self.connection()
            async with self.pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    ret = await cur.execute(sql, args)
                    return ret
        except Exception as e:
            logging.error(traceback.format_exc())
            return e

    async def connection(self):
        '''Create the shared connection pool.'''
        self.pool = await aiomysql.create_pool(
            host=self.DB_CONFIG["host"], port=self.DB_CONFIG["port"],
            user=self.DB_CONFIG["user"], password=self.DB_CONFIG["passwd"],
            db=self.DB_CONFIG["db"],
            # BUG FIX: was `loop=loop` (a module-level global) — bind to the
            # loop actually driving this coroutine instead.
            loop=asyncio.get_event_loop(),
            autocommit=True, pool_recycle=1)


async def main():
    """Run a demo SELECT against test.tx_table and print the rows."""
    rows = await AsyncDB().query("SELECT * FROM test.tx_table;")
    print(rows)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())

 

aiopika相关

  消费者

import time
import logging
import traceback
import asyncio
import aio_pika
from aio_pika import ExchangeType

# 消费者

RABBITMQ_CONFIG={
    "host": "127.0.0.1",
    "port": 5672,
    "vhost": "/",
    "user": "guest",
    "passwd": "guest",
    "serverid": "aiopika00",
    "exchange": "exchange",
}


class RabbitMQ(object):
    # Consumer: binds a per-server exclusive queue to a direct exchange and
    # forwards every incoming message body onto the caller-supplied queue.
    def __init__(self):
        self.connection = None  # aio_pika connection, set by reconnect()
        self.channel = None     # channel on that connection
        self.config = RABBITMQ_CONFIG

    async def reconnect(self, loop):
        """(Re)connect, declare exchange + exclusive queue, start consuming."""
        if self.connection and not self.connection.is_closed:
            await self.connection.close()

        self.connection = await aio_pika.connect_robust(
            "amqp://{}:{}@{}/".format(self.config['user'], self.config['passwd'], self.config['host']), loop=loop)
        self.channel = await self.connection.channel()
        # Declare the direct exchange.
        self.recv_exchange = await self.channel.declare_exchange(self.config['exchange'], ExchangeType.DIRECT)
        # Exclusive queue named after this server id.
        self.queue = await self.channel.declare_queue('queue_%s' % (self.config['serverid']), exclusive=True)
        # Route messages whose routing key equals this server id to our queue.
        await self.queue.bind(exchange=self.recv_exchange, routing_key=self.config['serverid'])
        # Register the per-message callback.
        await self.queue.consume(self.receive)

    async def receive(self, message: aio_pika.IncomingMessage):
        """Ack the message (via process()) and hand its body to master_queue."""
        try:
            # NOTE(review): newer aio_pika versions require `async with
            # message.process()` — confirm against the installed version.
            with message.process():
                print('recv...', message.body)
                await self.master_queue.put(message.body)

        except Exception as e:
            exc = traceback.format_exc()
            logging.error(exc)

    async def start_server(self, loop):
        """Connect and begin consuming; errors are logged, not raised."""
        try:
            logging.info("start rabbitmq server")
            await self.reconnect(loop)
        except Exception as e:
            exc = traceback.format_exc()
            logging.error(exc)

        await asyncio.sleep(2)

    @classmethod
    async def run(cls, master_queue, loop):
        """Entry point: create a consumer bound to *master_queue* and start it."""
        self = cls()
        self.master_queue = master_queue
        await self.start_server(loop)


async def get():
    """Forever pull items off the module-level queue and print non-empty ones."""
    print('get msg from queue')
    while True:
        item = await queue.get()
        if item:
            print('queue msg is...', item)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # Hand-off queue between the consumer callback and get().
    queue = asyncio.Queue()
    loop.create_task(RabbitMQ.run(queue, loop))
    loop.create_task(get())
    loop.run_forever()

   发布者

import logging
import traceback
import asyncio
import aio_pika
import time
from aio_pika import Message
from aio_pika import DeliveryMode
from aio_pika import ExchangeType


RABBITMQ_CONFIG={
    "host": "127.0.0.1",
    "port": 5672,
    "vhost": "/",
    "user": "guest",
    "passwd": "guest",
    "serverid": "aiopika00",
    "exchange": "exchange",
}


class RabbitMQ(object):
    # Publisher: connects to the broker and publishes messages to a direct
    # exchange, routed by the configured server id.
    def __init__(self):
        self.connection = None  # aio_pika connection, set by reconnect()
        self.channel = None     # channel on that connection
        self.config = RABBITMQ_CONFIG

    async def reconnect(self, loop):
        """(Re)connect and declare the direct exchange used for publishing."""
        if self.connection and not self.connection.is_closed:
            await self.connection.close()

        self.connection = await aio_pika.connect_robust(
            "amqp://{}:{}@{}/".format(self.config['user'], self.config['passwd'], self.config['host']), loop=loop)
        self.channel = await self.connection.channel()

        self.send_exchange = await self.channel.declare_exchange(self.config['exchange'], ExchangeType.DIRECT)

    async def send(self, loop):
        """Connect if needed, then publish 'aha' persistently in a loop forever."""
        # NOTE(review): `not self.send_exchange` would raise AttributeError on
        # a fresh instance; it only works because `not self.connection`
        # short-circuits first.
        if not self.connection or not self.channel or not self.send_exchange:
            await self.reconnect(loop)

        while True:
            data = 'aha'
            message = Message(data.encode(), delivery_mode=DeliveryMode.PERSISTENT)
            print('send')
            await self.send_exchange.publish(message, routing_key=self.config['serverid'])
            # time.sleep(2)

    @classmethod
    async def run(cls, loop):
        """
        start rabbitmq server
        :return:
        """
        try:
            logging.info("start rabbitmq server")
            self = cls()
            await self.send(loop)
            print(111)  # unreachable in practice: send() loops forever
        except Exception as e:
            exc = traceback.format_exc()
            logging.error(exc)
        await asyncio.sleep(2)



if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.create_task(RabbitMQ.run(loop))
    loop.run_forever()

 

aioredis相关

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import asyncio
import aioredis


REDIS_CONFIG = {
    "host": "127.0.0.1",
    "port": 6379,
}


class Redis():
    '''Thin async wrapper around an aioredis connection pool.'''

    def __init__(self):
        # Pool is created lazily by create_conn().
        self.loop = asyncio.get_event_loop()
        self.pool = None

    async def create_conn(self):
        '''Create the shared pool on first use; no-op afterwards.'''
        if not self.pool:
            self.pool = await aioredis.create_redis_pool((REDIS_CONFIG["host"], REDIS_CONFIG["port"]), encoding='utf-8', loop=self.loop)

    async def set(self, key, value):
        '''SET *key* to *value*.'''
        await self.create_conn()
        with await self.pool as redis:
            await redis.set(key, value)

    async def get(self, key):
        '''GET *key*; return the stored value.'''
        await self.create_conn()
        with await self.pool as redis:
            value = await redis.get(key)
        return value

    async def hset(self, key, field, value):
        '''HSET one *field* on hash *key*.'''
        await self.create_conn()
        with await self.pool as redis:
            await redis.hset(key, field, value)

    async def hget(self, key, field):
        '''HGET one *field* from hash *key*; prints and returns the value.'''
        await self.create_conn()
        with await self.pool as redis:
            value = await redis.hget(key, field)
            print(value)
        return value

    async def hmset(self, key, kwargs):
        '''Set every field/value pair of the *kwargs* dict on hash *key*.'''
        await self.create_conn()
        with await self.pool as redis:
            for field, value in kwargs.items():
                await redis.hmset(key, field, value)

    async def hgetall(self, key):
        '''HGETALL of hash *key*; prints and returns the field map.'''
        await self.create_conn()
        with await self.pool as redis:
            value = await redis.hgetall(key)
            print(value)
        return value

    async def hincry(self, key, field, value):
        '''Increment hash *field* of *key* by *value* (HINCRBY).'''
        await self.create_conn()
        with await self.pool as redis:
            # BUG FIX: the client has no 'hincry' method — the redis command
            # (and aioredis method) is hincrby; the old call always raised
            # AttributeError.
            await redis.hincrby(key, field, value)

    async def delete(self, key):
        '''DEL *key*; return the number of keys removed.'''
        await self.create_conn()
        with await self.pool as redis:
            value = await redis.delete(key)
        return value

if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # Fire two demo hash reads; run_forever keeps the loop alive afterwards.
    loop.create_task(Redis().hget('test', 'serverid'))
    loop.create_task(Redis().hgetall('test'))
    loop.run_forever()

 

posted @ 2018-09-30 14:17  chitalu  阅读(252)  评论(0)    收藏  举报