celery 5.5.1
celery-types 0.23.0
import functools
import re
from datetime import datetime, timedelta

from redis import StrictRedis
from celery import Celery
MQ_URL = "amqp://celery:123456@localhost:5672/app"
REDIS_URL = "redis://localhost:6379"
MAX_LIVE_SECONDS = 1.5 * 60 * 60  # Maximum streaming duration; ffmpeg processes running longer than this are closed automatically
app = Celery(__name__, broker=MQ_URL, backend=REDIS_URL)
app.autodiscover_tasks()
app.conf.broker_connection_retry_on_startup = True
# A worker that has executed a large number of tasks has a chance of leaking memory.
# Setting a global cap on tasks per child makes each worker process exit (and get
# replaced by a fresh one) after completing that many tasks.
# https://www.cnblogs.com/jmilkfan-fanguiju/p/10589785.html
app.conf.worker_max_tasks_per_child = 100
app.conf.task_time_limit = 65 * 60  # Global hard time limit for every task
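# If tasks need a chance to clean up before being killed, Celery also supports a
# soft limit that raises SoftTimeLimitExceeded inside the task before the hard
# limit fires. A sketch, not part of the original setup (the 60-minute value is
# illustrative):
#   app.conf.task_soft_time_limit = 60 * 60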
@functools.cache
def get_sync_redis_client(url=REDIS_URL) -> StrictRedis:
    return StrictRedis.from_url(url)
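# Usage note: functools.cache means every call with the same URL returns the same
# StrictRedis instance, so all tasks in a worker process share one connection pool
# instead of reconnecting each time. A hypothetical call site (the key name is
# illustrative):
#   get_sync_redis_client().set("live:heartbeat", "ok")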
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    # Automatically close outdated ffmpeg processes every 1.5 hours
    sender.add_periodic_task(MAX_LIVE_SECONDS, close_outdate_ffmpeg.s())
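# Periodic tasks registered here only fire when a beat scheduler is running.
# Assuming this module is named app.py (adjust -A to your layout), a minimal way
# to run everything during development is to embed beat in the worker:
#   celery -A app worker -B --loglevel=info
# In production, run `celery -A app beat` as a separate process instead.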
@app.task()
def close_outdate_ffmpeg():
    now = datetime.now()
    delta = timedelta(seconds=MAX_LIVE_SECONDS)
    for p in get_ffmpeg_processes():
        # p.started is the start time as a string; drop any fractional-second
        # part, then pull out the first six numeric fields (year, month, day,
        # hour, minute, second) and rebuild a datetime from them.
        nums = re.findall(r"\d+", p.started.split(".")[0])[:6]
        start_time = datetime(*map(int, nums))  # type: ignore[arg-type]
        if (now - start_time) > delta:
            close_process(p.pid, raises=False)
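# get_ffmpeg_processes() and close_process() are defined elsewhere in the project;
# the task above only shows how they are consumed. A minimal sketch of what they
# might look like, built on psutil (an assumption; the real helpers may track
# ffmpeg PIDs through Redis or a process manager instead):
from dataclasses import dataclass

import psutil


@dataclass
class FFmpegProcess:
    pid: int
    started: str  # start time as a string, e.g. "2025-01-01 12:00:00.123456"


def get_ffmpeg_processes() -> list[FFmpegProcess]:
    """Find running ffmpeg processes and capture their start times as strings."""
    procs = []
    for proc in psutil.process_iter(["pid", "name", "create_time"]):
        if proc.info["name"] == "ffmpeg":
            # Format matches what close_outdate_ffmpeg() parses back out above.
            started = datetime.fromtimestamp(proc.info["create_time"]).isoformat(sep=" ")
            procs.append(FFmpegProcess(pid=proc.info["pid"], started=started))
    return procs


def close_process(pid: int, raises: bool = True) -> None:
    """Terminate a process by pid; ignore an already-gone process unless raises=True."""
    try:
        psutil.Process(pid).terminate()
    except psutil.NoSuchProcess:
        if raises:
            raise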