Upgrading Langfuse from v2.70.1 to v3.110 (cross-machine upgrade + data migration)

Environment:
OS: CentOS 7
Langfuse:
Version before upgrade: v2.70.1
Version after upgrade: v3.110

Notes:
v3 is deployed on a new machine; we copy the files from the host-mounted PostgreSQL data directory of the v2 deployment into the host-mounted data directory of the v3 deployment.
Note: download the YAML manifest file directly rather than copy-pasting it, to avoid unnecessary errors.

 

1. Prepare the Langfuse deployment directory on the new machine
[root@kvm-181 home]# mkdir -p /home/middle/langfuse/langfuse
The YAML file downloaded below is saved into this directory.

Download URL:
https://github.com/langfuse/langfuse/blob/v3.110.0/docker-compose.yml
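One way to fetch the file directly on the new machine (assuming raw.githubusercontent.com is reachable from your network; otherwise download it in a browser and upload it):

cd /home/middle/langfuse/langfuse
wget -O docker-compose.yml https://raw.githubusercontent.com/langfuse/langfuse/v3.110.0/docker-compose.yml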

[root@host135 langfuse]# ls -al
total 8
drwxr-xr-x 2 root root 32 May 20 09:11 .
drwxr-xr-x 3 root root 22 May 20 09:11 ..
-rw-r--r-- 1 root root 7294 May 20 09:03 docker-compose.yml
[root@host135 langfuse]# pwd
/home/middle/langfuse/langfuse


2. The file needs to be modified. Changes: the image addresses (Docker Hub is not reachable from this environment) and the host-mounted directories. The final modified file looks like this:

 

[root@host135 langfuse]# more docker-compose.yml 
# Make sure to update the credential placeholders with your own secrets.
# We mark them with # CHANGEME in the file below.
# In addition, we recommend to restrict inbound traffic on the host to langfuse-web (port 3000) and minio (port 9090) only.
# All other components are bound to localhost (127.0.0.1) to only accept connections from the local machine.
# External connections from other machines will not be able to reach these services directly.
services:
  langfuse-worker:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/langfuse-worker:3.110
    restart: always
    depends_on: &langfuse-depends-on
      postgres:
        condition: service_healthy
      minio:
        condition: service_healthy
      redis:
        condition: service_healthy
      clickhouse:
        condition: service_healthy
    ports:
      - 127.0.0.1:3030:3030
    environment: &langfuse-worker-env
      DATABASE_URL: postgresql://postgres:postgres@postgres:5432/postgres # CHANGEME
      SALT: "mysalt" # CHANGEME
      ENCRYPTION_KEY: "0000000000000000000000000000000000000000000000000000000000000000" # CHANGEME: generate via `openssl rand -hex 32`
      TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-true}
      LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-true}
      CLICKHOUSE_MIGRATION_URL: ${CLICKHOUSE_MIGRATION_URL:-clickhouse://clickhouse:9000}
      CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
      CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:-clickhouse} # CHANGEME
      CLICKHOUSE_CLUSTER_ENABLED: ${CLICKHOUSE_CLUSTER_ENABLED:-false}
      LANGFUSE_S3_EVENT_UPLOAD_BUCKET: ${LANGFUSE_S3_EVENT_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_EVENT_UPLOAD_REGION: ${LANGFUSE_S3_EVENT_UPLOAD_REGION:-auto}
      LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: ${LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY:-miniosecret} # CHANGEME
      LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: ${LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_EVENT_UPLOAD_PREFIX: ${LANGFUSE_S3_EVENT_UPLOAD_PREFIX:-events/}
      LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: ${LANGFUSE_S3_MEDIA_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_MEDIA_UPLOAD_REGION: ${LANGFUSE_S3_MEDIA_UPLOAD_REGION:-auto}
      LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: ${LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY:-miniosecret} # CHANGEME
      LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: ${LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: ${LANGFUSE_S3_MEDIA_UPLOAD_PREFIX:-media/}
      LANGFUSE_S3_BATCH_EXPORT_ENABLED: ${LANGFUSE_S3_BATCH_EXPORT_ENABLED:-false}
      LANGFUSE_S3_BATCH_EXPORT_BUCKET: ${LANGFUSE_S3_BATCH_EXPORT_BUCKET:-langfuse}
      LANGFUSE_S3_BATCH_EXPORT_PREFIX: ${LANGFUSE_S3_BATCH_EXPORT_PREFIX:-exports/}
      LANGFUSE_S3_BATCH_EXPORT_REGION: ${LANGFUSE_S3_BATCH_EXPORT_REGION:-auto}
      LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: ${LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: ${LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY:-miniosecret} # CHANGEME
      LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: ${LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE:-true}
      LANGFUSE_INGESTION_QUEUE_DELAY_MS: ${LANGFUSE_INGESTION_QUEUE_DELAY_MS:-}
      LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: ${LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS:-}
      REDIS_HOST: ${REDIS_HOST:-redis}
      REDIS_PORT: ${REDIS_PORT:-6379}
      REDIS_AUTH: ${REDIS_AUTH:-myredissecret} # CHANGEME
      REDIS_TLS_ENABLED: ${REDIS_TLS_ENABLED:-false}
      REDIS_TLS_CA: ${REDIS_TLS_CA:-/certs/ca.crt}
      REDIS_TLS_CERT: ${REDIS_TLS_CERT:-/certs/redis.crt}
      REDIS_TLS_KEY: ${REDIS_TLS_KEY:-/certs/redis.key}

  langfuse-web:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/langfuse:3.110
    restart: always
    depends_on: *langfuse-depends-on
    ports:
      - 3000:3000
    environment:
      <<: *langfuse-worker-env
      NEXTAUTH_URL: http://localhost:3000
      NEXTAUTH_SECRET: mysecret # CHANGEME
      LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-}
      LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-}
      LANGFUSE_INIT_PROJECT_ID: ${LANGFUSE_INIT_PROJECT_ID:-}
      LANGFUSE_INIT_PROJECT_NAME: ${LANGFUSE_INIT_PROJECT_NAME:-}
      LANGFUSE_INIT_PROJECT_PUBLIC_KEY: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY:-}
      LANGFUSE_INIT_PROJECT_SECRET_KEY: ${LANGFUSE_INIT_PROJECT_SECRET_KEY:-}
      LANGFUSE_INIT_USER_EMAIL: ${LANGFUSE_INIT_USER_EMAIL:-}
      LANGFUSE_INIT_USER_NAME: ${LANGFUSE_INIT_USER_NAME:-}
      LANGFUSE_INIT_USER_PASSWORD: ${LANGFUSE_INIT_USER_PASSWORD:-}

  clickhouse:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/clickhouse-server:24.8
    restart: always
    ##user: "101:101"
    environment:
      CLICKHOUSE_DB: default
      CLICKHOUSE_USER: clickhouse
      CLICKHOUSE_PASSWORD: clickhouse # CHANGEME
    volumes:
      - /home/middle/langfuse/clickhouse/data:/var/lib/clickhouse
      - /home/middle/langfuse/clickhouse/logs:/var/log/clickhouse-server
      ##- /home/middle/langfuse/clickhouse/conf/config.xml:/etc/clickhouse-server/config.xml
      ##- /home/middle/langfuse/clickhouse/conf/users.xml:/etc/clickhouse-server/users.xml
    ports:
      - 127.0.0.1:8123:8123
      - 127.0.0.1:9000:9000
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 1s

  minio:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/minio:RELEASE.2025-04-22T22-12-26Z
    restart: always
    entrypoint: sh
    # create the 'langfuse' bucket before starting the service
    command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: miniosecret # CHANGEME
    ports:
      - 9090:9000
      - 9091:9001
    volumes:
      - /home/middle/langfuse/minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 1s
      timeout: 5s
      retries: 5
      start_period: 1s

  redis:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/redis:7.4
    restart: always
    # CHANGEME: row below to secure redis password
    command: >
      --requirepass ${REDIS_AUTH:-myredissecret}
    ports:
      - 127.0.0.1:6379:6379
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 3s
      timeout: 10s
      retries: 10

  postgres:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/postgresql:16.4.0
    restart: always
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 3s
      timeout: 3s
      retries: 10
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres # CHANGEME
      POSTGRES_DB: postgres
    ports:
      - 5432:5432
    volumes:
      - /home/middle/langfuse/pgdata:/var/lib/postgresql/data
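Before the first start, replace the # CHANGEME placeholders (SALT, ENCRYPTION_KEY, NEXTAUTH_SECRET and the various passwords) with your own values. A minimal sketch for generating them, following the hint in the file itself:

openssl rand -hex 32      # ENCRYPTION_KEY (must be 64 hex characters, i.e. 256 bit)
openssl rand -base64 32   # for example, a value for SALT or NEXTAUTH_SECRET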

 

 

Notes:
a. Use image registries that are reachable; the default Docker Hub cannot be accessed from this environment.
b. The ClickHouse and PostgreSQL data and log directories must be mounted out to the host.
c. Use a recent MinIO version; the older minio:RELEASE.2024-12-18T13-15-44Z turned out to be unusable.
d. Remove 127.0.0.1 from the published MinIO console port (change "- 127.0.0.1:9091:9001" to "- 9091:9001") so it is reachable from all machines; otherwise the MinIO console cannot be opened from a client browser.
e. Comment out user: "101:101" in the clickhouse service, otherwise creating temporary directories fails with a permission error.
f. The PostgreSQL port 5432 must be open to all machines, otherwise remote backups are not possible.

 

3. On the v3 machine, create the host directories that the compose file mounts
mkdir -p /home/middle/langfuse/pgdata
mkdir -p /home/middle/langfuse/minio_data
mkdir -p /home/middle/langfuse/clickhouse/data
mkdir -p /home/middle/langfuse/clickhouse/logs


4. Try a first start
[root@host135 langfuse]# cd /home/middle/langfuse/langfuse
[root@host135 langfuse]# docker compose up -d
The first start pulls the images; there are quite a few of them, so it takes a few minutes.

Note:
If some components fail to start at first, wait until the other components are up and then start the following two components one by one (see the status/log commands right after these):
docker compose start langfuse-worker
docker compose start langfuse-web ##takes a long time to start because it needs to apply schema migrations
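A quick way to watch the startup, assuming the commands are run from the compose directory:

docker compose ps                      # the backing services should report healthy, langfuse-web/worker should be running
docker compose logs -f langfuse-web    # follow the schema-migration progress of the web container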

 

5. Log in to / check each component
langfuse
http://192.168.1.135:3000

redis
[root@localhost secure_file]# /usr/local/services/redis/bin/redis-cli -h 192.168.1.135 -p 6379 -a myredissecret
Warning: Using a password with '-a' option on the command line interface may not be safe.
Could not connect to Redis at 192.168.1.181:6379: Connection refused
Could not connect to Redis at 192.168.1.181:6379: Connection refused
Remote login fails because the compose file publishes Redis as 127.0.0.1:6379:6379, i.e. only local connections are accepted. Try logging in on the host itself; even there, do not use the machine IP, use 127.0.0.1 or localhost.
Copy a redis client to the server:
[root@localhost services]# scp -r redis root@192.168.1.181:/usr/local/services/

Then log in:
[root@kvm-181 soft]# /usr/local/services/redis/bin/redis-cli -h 127.0.0.1 -p 6379 -a myredissecret
Warning: Using a password with '-a' option on the command line interface may not be safe.
The password passed with -a is the one defined in the YAML file.

It turns out the dockerized Redis runs without a config file:
192.168.1.135:6379> config set maxmemory-policy allkeys-lfu
OK
192.168.1.135:6379> config rewrite
(error) ERR The server is running without a config file
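The value applied via config set therefore only lives until the container restarts. One way to make it persistent (an assumption, not something done in this walkthrough) is to append --maxmemory-policy allkeys-lfu to the command: line of the redis service in docker-compose.yml. Applying and checking the value through the running container looks like this:

cd /home/middle/langfuse/langfuse
docker compose exec redis redis-cli -a myredissecret config set maxmemory-policy allkeys-lfu
docker compose exec redis redis-cli -a myredissecret config get maxmemory-policy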


clickhouse
Install a clickhouse-client whose version matches the server version.
Installation steps can be found at the following link:
https://www.cnblogs.com/hxlasky/p/18674814
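Alternatively, if installing a client on the host is inconvenient, the clickhouse-client bundled in the running server container should work as well (container id taken from docker ps):

docker exec -it <clickhouse-container-id> clickhouse-client -u clickhouse --password clickhouse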
[root@kvm-181 clickhouse_client]# which clickhouse-client
/usr/bin/clickhouse-client
[root@host135 langfuse]# clickhouse-client -h 127.0.0.1 -m -u clickhouse --password clickhouse --port=9000
ClickHouse client version 24.8.11.5 (official build).
Connecting to 127.0.0.1:9000 as user clickhouse.
Connected to ClickHouse server version 24.8.11.

ce106a2c8013 :) show databases;

SHOW DATABASES

Query id: e53f9a2f-8213-426d-8add-cce5591dd8b3

┌─name───────────────┐
1. │ INFORMATION_SCHEMA │
2. │ default │
3. │ information_schema │
4. │ system │
└────────────────────┘

4 rows in set. Elapsed: 0.008 sec.

ce106a2c8013 :) use default;
ce106a2c8013 :) show tables;

SHOW TABLES

Query id: e8e30b77-86fc-4daa-88e7-ff51bf037141

┌─name─────────────────────────────────┐
1. │ analytics_observations │
2. │ analytics_scores │
3. │ analytics_traces │
4. │ blob_storage_file_log │
5. │ dataset_run_items │
6. │ dataset_run_items_rmt │
7. │ event_log │
8. │ observations │
9. │ project_environments │
10. │ project_environments_observations_mv │
11. │ project_environments_scores_mv │
12. │ project_environments_traces_mv │
13. │ schema_migrations │
14. │ scores │
15. │ traces │
16. │ traces_30d_amt │
17. │ traces_30d_amt_mv │
18. │ traces_7d_amt │
19. │ traces_7d_amt_mv │
20. │ traces_all_amt │
21. │ traces_all_amt_mv │
22. │ traces_null │
└──────────────────────────────────────┘

22 rows in set. Elapsed: 0.007 sec.

 

 

postgresql
A PostgreSQL client needs to be installed; its version should match the server version.
Deployment link:
https://www.cnblogs.com/hxlasky/p/18548402
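As with ClickHouse, the psql shipped inside the PostgreSQL container should also work if you do not want a host client (container id from docker ps):

docker exec -it <postgres-container-id> psql -U postgres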

The password is postgres (as defined in the YAML file):
[root@host135 langfuse]# /opt/pg16/bin/psql -h 127.0.0.1 -U postgres -p5432
Password for user postgres:
psql (16.4)
Type "help" for help.

postgres=# \dt
List of relations
Schema | Name | Type | Owner
--------+------------------------------+-------+----------
public | Account | table | postgres
public | Session | table | postgres
public | _prisma_migrations | table | postgres
public | actions | table | postgres
public | annotation_queue_assignments | table | postgres
public | annotation_queue_items | table | postgres
public | annotation_queues | table | postgres
public | api_keys | table | postgres
public | audit_logs | table | postgres
public | automation_executions | table | postgres
public | automations | table | postgres
public | background_migrations | table | postgres
public | batch_exports | table | postgres
public | billing_meter_backups | table | postgres
public | blob_storage_integrations | table | postgres
public | comments | table | postgres
public | cron_jobs | table | postgres
public | dashboard_widgets | table | postgres
public | dashboards | table | postgres
public | dataset_items | table | postgres
public | dataset_run_items | table | postgres
public | dataset_runs | table | postgres
public | datasets | table | postgres
public | default_llm_models | table | postgres
public | eval_templates | table | postgres
public | job_configurations | table | postgres
public | job_executions | table | postgres
public | llm_api_keys | table | postgres
public | llm_schemas | table | postgres
public | llm_tools | table | postgres
public | media | table | postgres
public | membership_invitations | table | postgres
public | models | table | postgres
public | observation_media | table | postgres
public | observations | table | postgres
public | organization_memberships | table | postgres
public | organizations | table | postgres
public | pending_deletions | table | postgres
public | posthog_integrations | table | postgres
public | prices | table | postgres
public | project_memberships | table | postgres
public | projects | table | postgres
public | prompt_dependencies | table | postgres
public | prompt_protected_labels | table | postgres
public | prompts | table | postgres
public | score_configs | table | postgres
public | scores | table | postgres
public | slack_integrations | table | postgres
public | sso_configs | table | postgres
public | surveys | table | postgres
public | table_view_presets | table | postgres
public | trace_media | table | postgres
public | trace_sessions | table | postgres
public | traces | table | postgres
public | triggers | table | postgres
public | users | table | postgres
public | verification_tokens | table | postgres
(57 rows)


minio
http://192.168.1.135:9091/login
Username/password: minio/miniosecret

 

##########################################Mount the ClickHouse config files out to the host#######################
1. Create the path where the config files will be stored
mkdir -p /home/middle/langfuse/clickhouse/conf/

 

2. Copy the config files from the container to the corresponding host directory
docker ps ##find the clickhouse container id, then copy the config files to the host directory
docker cp ce106a2c8013:/etc/clickhouse-server/config.xml /home/middle/langfuse/clickhouse/conf/
docker cp ce106a2c8013:/etc/clickhouse-server/users.xml /home/middle/langfuse/clickhouse/conf/

 

3. Edit the ClickHouse config file on the host
vi /home/middle/langfuse/clickhouse/conf/config.xml

Uncomment these two lines:
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
<!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

so that they become:
<max_table_size_to_drop>0</max_table_size_to_drop>
<max_partition_size_to_drop>0</max_partition_size_to_drop>
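A hedged one-liner alternative to the manual edit, assuming the commented lines appear exactly as shown above (back the file up first):

cp /home/middle/langfuse/clickhouse/conf/config.xml /home/middle/langfuse/clickhouse/conf/config.xml.bak
sed -i 's|<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->|<max_table_size_to_drop>0</max_table_size_to_drop>|' /home/middle/langfuse/clickhouse/conf/config.xml
sed -i 's|<!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->|<max_partition_size_to_drop>0</max_partition_size_to_drop>|' /home/middle/langfuse/clickhouse/conf/config.xml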



4. Modify the YAML file and add the two config.xml/users.xml volume mounts shown below under the clickhouse service
vi /home/middle/langfuse/langfuse/docker-compose.yml
  clickhouse:
    image: registry.cn-shenzhen.aliyuncs.com/hxlk8s/clickhouse-server:24.8
    restart: always
    container_name: clickhouse
    hostname: clickhouse
    environment:
      CLICKHOUSE_DB: default
      CLICKHOUSE_USER: clickhouse
      CLICKHOUSE_PASSWORD: clickhouse
    volumes:
      - /home/middle/langfuse/clickhouse/data:/var/lib/clickhouse
      - /home/middle/langfuse/clickhouse/logs:/var/log/clickhouse-server
      - /home/middle/langfuse/clickhouse/conf/config.xml:/etc/clickhouse-server/config.xml
      - /home/middle/langfuse/clickhouse/conf/users.xml:/etc/clickhouse-server/users.xml
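Before redeploying, the edited file can be checked for indentation or syntax mistakes:

cd /home/middle/langfuse/langfuse/
docker compose config --quiet && echo "compose file OK"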

5. Redeploy
cd /home/middle/langfuse/langfuse/
docker compose down
docker compose up -d


6. Log in to the container and check that the change took effect
docker exec -ti langfuse-clickhouse-1 /bin/bash

cat /etc/clickhouse-server/config.xml|grep max_table_size_to_drop
cat /etc/clickhouse-server/config.xml|grep max_partition_size_to_drop


##########################################Modify the PostgreSQL parameters####################################
Note: the data migration later copies over the entire pgdata directory, so the changes made here will be overwritten and have to be applied again after the migration.
1. Modify the pg parameters
vi /home/middle/langfuse/pgdata/postgresql.conf

max_connections=1000 #raise the maximum number of connections
##change the timezone (no change needed if the values are already as below)
log_timezone = 'Asia/Shanghai'
timezone = 'Asia/Shanghai'

 

2. Restart
cd /home/middle/langfuse/langfuse/
docker compose stop
docker compose start
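After the restart, confirm that the new values are in effect (password postgres, as defined in the compose file):

/opt/pg16/bin/psql -h 127.0.0.1 -U postgres -p 5432 -c "show max_connections;" -c "show timezone;"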

 

#####################################Data migration########################################

1. Stop the v3 deployment
[root@kvm-181 langfuse]# cd /home/middle/langfuse/langfuse
[root@kvm-181 langfuse]# docker compose down

 

2. Delete the middleware data directories created by the trial deployment above
rm -rf /home/middle/langfuse/clickhouse/data/*
rm -rf /home/middle/langfuse/clickhouse/logs/*
rm -rf /home/middle/langfuse/minio_data/*
rm -rf /home/middle/langfuse/pgdata/* ##note: this also deletes the postgresql.conf changes made in the step above

 

3. Stop the v2 deployment
Log in to the original Langfuse server.
v2 is stopped so that PostgreSQL has no transactions being written and the data directory can simply be copied over for the v3 deployment to use.
[root@middle ~]# cd /usr/local/services/langfuse
[root@localhost langfuse]# docker compose stop
[root@localhost langfuse]# docker compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS

4. Determine the path of the v2 database data directory, then copy it over:
scp -r -p /var/lib/docker/volumes/langfuse_database_data/_data/* root@192.168.1.135:/home/middle/langfuse/pgdata/

After the copy, the file and directory permissions on the target server look like this:
[root@host135 pgdata]# ls -al
total 64
drwx------ 19 polkitd root 4096 Sep 22 14:46 .
drwxrwxr-x 6 root root 72 Sep 22 13:40 ..
drwx------ 6 root root 50 Sep 11 2024 base
drwx------ 2 root root 4096 Sep 4 15:03 global
drwx------ 2 root root 6 Aug 14 2024 pg_commit_ts
drwx------ 2 root root 6 Aug 14 2024 pg_dynshmem
-rw------- 1 root root 5811 Nov 18 2024 pg_hba.conf
-rw------- 1 root root 2640 Aug 14 2024 pg_ident.conf
drwx------ 4 root root 68 Sep 22 14:32 pg_logical
drwx------ 4 root root 36 Aug 14 2024 pg_multixact
drwx------ 2 root root 6 Aug 14 2024 pg_notify
drwx------ 2 root root 6 Sep 22 02:07 pg_replslot
drwx------ 2 root root 6 Aug 14 2024 pg_serial
drwx------ 2 root root 6 Aug 14 2024 pg_snapshots
drwx------ 2 root root 25 Sep 22 14:32 pg_stat
drwx------ 2 root root 6 Aug 14 2024 pg_stat_tmp
drwx------ 2 root root 18 Sep 18 19:18 pg_subtrans
drwx------ 2 root root 6 Aug 14 2024 pg_tblspc
drwx------ 2 root root 6 Aug 14 2024 pg_twophase
-rw------- 1 root root 3 Aug 14 2024 PG_VERSION
drwx------ 3 root root 188 Sep 22 09:20 pg_wal
drwx------ 2 root root 90 Sep 16 11:15 pg_xact
-rw------- 1 root root 88 Aug 14 2024 postgresql.auto.conf
-rw------- 1 root root 29774 Sep 3 2024 postgresql.conf
-rw------- 1 root root 36 Aug 19 13:39 postmaster.opts
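An optional sanity check of the copy is to compare the directory sizes on both hosts:

du -sh /var/lib/docker/volumes/langfuse_database_data/_data   # on the v2 host
du -sh /home/middle/langfuse/pgdata                           # on the v3 host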

 

5. Redeploy v3
Log in to the v3 server.
[root@kvm-181 langfuse]# cd /home/middle/langfuse/langfuse
[root@kvm-181 langfuse]# docker compose up -d


6. Check the upgrade progress
[root@kvm-181 langfuse]# pwd
/home/middle/langfuse/langfuse
[root@kvm-181 langfuse]# docker compose logs langfuse-web
langfuse-web-1 | Applying migration `20250519093328_media_relax_id_uniqueness_to_project_only`
langfuse-web-1 | Applying migration `20250519145128_resize_dashboard_y_axis_components`
langfuse-web-1 | Applying migration `20250520123737_add_single_aggregate_chart_type`
langfuse-web-1 | Applying migration `20250522140357_remove_obsolete_observation_media_index`
langfuse-web-1 | Applying migration `20250523100511_add_default_eval_model_table`
langfuse-web-1 | Applying migration `20250523110540_modify_nullable_cols_eval_templates`
langfuse-web-1 | Applying migration `20250523120545_add_nullable_job_template_id`
langfuse-web-1 | Applying migration `20250529071241_make_blobstorage_integration_credentials_optional`
langfuse-web-1 | Applying migration `20250604085536_add_histogram_chart_type`
langfuse-web-1 | Applying migration `20250625_add_pivot_table_charttype`
langfuse-web-1 | Applying migration `20250704170658_add_automations`
langfuse-web-1 | Applying migration `20250709113103_add_blob_export_schedule_type`
langfuse-web-1 | Applying migration `20250711105322_prices_add_project_id`
langfuse-web-1 | Applying migration `20250711134738_add_patch_llm_tool_schema_audit_logs_background_migration`
langfuse-web-1 | Applying migration `20250714151410_add_trace_session_combined_index`


langfuse-web-1 | 18/u add_scores_run_index (2.211701789s)
langfuse-web-1 | 19/u analytics_traces (2.358740396s)
langfuse-web-1 | 20/u analytics_observations (1.954651527s)
langfuse-web-1 | 21/u analytics_scores (1.976599872s)
langfuse-web-1 | 22/u dataset_run_items (1.964436398s)
langfuse-web-1 | 23/u traces_aggregating_merge_trees (2.77449022s)
langfuse-web-1 | 24/u dataset_run_items (2.859917355s)
langfuse-web-1 | 25/u add_observations_metadata_indexes (3.001327807s)
langfuse-web-1 | 26/u add_trace_id_index (2.894366407s)
langfuse-web-1 | ▲ Next.js 15.5.2
langfuse-web-1 | - Local: http://819159e07a44:3000
langfuse-web-1 | - Network: http://819159e07a44:3000
langfuse-web-1 |
langfuse-web-1 | ✓ Starting...
langfuse-web-1 | Running init scripts...
langfuse-web-1 | ✓ Ready in 27.8s

docker compose logs langfuse-worker

At this point you can inspect the backend sessions in PostgreSQL:
/opt/pg16/bin/psql -h 127.0.0.1 -U postgres -p5432
select pid,usename,client_addr,client_port,backend_start,query_start,state_change,state,query from pg_stat_activity;

select pid,usename,client_addr,client_port,backend_start,query_start,state_change,state,query from pg_stat_activity where state='active';


7. Data validation

The password is postgres:
[root@kvm-181 bin]# /opt/pg16/bin/psql -h 127.0.0.1 -U postgres -p5432


postgres=# select count(1) from users;
count
-------
22
(1 row)

postgres=# select count(1) from traces;
count
-------
99665
(1 row)
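In v3 the trace/observation data is served from ClickHouse, and background migrations copy the historical PostgreSQL data over after the upgrade. As an optional extra check (assuming the default credentials from the compose file), the ClickHouse row count can be compared once the background migrations have finished:

clickhouse-client -h 127.0.0.1 --port 9000 -u clickhouse --password clickhouse --query "SELECT count() FROM default.traces"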


##########################################Modify the PostgreSQL parameters again####################################
Apply as needed: the migration copied over the entire pgdata directory (including postgresql.conf), so the parameter changes made earlier were overwritten and have to be made again.
1. Modify the pg parameters
vi /home/middle/langfuse/pgdata/postgresql.conf

max_connections=1000 #raise the maximum number of connections
##change the timezone (no change needed if the values are already as below)
log_timezone = 'Asia/Shanghai'
timezone = 'Asia/Shanghai'

2. Restart
cd /home/middle/langfuse/langfuse/
docker compose stop
docker compose start

 
