Kubernetes Log Collection Case 2 [DaemonSet and Sidecar Log Collection]

Case 2: sidecar-pattern architecture

1. Build the logstash-sidecar image

# logstash-image-Dockerfile: build the Logstash image that runs as a sidecar container inside every application Pod

[root@xianchaomaster1 1.logstash-image-Dockerfile]# ll
total 972704
-rwxr-xr-x 1 root root       298 Apr  6 19:58 build-commond.sh
-rw-r--r-- 1 root root       221 Apr  6 19:54 Dockerfile
-rw-r--r-- 1 root root       740 Apr  6 19:54 logstash.conf
-rw------- 1 root root 996032512 Apr  6 19:59 logstash_sidecar.tar.gz
-rw-r--r-- 1 root root        92 May 23  2022 logstash.yml


[root@xianchaomaster1 1.logstash-image-Dockerfile]# cat build-commond.sh
#!/bin/bash

docker build -t harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar .

#docker push harbor.magedu.local/baseimages/logstash:v7.12.1-sidecar
#nerdctl  build -t harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar .
#nerdctl push harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar
#nerdctl build -t harbor.sheca.com/baseimages/logstash:v7.12.1-sidecar .
#nerdctl push --insecure-registry harbor.sheca.com/baseimages/logstash:v7.12.1-sidecar

# Pull the base image in advance
docker pull logstash:7.12.1

# Edit the Dockerfile
vim Dockerfile
FROM logstash:7.12.1

USER root
WORKDIR /usr/share/logstash
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf


[root@xianchaomaster1 1.logstash-image-Dockerfile]# cat logstash.conf
input {
  file {
    path => "/var/log/applog/catalina.out"
    start_position => "beginning"
    type => "app1-sidecar-catalina-log"
  }
  file {
    path => "/var/log/applog/localhost_access_log.*.txt"
    start_position => "beginning"
    type => "app1-sidecar-access-log"
  }
}

output {
  if [type] == "app1-sidecar-catalina-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  # amount of data, in bytes, that Logstash batches per request to Kafka
      codec => "${CODEC}"
   } }

  if [type] == "app1-sidecar-access-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}"
  }}
}
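
Logstash resolves the ${KAFKA_SERVER}, ${TOPIC_ID} and ${CODEC} placeholders from the container's environment at startup, so the same image can serve different applications. With the values set in the Deployment in section 2, each kafka output effectively resolves to:

kafka {
  bootstrap_servers => "192.168.40.180:9092,192.168.40.181:9092,192.168.40.182:9092"
  topic_id => "tomcat-app1-topic"
  codec => "json"
}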

[root@xianchaomaster1 1.logstash-image-Dockerfile]# cat logstash.yml
http.host: "0.0.0.0"
#xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]

# Build the image and push it to Harbor
bash build-commond.sh
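
# Optionally smoke-test the pipeline config baked into the image before pushing.
# A minimal sketch, assuming the image built above; the env vars must be supplied
# or the ${VAR} lookups fail. --config.test_and_exit validates the config and exits
# without starting the pipeline.
docker run --rm \
  -e KAFKA_SERVER="192.168.40.180:9092" \
  -e TOPIC_ID="tomcat-app1-topic" \
  -e CODEC="json" \
  harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar \
  bin/logstash -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit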

# Pre-pull the image on each node (use the registry that matches the image reference in your Deployment)
crictl pull harbor.sheca.com/baseimages/logstash:v7.12.1-sidecar
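
# Confirm the image landed in the node's local image store
crictl images | grep logstash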

2. Create the Tomcat application Deployment

[root@xianchaomaster1 2.sidecar-logstash]# cat 2.tomcat-app1.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-deployment-label
  name: magedu-tomcat-app1-deployment # name of this Deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: magedu-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-selector
    spec:
      containers:
      - name: sidecar-container
        image: harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        env:
        - name: "KAFKA_SERVER"
          value: "192.168.40.180:9092,192.168.40.181:9092,192.168.40.182:9092"
        - name: "TOPIC_ID"
          value: "tomcat-app1-topic"
        - name: "CODEC"
          value: "json"
        volumeMounts:
        - name: applogs
          mountPath: /var/log/applog
      - name: magedu-tomcat-app1-container
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1:v1
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
        volumeMounts:
        - name: applogs
          mountPath: /apps/tomcat/logs
        startupProbe:
          httpGet:
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5 # wait 5s before the first probe
          failureThreshold: 3  # consecutive failures before the probe is considered failed
          periodSeconds: 3 # interval between probes
        readinessProbe:
          httpGet:
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        livenessProbe:
          httpGet:
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
      volumes:
      - name: applogs # emptyDir volume shared by the app container and the sidecar, so the sidecar can collect the app's log files
        emptyDir: {}
        
# Apply the Deployment
kubectl apply -f 2.tomcat-app1.yaml
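
# Check that all replicas are up and that the sidecar started cleanly
# (label and container names per the manifest above)
kubectl get pods -l app=magedu-tomcat-app1-selector
kubectl logs deploy/magedu-tomcat-app1-deployment -c sidecar-container --tail=20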

3. Create the Tomcat Service (exposed via NodePort)

# NodePort: 30090

[root@xianchaomaster1 2.sidecar-logstash]# cat 3.tomcat-service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-service-label
  name: magedu-tomcat-app1-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30090
  selector:
    app: magedu-tomcat-app1-selector
    
# Apply the Service
kubectl apply -f 3.tomcat-service.yaml
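
# Quick functional test: each request through the NodePort lands in
# localhost_access_log inside the shared emptyDir, which the sidecar ships to Kafka.
# Any node IP works for a NodePort service; 192.168.40.182 is xianchaonode2.
curl -I http://192.168.40.182:30090/myapp/index.html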


4. Ship the data from Kafka into Elasticsearch

[root@xianchaomaster1 2.sidecar-logstash]# cat 4.logsatsh-sidecar-kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "192.168.40.180:9092,192.168.40.181:9092,192.168.40.182:9092"
    topics => ["tomcat-app1-topic"]
    codec => "json"
  }
}

output {
  #if [fields][type] == "app1-access-log" {
  if [type] == "app1-sidecar-access-log" {
    elasticsearch {
      hosts => ["192.168.40.180:9200","192.168.40.181:9200"]
      index => "sidecar-app1-accesslog-%{+YYYY.MM.dd}"
    }
  }

  #if [fields][type] == "app1-catalina-log" {
  if [type] == "app1-sidecar-catalina-log" {
    elasticsearch {
      hosts => ["192.168.40.180:9200","192.168.40.181:9200"]
      index => "sidecar-app1-catalinalog-%{+YYYY.MM.dd}"
    }
  }

#  stdout {
#    codec => rubydebug
#  }
}
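
Before wiring this into the Logstash service, you can confirm messages are actually arriving on the topic. A sketch, assuming the Kafka CLI tools live under /usr/local/kafka/bin on a broker host (adjust the path to your installation):

/usr/local/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 192.168.40.180:9092 \
  --topic tomcat-app1-topic \
  --from-beginning --max-messages 5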

[root@xianchaomaster1 conf.d]# cp 4.logsatsh-sidecar-kafka-to-es.conf /etc/logstash/conf.d/logstash-sidercar-kafka-to-es.conf

[root@xianchaomaster1 conf.d]# systemctl restart logstash

[root@xianchaomaster1 conf.d]# tail -f /var/log/logstash/logstash-plain.log
[2023-04-07T07:14:30,966][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] Join group failed with org.apache.kafka.common.errors.MemberIdRequiredException: The group member needs to have a valid member id before actually entering a consumer group
[2023-04-07T07:14:30,967][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2023-04-07T07:14:30,980][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][37c0f0fd08067984bcbec4d60b1b15abe4810964929cb6103508ae423e8f0624] [Consumer clientId=logstash-0, groupId=logstash] Finished assignment for group at generation 24: {logstash-0-0e7604bd-9b53-47c1-b1d5-a935a97fd43d=Assignment(partitions=[tomcat-app1-topic-0]), logstash-0-509b2885-ee56-49ff-8ad6-513378b7eec7=Assignment(partitions=[jsonfile-log-topic-0])}
[2023-04-07T07:14:30,984][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 24
[2023-04-07T07:14:30,985][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][37c0f0fd08067984bcbec4d60b1b15abe4810964929cb6103508ae423e8f0624] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 24
[2023-04-07T07:14:30,988][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: tomcat-app1-topic-0
[2023-04-07T07:14:30,988][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][37c0f0fd08067984bcbec4d60b1b15abe4810964929cb6103508ae423e8f0624] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: jsonfile-log-topic-0
[2023-04-07T07:14:30,994][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] Found no committed offset for partition tomcat-app1-topic-0
[2023-04-07T07:14:30,996][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][37c0f0fd08067984bcbec4d60b1b15abe4810964929cb6103508ae423e8f0624] [Consumer clientId=logstash-0, groupId=logstash] Setting offset for partition jsonfile-log-topic-0 to the committed offset FetchPosition{offset=2184170, offsetEpoch=Optional[29], currentLeader=LeaderAndEpoch{leader=Optional[192.168.40.182:9092 (id: 103 rack: null)], epoch=35}}
[2023-04-07T07:14:31,012][INFO ][org.apache.kafka.clients.consumer.internals.SubscriptionState][main][944b3a7af9dd65be640986732712b6983a2d047a2d3ad5d891001312a0b5e4dd] [Consumer clientId=logstash-0, groupId=logstash] Resetting offset for partition tomcat-app1-topic-0 to offset 817767.
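
# Once the consumer has joined the group, the daily indices should appear in
# Elasticsearch (the access-log index first, since the probes generate traffic continuously)
curl -s 'http://192.168.40.180:9200/_cat/indices?v' | grep sidecar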

5. Add the Kibana index patterns and view both log indices

# Because the Pod has liveness and readiness probes, the access log (accesslog) index already has data

5.1 The application (catalina) log is still empty at this point, so append a line to catalina.out inside the container, then create the index pattern in Kibana to view it

[root@xianchaomaster1 conf.d]# kubectl get pods | grep Running
details-v1-65bbfd4f58-47z7q                      2/2     Running                 0          16h
magedu-tomcat-app1-deployment-8684dfd865-jpfql   3/3     Running                 0          6h49m
magedu-tomcat-app1-deployment-8684dfd865-kdqzp   3/3     Running                 0          6h49m
mysql-0                                          3/3     Running                 0          6h43m
nginx-hpa-fb74696c-dkrff                         2/2     Running                 0          6h58m
productpage-v1-6b746f74dc-dnqr9                  2/2     Running                 0          6h43m
ratings-v1-b45758b-dm7vg                         2/2     Running                 0          16h
redis-0                                          2/2     Running                 0          6h43m
redis-1                                          2/2     Running                 0          6h43m
redis-2                                          2/2     Running                 0          6h43m
redis-3                                          2/2     Running                 0          6h43m
redis-4                                          2/2     Running                 0          6h39m
redis-5                                          2/2     Running                 0          6h39m
reviews-v1-74894b48c8-mvzhd                      2/2     Running                 0          6h43m
reviews-v2-f649764d-zb9b9                        2/2     Running                 0          6h47m
reviews-v3-6c675c6774-c2qc2                      2/2     Running                 0          6h43m
sleep-557747455f-mjc59                           2/2     Running                 0          16h
zookeeper2-56b79857dc-2ph46                      2/2     Running                 0          6h42m
zookeeper3-5d58d7c798-dhw5b                      2/2     Running                 0          6h43m


[root@xianchaomaster1 conf.d]# kubectl exec -it magedu-tomcat-app1-deployment-8684dfd865-jpfql -c magedu-tomcat-app1-container bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql /]# cd /apps/
apache-tomcat-8.5.43/ tomcat/
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql /]# cd /apps/tomcat/l
lib/  logs/
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql /]# cd /apps/tomcat/logs/
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql logs]# ll
total 1808
-rw-rw-r-- 1 nginx nginx    7356 Apr  9 09:24 catalina.2023-04-09.log
-rw-rw-r-- 1 nginx nginx    7900 Apr  9 15:53 catalina.out
-rw-rw-r-- 1 nginx nginx       0 Apr  9 09:24 host-manager.2023-04-09.log
-rw-rw-r-- 1 nginx nginx       0 Apr  9 09:24 localhost.2023-04-09.log
-rw-rw-r-- 1 nginx nginx 1330822 Apr  9 16:05 localhost_access_log.2023-04-09.txt
-rw-rw-r-- 1 nginx nginx       0 Apr  9 09:24 manager.2023-04-09.log
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql logs]# echo "Now is 2 times see this sidercar Kibana" >> catalina.out
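
# Within a few seconds the sidecar's file input should pick the line up and, via
# Kafka, it should land in Elasticsearch. A quick check against the catalina index,
# searching for a word from the line appended above:
curl -s 'http://192.168.40.180:9200/sidecar-app1-catalinalog-*/_search?q=sidercar&pretty'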

5.2 Verify the localhost_access_log: the Pod runs the app and the sidecar as separate containers, so exec into sidecar-container and inspect /var/log/applog

[root@xianchaomaster1 conf.d]# kubectl describe pod magedu-tomcat-app1-deployment-8684dfd865-jpfql
Name:         magedu-tomcat-app1-deployment-8684dfd865-jpfql
Namespace:    default
Priority:     0
Node:         xianchaonode2/192.168.40.182
Start Time:   Sun, 09 Apr 2023 09:24:19 +0800
Labels:       app=magedu-tomcat-app1-selector
              istio.io/rev=default
              pod-template-hash=8684dfd865
              security.istio.io/tlsMode=istio
              service.istio.io/canonical-name=magedu-tomcat-app1-selector
              service.istio.io/canonical-revision=latest
Annotations:  cni.projectcalico.org/podIP: 10.244.102.98/32
              cni.projectcalico.org/podIPs: 10.244.102.98/32
              prometheus.io/path: /stats/prometheus
              prometheus.io/port: 15020
              prometheus.io/scrape: true
              sidecar.istio.io/status:
                {"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-...
Status:       Running
IP:           10.244.102.98
IPs:
  IP:           10.244.102.98
Controlled By:  ReplicaSet/magedu-tomcat-app1-deployment-8684dfd865
Init Containers:
  istio-init:
    Container ID:  docker://093af1a84851d11d865121f5ea7d418199780b89b8cd7c9102f670776446050b
    Image:         docker.io/istio/proxyv2:1.10.1
    Image ID:      docker://sha256:5c66e8ac89a704ffe3fb20f5027d2e4381e944047d87b3017169308526c1cab7
    Port:          <none>
    Host Port:     <none>
    Args:
      istio-iptables
      -p
      15001
      -z
      15006
      -u
      1337
      -m
      REDIRECT
      -i
      *
      -x

      -b
      *
      -d
      15090,15021,15020
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 09 Apr 2023 09:24:21 +0800
      Finished:     Sun, 09 Apr 2023 09:24:21 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:        10m
      memory:     40Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hvv89 (ro)
Containers:
  sidecar-container:
    Container ID:   docker://070068f0a62a7df798220f676c21aadddf22973db8e98731fd2a65c1151f0948
    Image:          harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar
    Image ID:       docker://sha256:bc09febf9a7b4b63ee0ecd717431234dfd460ab28a3ddcf939d11b655082cc84
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Sun, 09 Apr 2023 09:24:22 +0800
    Ready:          True
    Restart Count:  0
    Environment:
      KAFKA_SERVER:  192.168.40.180:9092,192.168.40.181:9092,192.168.40.182:9092
      TOPIC_ID:      tomcat-app1-topic
      CODEC:         json
    Mounts:
      /var/log/applog from applogs (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hvv89 (ro)
  magedu-tomcat-app1-container:
    Container ID:   docker://9d56e9538555e31553985a7294d9f8fba431d505ce03d2c5ae096b31a95ee763
    Image:          registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1:v1
    Image ID:       docker-pullable://registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1@sha256:60c976c8b8f7a216b3408c9b6a8cc6d0eecebd70e3ed2a47e6b86ccdc980ffa0
    Port:           8080/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Sun, 09 Apr 2023 09:24:23 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  512Mi
    Requests:
      cpu:      500m
      memory:   512Mi
    Liveness:   http-get http://:15020/app-health/magedu-tomcat-app1-container/livez delay=5s timeout=5s period=3s #success=1 #failure=3
    Readiness:  http-get http://:15020/app-health/magedu-tomcat-app1-container/readyz delay=5s timeout=5s period=3s #success=1 #failure=3
    Startup:    http-get http://:15020/app-health/magedu-tomcat-app1-container/startupz delay=5s timeout=1s period=3s #success=1 #failure=3
    Environment:
      password:  123456
      age:       18
    Mounts:
      /apps/tomcat/logs from applogs (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hvv89 (ro)
  istio-proxy:
    Container ID:  docker://efc1172afd7d8c784bd1e51cccda7a46098c190415400d1b88ddbeb6a67e78e3
    Image:         docker.io/istio/proxyv2:1.10.1
    Image ID:      docker://sha256:5c66e8ac89a704ffe3fb20f5027d2e4381e944047d87b3017169308526c1cab7
    Port:          15090/TCP
    Host Port:     0/TCP
    Args:
      proxy
      sidecar
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --serviceCluster
      magedu-tomcat-app1-selector.$(POD_NAMESPACE)
      --proxyLogLevel=warning
      --proxyComponentLogLevel=misc:error
      --log_output_level=default:info
      --concurrency
      2
    State:          Running
      Started:      Sun, 09 Apr 2023 09:24:23 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30
    Environment:
      JWT_POLICY:                    third-party-jwt
      PILOT_CERT_PROVIDER:           istiod
      CA_ADDR:                       istiod.istio-system.svc:15012
      POD_NAME:                      magedu-tomcat-app1-deployment-8684dfd865-jpfql (v1:metadata.name)
      POD_NAMESPACE:                 default (v1:metadata.namespace)
      INSTANCE_IP:                    (v1:status.podIP)
      SERVICE_ACCOUNT:                (v1:spec.serviceAccountName)
      HOST_IP:                        (v1:status.hostIP)
      CANONICAL_SERVICE:              (v1:metadata.labels['service.istio.io/canonical-name'])
      CANONICAL_REVISION:             (v1:metadata.labels['service.istio.io/canonical-revision'])
      PROXY_CONFIG:                  {}

      ISTIO_META_POD_PORTS:          [
                                         {"name":"http","containerPort":8080,"protocol":"TCP"}
                                     ]
      ISTIO_META_APP_CONTAINERS:     sidecar-container,magedu-tomcat-app1-container
      ISTIO_META_CLUSTER_ID:         Kubernetes
      ISTIO_META_INTERCEPTION_MODE:  REDIRECT
      ISTIO_META_WORKLOAD_NAME:      magedu-tomcat-app1-deployment
      ISTIO_META_OWNER:              kubernetes://apis/apps/v1/namespaces/default/deployments/magedu-tomcat-app1-deployment
      ISTIO_META_MESH_ID:            cluster.local
      TRUST_DOMAIN:                  cluster.local
      ISTIO_KUBE_APP_PROBERS:        {"/app-health/magedu-tomcat-app1-container/livez":{"httpGet":{"path":"/myapp/index.html","port":8080,"scheme":"HTTP"},"timeoutSeconds":5},"/app-health/magedu-tomcat-app1-container/readyz":{"httpGet":{"path":"/myapp/index.html","port":8080,"scheme":"HTTP"},"timeoutSeconds":5},"/app-health/magedu-tomcat-app1-container/startupz":{"httpGet":{"path":"/myapp/index.html","port":8080,"scheme":"HTTP"},"timeoutSeconds":1}}
    Mounts:
      /etc/istio/pod from istio-podinfo (rw)
      /etc/istio/proxy from istio-envoy (rw)
      /var/lib/istio/data from istio-data (rw)
      /var/run/secrets/istio from istiod-ca-cert (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hvv89 (ro)
      /var/run/secrets/tokens from istio-token (rw)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  istio-envoy:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  <unset>
  istio-data:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-podinfo:
    Type:  DownwardAPI (a volume populated by information about the pod)
    Items:
      metadata.labels -> labels
      metadata.annotations -> annotations
      limits.cpu -> cpu-limit
      requests.cpu -> cpu-request
  istio-token:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  43200
  istiod-ca-cert:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-ca-root-cert
    Optional:  false
  applogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  default-token-hvv89:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-hvv89
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:          <none>
[root@xianchaomaster1 conf.d]# kubectl exec -it magedu-tomcat-app1-deployment-8684dfd865-jpfql -c sidecar-container bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql logstash]# cd /var/log/applog/
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql applog]# ll
total 1340
-rw-rw-r-- 1 2022 2022    7356 Apr  9 01:24 catalina.2023-04-09.log
-rw-rw-r-- 1 2022 2022    7940 Apr  9 08:06 catalina.out
-rw-rw-r-- 1 2022 2022       0 Apr  9 01:24 host-manager.2023-04-09.log
-rw-rw-r-- 1 2022 2022       0 Apr  9 01:24 localhost.2023-04-09.log
-rw-rw-r-- 1 2022 2022 1355141 Apr  9 08:12 localhost_access_log.2023-04-09.txt
-rw-rw-r-- 1 2022 2022       0 Apr  9 01:24 manager.2023-04-09.log
[root@magedu-tomcat-app1-deployment-8684dfd865-jpfql applog]# tail -f localhost_access_log.2023-04-09.txt
127.0.0.6 - - [09/Apr/2023:16:12:54 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:12:54 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:12:57 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:12:57 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:00 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:00 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:03 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:03 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:06 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:06 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:09 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:09 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:12 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:12 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:15 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:15 +0800] "GET /myapp/index.html HTTP/1.1" 200 15
127.0.0.6 - - [09/Apr/2023:16:13:18 +0800] "GET /myapp/index.html HTTP/1.1" 200 15

 
