3 Installing ELK + Filebeat on K8s

1 Filebeat:

apiVersion: v1
kind: Service
metadata:
  name: XX
spec:
  ports:
  - name: http
    port: 80
    targetPort: http
  selector:
    app: XX
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: XX
  labels:
    app: XX
spec:
  replicas: 2
  minReadySeconds: 2
  revisionHistoryLimit: 10
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  selector:
    matchLabels:
      app: XX
  template:
    metadata:
      labels:
        app: XX
    spec:
      terminationGracePeriodSeconds: 30
      imagePullSecrets:
        - name: registry-key
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.2.4
        args: [
          "-c", "/etc/filebeat/filebeat.yml",
        ]
        volumeMounts:
          - name: app-logs
            mountPath: /aaa/log
          - name: filebeat-config
            mountPath: /etc/filebeat/
      - name: forecast-user-profile
        image: your_application_imageURL
        volumeMounts:
          - name: app-logs
            mountPath: /var/log
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 9000
        env:
        - name: DB_HOST
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_host
        - name: DB_PORT
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_port
        - name: DB_NAME
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_name
        - name: DB_USER
          valueFrom:
            secretKeyRef:
              name: db-auth
              key: username
        - name: DB_PWD
          valueFrom:
            secretKeyRef:
              name: db-auth
              key: password
      volumes:
        - name: app-logs
          emptyDir: {}
        - name: filebeat-config
          configMap:
            name: filebeat-config
        
        # lifecycle:
          # preStop:
            # exec:
              # command: ["consul", "leave"]
          # postStart:
            # exec:
              # command: ["consul", "entry"]
        # livenessProbe:
        # readinessProbe:
        # resources:
        # workingDir:
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
data:
  filebeat.yml: |
    filebeat.prospectors:
    - input_type: log
      paths:
        - "/aaa/log/*.log"
    output.elasticsearch:
      hosts: ["logstash-elasticsearch-service:9200"]
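
The application container writes its logs to /var/log, which is backed by the shared app-logs emptyDir; the Filebeat sidecar mounts the same volume at /aaa/log, which is why the prospector path above is /aaa/log/*.log. Once the pod is running, a quick sketch for confirming the wiring (the pod name is a placeholder, and the index check assumes curl is available in the Filebeat image):

# The sidecar sees the application's log files through the shared emptyDir
kubectl exec -it <app-pod-name> -c filebeat -- ls /aaa/log

# Filebeat 6.x writes daily indices named filebeat-<version>-YYYY.MM.DD;
# listing the indices confirms logs are reaching Elasticsearch
kubectl exec -it <app-pod-name> -c filebeat -- \
  curl -s "http://logstash-elasticsearch-service:9200/_cat/indices?v"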

2 Elasticsearch:

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    elastic-app: elasticsearch
    role: data
  name: logstash-elasticsearch-deployment
spec:
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      elastic-app: elasticsearch
  template:
    metadata:
      labels:
        elastic-app: elasticsearch
        role: data
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:6.2.4
          ports:
            - containerPort: 9200
              protocol: TCP
          volumeMounts:
            - name: esdata
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: "ES_JAVA_OPTS"
              value: "-Xms256m -Xmx256m"
      volumes:
        - name: esdata
          emptyDir: {}
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true 
---
kind: Service
apiVersion: v1
metadata:
  labels:
    elastic-app: elasticsearch-service
  name: logstash-elasticsearch-service
spec:
  ports:
    - port: 9200
      targetPort: 9200
  selector:
    elastic-app: elasticsearch
  type: NodePort
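
The initContainer above raises vm.max_map_count to 262144 on the node, which Elasticsearch requires at startup. Because the Service is of type NodePort, Elasticsearch can also be reached from outside the cluster once the pods are up; a minimal health check, assuming you can reach a node IP:

# Note the NodePort Kubernetes assigned to the service
kubectl get svc logstash-elasticsearch-service

# Health should report green or yellow once the pods are ready
curl http://<node-ip>:<node-port>/_cluster/health?pretty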

3 Kibana:

---
kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    elastic-app: kibana
  name: kibana
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      elastic-app: kibana
  template:
    metadata:
      labels:
        elastic-app: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:6.2.4
          ports:
            - containerPort: 5601
              protocol: TCP
          volumeMounts:
          - name: config-volume
            mountPath: /usr/share/kibana/config
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
      - name: config-volume
        configMap:
          name: logging-configmap
          items:
            - key: kibana.yml
              path: kibana.yml

---
kind: Service
apiVersion: v1
metadata:
  labels:
    elastic-app: kibana
  name: kibana-service
spec:
  ports:
    - port: 5601
      targetPort: 5601
  selector:
    elastic-app: kibana
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logging-configmap
  namespace: default
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline

    ## Disable X-Pack
    ## see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
    xpack.monitoring.enabled: false
  logstash.conf: |
    # all input will come from filebeat, no local logs
    input {
      beats {
        port => 5044
      }
    }

    ## some more advanced filtering and tagging of incoming kubernetes logs is done in logstash
    filter {
      if [type] == "kube-logs" {
        mutate {
          rename => ["log", "message"]
          add_tag => [ "pelias", "kubernetes" ]
        }

        date {
          match => ["time", "ISO8601"]
          remove_field => ["time"]
        }

        # all standard container logs files match a common pattern
        grok {
          match => { "source" => "%{DATA:pod_name}" }
          remove_field => ["source"]
        }

        # system services have a simpler log filename format that does not include namespace, pod names, or container ids
        grok {
            match => { "source" => "%{DATA:container_name}" }
            add_field => { "namespace" => "default" }
            remove_field => ["source"]
        }
      }
    }

    output {
        elasticsearch {
            hosts => [ "logstash-elasticsearch-service:9200" ]
        }
    }
    
  kibana.yml: |
    ## Default Kibana configuration from kibana-docker.
    ## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
    #
    #server.host: "0"
    elasticsearch.url: http://logstash-elasticsearch-service:9200
    ## Disable X-Pack
    ## see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
    ##     https://www.elastic.co/guide/en/x-pack/current/installing-xpack.html#xpack-enabling
    #
    xpack.security.enabled: false
    xpack.monitoring.enabled: false
    xpack.ml.enabled: false
    xpack.graph.enabled: false
    xpack.reporting.enabled: false
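
Kibana is likewise exposed through a NodePort Service. Once the pod is running, a quick way to reach the UI and see the shipped logs (the index pattern assumes Filebeat's default index naming):

# Note the assigned NodePort, then browse to http://<node-ip>:<node-port>
kubectl get svc kibana-service

# In Kibana, create an index pattern such as "filebeat-*" under Management -> Index Patterns
# to search the logs shipped by the sidecar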

4 Common commands:

       kubectl delete -f xxxxxxxxx.yaml

       kubectl create -f xxxxxxxxxx.yaml

       kubectl get pods

       kubectl get service

       kubectl logs -f podname -c containername -n namespacename

       kubectl exec -it podname -- sh
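
The pieces depend on each other, so a sensible order is Elasticsearch first, then Kibana, then the application with its Filebeat sidecar. A sketch, assuming the three listings above were saved as separate files (the filenames are assumptions):

kubectl create -f elasticsearch.yaml   # section 2: Deployment + Service
kubectl create -f kibana.yaml          # section 3: Deployment + Service + ConfigMap
kubectl create -f filebeat.yaml        # section 1: app Deployment with Filebeat sidecar

kubectl get pods      # wait until all pods are Running
kubectl get service   # note the NodePorts for Elasticsearch and Kibana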

       

Reposted from www.cnblogs.com/liufei1983/p/9171365.html