Building an ECK + Logstash + Filebeat logging system on an offline k8s cluster

Preface

We recently finished building the k8s cluster for a project and then needed an ELK log collection system, again starting from scratch. Honestly, running the entire ELK stack on the cluster puts quite a bit of load on it; this is only a first trial deployment, and the plan will certainly change later, since the current number of machines is too small to meet the real requirements. This post simply records the setup process for future reference.

Machine preparation

Server    IP              OS       Role
master1   172.16.140.100  centos7  k8s master node 1; ceph-node, ceph-osd, ceph-mds
master2   172.16.140.101  centos7  k8s master node 2; ceph-node, ceph-osd, ceph-mds
node1     172.16.140.102  centos7  k8s worker node 1; ceph-node, ceph-osd, ceph-mds
node2     172.16.140.103  centos7  k8s worker node 2; ceph-node, ceph-osd, ceph-mds, ceph-mgr; local yum repo server
test      192.168.2.190   centos7  internet-connected server used for downloading packages

Prepare the images and files needed for deployment

root@test:

Download the required images and files

# Create a working directory
mkdir resource && cd resource
# Pull the required images
docker pull docker.elastic.co/beats/filebeat:7.5.1
docker pull docker.elastic.co/logstash/logstash:7.5.1
docker pull docker.elastic.co/kibana/kibana:7.5.1
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.5.1
docker pull docker.elastic.co/eck/eck-operator:1.0.0-beta1
# Retag the images for the private registry
docker tag docker.elastic.co/beats/filebeat:7.5.1 172.16.140.103/kubernetes-deploy/filebeat:7.5.1
docker tag docker.elastic.co/logstash/logstash:7.5.1 172.16.140.103/kubernetes-deploy/logstash:7.5.1
docker tag docker.elastic.co/kibana/kibana:7.5.1 172.16.140.103/kubernetes-deploy/kibana:7.5.1
docker tag docker.elastic.co/elasticsearch/elasticsearch:7.5.1 172.16.140.103/kubernetes-deploy/elasticsearch:7.5.1
docker tag docker.elastic.co/eck/eck-operator:1.0.0-beta1 172.16.140.103/kubernetes-deploy/eck-operator:1.0.0-beta1
# Save the images to archives (note: docker save does not compress, so the .tar.gz suffix is just a name)
docker save 172.16.140.103/kubernetes-deploy/filebeat:7.5.1 -o filebeat.tar.gz
docker save 172.16.140.103/kubernetes-deploy/logstash:7.5.1 -o logstash.tar.gz
docker save 172.16.140.103/kubernetes-deploy/kibana:7.5.1 -o kibana.tar.gz
docker save 172.16.140.103/kubernetes-deploy/elasticsearch:7.5.1 -o elasticsearch.tar.gz
docker save 172.16.140.103/kubernetes-deploy/eck-operator:1.0.0-beta1 -o eck-operator.tar.gz
# Download the ECK deployment manifest
wget https://download.elastic.co/downloads/eck/1.0.0-beta1/all-in-one.yaml

Point the manifest at the local registry

sed -i "s/docker.elastic.co\/eck/172.16.140.103\/kubernetes-deploy/g" all-in-one.yaml
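To sanity-check the rewrite, list the image references that remain in the manifest; every one should now point at the private registry:

grep "image:" all-in-one.yaml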

Package everything up

cd ../
tar -zcf resource.tar.gz resource

Copy the archive to master1's /root directory
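For example, assuming the test server can reach master1 over SSH (otherwise move the archive by USB drive or similar, since the cluster network is offline):

scp resource.tar.gz root@172.16.140.100:/root/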

root@master1

  • Load the images from the archive and push them to the private registry
tar -xvf resource.tar.gz
cd resource
docker load -i filebeat.tar.gz
docker load -i logstash.tar.gz
docker load -i kibana.tar.gz
docker load -i elasticsearch.tar.gz
docker load -i eck-operator.tar.gz
# Push to the private registry
docker login 172.16.140.103
docker push 172.16.140.103/kubernetes-deploy/filebeat:7.5.1
docker push 172.16.140.103/kubernetes-deploy/logstash:7.5.1
docker push 172.16.140.103/kubernetes-deploy/kibana:7.5.1
docker push 172.16.140.103/kubernetes-deploy/elasticsearch:7.5.1
docker push 172.16.140.103/kubernetes-deploy/eck-operator:1.0.0-beta1
# After the push completes, remove the local images (note: this removes ALL local images)
docker rmi $(docker images -aq)
cd ~
mkdir -p yaml/eck
cp resource/all-in-one.yaml yaml/eck
# Remove files that are no longer needed
rm -rf resource resource.tar.gz
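It's worth confirming the pushes actually landed. A quick check via the Docker Registry v2 API (assuming the registry at 172.16.140.103 exposes it; substitute the credentials used for docker login, shown here as placeholders):

# List the tags the registry holds for one of the pushed images
curl -k -u <user>:<password> https://172.16.140.103/v2/kubernetes-deploy/elasticsearch/tags/list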

Deploy ECK

root@all:

  • Set the kernel parameter Elasticsearch requires
# Set vm.max_map_count to 262144 (a kernel virtual-memory setting, not a JVM flag)
sysctl -w vm.max_map_count=262144
# Persist across reboots
cat << EOF >> /etc/sysctl.conf
vm.max_map_count=262144
EOF
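A quick check that the setting is active on each node:

sysctl vm.max_map_count
# expected output: vm.max_map_count = 262144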

root@master1:

  • Deploy the eck-operator
kubectl apply -f yaml/eck/all-in-one.yaml
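Before creating any Elastic resources, wait for the operator to come up. A minimal check (in this ECK version the all-in-one manifest creates an elastic-operator StatefulSet in the elastic-system namespace):

kubectl -n elastic-system get pods
kubectl -n elastic-system logs statefulset/elastic-operator | tail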
  • Deploy Elasticsearch
cat <<EOF | kubectl apply -f -
apiVersion: elasticsearch.k8s.elastic.co/v1beta1
kind: Elasticsearch
metadata:
  name: eck-cluster
  namespace: elastic-system
spec:
  version: 7.5.1
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  image: 172.16.140.103/kubernetes-deploy/elasticsearch:7.5.1
  nodeSets:
  - name: default
    count: 1
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
        storageClassName: rbd
EOF
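Once the pod is running, the operator reports cluster health on the custom resource, and since TLS was disabled above, the HTTP endpoint can be probed over plain HTTP. A sketch using port-forward:

kubectl -n elastic-system get elasticsearch eck-cluster
# Read the elastic user's password and query cluster health
PW=$(kubectl get secret -n elastic-system eck-cluster-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
kubectl -n elastic-system port-forward service/eck-cluster-es-http 9200 &
curl -u "elastic:$PW" "http://localhost:9200/_cluster/health?pretty"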
  • Deploy Kibana
cat <<EOF | kubectl apply -f -
apiVersion: kibana.k8s.elastic.co/v1beta1
kind: Kibana
metadata:
  name: eck-cluster
  namespace: elastic-system
spec:
  version: 7.5.1
  count: 1
  image: 172.16.140.103/kubernetes-deploy/kibana:7.5.1
  elasticsearchRef:
    name: eck-cluster
EOF
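The Kibana resource reports health the same way (the pod selector below assumes ECK's standard kibana.k8s.elastic.co/name label):

kubectl -n elastic-system get kibana eck-cluster
kubectl -n elastic-system get pods -l kibana.k8s.elastic.co/name=eck-cluster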

Deploy Logstash

  • Generate the configuration files
# Create the config directory
mkdir -p yaml/eck/logstash
# Fetch the password of the elastic user
ES_PASSWORD=$(kubectl get secret -n elastic-system eck-cluster-es-elastic-user -o yaml | awk '/elastic:/ {print $2}' | base64 --decode)
# Create logstash.yml
cat << EOF > yaml/eck/logstash/logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: [ "http://eck-cluster-es-http.elastic-system:9200" ]
xpack.monitoring.elasticsearch.username: "elastic" 
xpack.monitoring.elasticsearch.password: "$ES_PASSWORD"
EOF
# Create pipelines.yml
cat << EOF > yaml/eck/logstash/pipelines.yml
# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
#   https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html

- pipeline.id: main
  path.config: "/usr/share/logstash/pipeline"
  queue.type: persisted
EOF
# Create logstash.conf (the pipeline definition)
cat << EOF > yaml/eck/logstash/logstash.conf
input {
  beats {
    port => 5044
  }
}

output {
  elasticsearch {
    hosts => ["http://eck-cluster-es-http.elastic-system:9200"]
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "$ES_PASSWORD"
  }
}
EOF
# Create the ConfigMap
kubectl create configmap logstash-custom-config -n elastic-system --from-file=yaml/eck/logstash
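A quick look at the ConfigMap confirms all three files were picked up:

kubectl -n elastic-system describe configmap logstash-custom-config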
  • Deploy Logstash
cat <<EOF | kubectl apply -f -
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: logstash-data
  namespace: elastic-system
spec:
  storageClassName: rbd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: logstash
  namespace: elastic-system
  labels: 
    app: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: 172.16.140.103/kubernetes-deploy/logstash:7.5.1
        volumeMounts:
        - name: config
          mountPath: "/usr/share/logstash/config/logstash.yml"
          subPath: logstash.yml
          readOnly: true
        - name: config
          mountPath: "/usr/share/logstash/pipeline/logstash.conf"
          subPath: "logstash.conf"
          readOnly: true
        - name: config
          mountPath: "/usr/share/logstash/config/pipelines.yml"
          subPath: "pipelines.yml"
          readOnly: true
        - name: data
          mountPath: "/usr/share/logstash/data/"
          subPath: "data"
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: logstash-data
      - name: config
        configMap:
          defaultMode: 0600
          name: logstash-custom-config
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: logstash
  name: logstash-http
  namespace: elastic-system
spec:
  ports:
  - name: http
    port: 5044
    protocol: TCP
    targetPort: 5044
  selector:
    app: logstash
EOF
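Check the rollout, then watch the logs for the main pipeline starting and the beats input listening on 5044:

kubectl -n elastic-system rollout status deployment/logstash
kubectl -n elastic-system logs deployment/logstash | tail -n 20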

Deploy Filebeat

  • Generate the deployment manifest from the official example
# Fetch the cluster IP of the Logstash service
LOGSTASH_IP=$(kubectl get svc -n elastic-system logstash-http | awk '/logstash-http/ {print $3}')
# Generate the manifest
cat <<\EOF > yaml/eck/file-beat.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: elastic-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      host: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    # output.elasticsearch:
    #   hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
    #   username: ${ELASTICSEARCH_USERNAME}
    #   password: ${ELASTICSEARCH_PASSWORD}
    output.logstash:
      hosts: ["LOGSTASH_IP:5044"]
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: elastic-system
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: 172.16.140.103/kubernetes-deploy/filebeat:7.5.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: eck-cluster-es-http.elastic-system
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: 2zmjkbnfft5827srml5hpj2v
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: elastic-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: elastic-system
  labels:
    k8s-app: filebeat
---
EOF
# Substitute the service IP
sed -i "s/LOGSTASH_IP/$LOGSTASH_IP/g" yaml/eck/file-beat.yaml
# Create the Filebeat resources
kubectl apply -f yaml/eck/file-beat.yaml
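Filebeat should schedule one pod per node; the logs show whether it connected to Logstash:

kubectl -n elastic-system get daemonset filebeat
kubectl -n elastic-system logs daemonset/filebeat | grep -i logstash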

Create an Ingress for convenient access

root@master1

cat <<\EOF | kubectl apply -f -
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-kibana
  namespace: elastic-system
  annotations:
    nginx.ingress.kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
  rules:
    - host: kibana.com
      http:
        paths:
          - path: /
            backend:
              serviceName: eck-cluster-kb-http
              servicePort: 5601
EOF
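Verify the Ingress was admitted (172.16.140.150 below is assumed to be the address of the nginx ingress controller):

kubectl -n elastic-system get ingress ingress-kibana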
  • Log in to Kibana
    Add 172.16.140.150 kibana.com to your hosts file.
    Open https://kibana.com:30443/ in a browser.
    Enter the user elastic with the password obtained earlier as ES_PASSWORD.
    Log in, configure your indices, set up monitoring, and start using the stack.
    This setup barely scratches the surface of Logstash; its plugin ecosystem is actually very rich, and I may write up my experience once I've used it more.
