cat elasticsearch.yaml
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: kafka
  labels:
    k8s-app: elasticsearch
spec:
  ports:
  - port: 9200
    name: db
    protocol: TCP
  - port: 9300
    name: transport
    protocol: TCP
  selector:
    k8s-app: elasticsearch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch
  namespace: kafka
  labels:
    k8s-app: elasticsearch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch
  labels:
    k8s-app: elasticsearch
rules:
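The rules block above is empty in the listing. Manifests adapted from the upstream Elasticsearch addon for Kubernetes usually grant read-only access to a handful of core resources, so a plausible reconstruction (an assumption, not necessarily the original) is:

rules:
- apiGroups: [""]
  resources: ["services", "namespaces", "endpoints"]
  verbs: ["get"]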
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kafka
  name: elasticsearch
  labels:
    k8s-app: elasticsearch
subjects:
- kind: ServiceAccount
  name: elasticsearch
  namespace: kafka
roleRef:
  kind: ClusterRole
  name: elasticsearch
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-master
  namespace: kafka
  labels:
    k8s-app: elasticsearch
    role: master
spec:
  replicas: 4
  serviceName: elasticsearch-master
  selector:
    matchLabels:
      k8s-app: elasticsearch
      role: master
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
        role: master
    spec:
      serviceAccountName: elasticsearch
      containers:
      - image: docker.elastic.co/elasticsearch/elasticsearch:7.5.0
        name: elasticsearch-master
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200 # container port
          protocol: TCP
          name: db
        - containerPort: 9300
          protocol: TCP
          name: transport
        env:
        - name: "cluster.name"
          value: "elasticsearch-cluster"
        - name: "network.host"
          value: "0.0.0.0"
        - name: "discovery.seed_hosts"
          value: "elasticsearch"
        - name: "xpack.monitoring.collection.enabled"
          value: "true"
        - name: "cluster.initial_master_nodes"
          value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,elasticsearch-master-3"
        - name: "discovery.zen.ping_timeout"
          value: "5s"
        - name: "node.master"
          value: "true"
        - name: "node.data"
          value: "false"
        - name: "node.ingest"
          value: "false"
        securityContext:
          privileged: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-data
  namespace: kafka
  labels:
    k8s-app: elasticsearch
    role: data
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: elasticsearch
      role: data
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
        role: data
    spec:
      serviceAccountName: elasticsearch
      containers:
      - image: docker.elastic.co/elasticsearch/elasticsearch:7.5.0
        name: elasticsearch-data
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200 # container port
          protocol: TCP
          name: db
        - containerPort: 9300
          protocol: TCP
          name: transport
        env:
        - name: "cluster.name"
          value: "elasticsearch-cluster"
        - name: "network.host"
          value: "0.0.0.0"
        - name: "discovery.seed_hosts"
          value: "elasticsearch"
        - name: "node.master"
          value: "false"
        - name: "node.data"
          value: "true"
        securityContext:
          privileged: true
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-data
  namespace: kafka
  labels:
    k8s-app: elasticsearch-data
spec:
  type: NodePort
  ports:
  - port: 9200
    targetPort: 9200
    protocol: TCP
    name: db
  selector:
    k8s-app: elasticsearch
    role: data
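One thing to note: the StatefulSet sets serviceName: elasticsearch-master, but elasticsearch.yaml does not define a Service with that name. If it is missing in your copy as well, a minimal headless Service sketch (assuming the same pod labels and the 9300 transport port) looks like this:

apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-master
  namespace: kafka
  labels:
    k8s-app: elasticsearch
    role: master
spec:
  clusterIP: None   # headless, gives the StatefulSet pods stable per-pod DNS names
  selector:
    k8s-app: elasticsearch
    role: master
  ports:
  - port: 9300
    name: transport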
cat logstash-kibana.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-config
  namespace: kafka
data:
  kibana.yml: |
    server.host: "0.0.0.0"
    server.name: kibana
    elasticsearch.hosts: [ "http://elasticsearch-data:9200" ]
    xpack.monitoring.ui.container.elasticsearch.enabled: true
    i18n.locale: "zh-CN"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-config
  namespace: kafka
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch-data:9200"]
    xpack.monitoring.enabled: true
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: conf
  namespace: kafka
data:
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    output {
      elasticsearch {
        hosts => ["elasticsearch-data:9200"]
      }
    }
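The elasticsearch output above relies on Logstash's default index naming. If per-beat, per-day indices are wanted instead, the same stanza accepts an explicit index option; the pattern below is only an example:

output {
  elasticsearch {
    hosts => ["elasticsearch-data:9200"]
    # example only: one index per shipping beat per day
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
  }
}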
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kafka
spec:
  type: NodePort
  ports:
  - port: 5601
    targetPort: 5601
    protocol: TCP
  selector:
    k8s-app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kafka
  labels:
    k8s-app: kibana
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
    spec:
      serviceAccountName: elasticsearch
      containers:
      - image: docker.elastic.co/kibana/kibana:7.5.0
        name: kibana
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 5601
          protocol: TCP
        volumeMounts:
        - name: kconfig
          mountPath: /usr/share/kibana/config/kibana.yml
          subPath: kibana.yml
      volumes:
      - name: kconfig
        configMap:
          name: kibana-config
---
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: kafka
spec:
  type: NodePort
  ports:
  - port: 5044
    targetPort: 5044
    protocol: TCP
  selector:
    k8s-app: logstash
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: kafka
  labels:
    k8s-app: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: logstash
  template:
    metadata:
      labels:
        k8s-app: logstash
    spec:
      serviceAccountName: elasticsearch
      containers:
      - image: docker.elastic.co/logstash/logstash:7.5.0
        name: logstash
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 5044
          protocol: TCP
        volumeMounts:
        - name: lconfig
          mountPath: /usr/share/logstash/config/logstash.yml
          subPath: logstash.yml
        - name: lconf
          # the official logstash image loads pipelines from /usr/share/logstash/pipeline
          mountPath: /usr/share/logstash/pipeline/logstash.conf
          subPath: logstash.conf
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      volumes:
      - name: lconfig
        configMap:
          name: logstash-config
      - name: lconf
        configMap:
          name: conf
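Before adding Filebeat, it is worth checking that Elasticsearch, Logstash and Kibana come up; a quick smoke test (commands are illustrative) could be:

kubectl apply -f elasticsearch.yaml -f logstash-kibana.yaml
kubectl -n kafka get pods                              # masters, data nodes, logstash and kibana should reach Running
kubectl -n kafka get svc                               # note the NodePorts assigned to kibana and elasticsearch-data
kubectl -n kafka port-forward svc/elasticsearch-data 9200:9200 &
curl -s http://localhost:9200/_cluster/health?pretty   # status should be green or yellow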
cat filebeat.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"
    processors:
      - add_cloud_metadata:
      - add_host_metadata:
    # enabled: true
    # hosts: ['${ELASTICSEARCH_HOST:logstash}:${ELASTICSEARCH_PORT:5044}']
    # topic: "nginx"
    # partition.round_robin:
    #   reachable_only: false
    # required_acks: 1
    # max_message_bytes: 1000000
    output.logstash:
      hosts: ['${ELASTICSEARCH_HOST:logstash}:${ELASTICSEARCH_PORT:5044}']
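The commented lines above (topic, partition.round_robin, required_acks, max_message_bytes) look like a disabled Kafka output. If logs should go through Kafka rather than straight to Logstash, a rough output.kafka sketch, assuming a broker Service reachable as kafka-svc.kafka:9092, would be (Filebeat only allows one output to be enabled at a time):

output.kafka:
  enabled: true
  hosts: ['kafka-svc.kafka:9092']
  topic: "nginx"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  max_message_bytes: 1000000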
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.5.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        # the active output is output.logstash, so point at the logstash Service in the kafka namespace
        - name: ELASTICSEARCH_HOST
          value: logstash.kafka
        - name: ELASTICSEARCH_PORT
          value: "5044"
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
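Finally, roll out Filebeat and confirm that logs flow end to end. Note that the DaemonSet references a filebeat ServiceAccount in kube-system that is not defined in this file, so it (and its RBAC) must already exist. Illustrative commands:

kubectl apply -f filebeat.yaml
kubectl -n kube-system get pods -l k8s-app=filebeat -o wide   # expect one pod per node
kubectl -n kube-system logs -l k8s-app=filebeat --tail=20     # look for a successful connection to logstash:5044
# then open Kibana via its NodePort and create an index pattern for the incoming indices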