| Name | Version | Notes |
| --- | --- | --- |
| IP | 10.10.10.99 | CentOS 7 (2009) |
| kubesphere | v3.3.0 | |
| kubernetes | v1.22.10 | |
| oap | 8.8.1 | |
| ui | 8.8.1 | |
| elasticsearch | 7.5.1 | |
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.2.1 sh -
./kk create cluster --with-kubernetes v1.22.10 --with-kubesphere v3.3.0
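kk takes a while to finish, and the KubeSphere components keep coming up for several minutes afterwards. One way to check progress is shown below; the label selector for the ks-installer pod follows recent KubeSphere docs and may need adjusting for other versions:
kubectl get nodes
kubectl get pods -A
# follow the ks-installer log until it prints the console address and default account
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f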
git clone git@github.com:apache/skywalking-kubernetes.git
vim /etc/profile && source /etc/profile
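The helm commands below rely on ${REPO}, ${SKYWALKING_RELEASE_NAME} and ${SKYWALKING_RELEASE_NAMESPACE}, which is presumably what /etc/profile is edited to provide. Example values (the names are arbitrary; the namespace must exist before running helm install -n):
# appended to /etc/profile (example values)
export REPO=skywalking
export SKYWALKING_RELEASE_NAME=skywalking
export SKYWALKING_RELEASE_NAMESPACE=skywalking
# create the target namespace once if it does not exist yet
kubectl create namespace "${SKYWALKING_RELEASE_NAMESPACE}"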
helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm

helm install "${SKYWALKING_RELEASE_NAME}" ${REPO}/skywalking -n "${SKYWALKING_RELEASE_NAMESPACE}" \
  --set oap.image.tag=8.8.1 \
  --set oap.storageType=elasticsearch \
  --set ui.image.tag=8.8.1 \
  --set elasticsearch.imageTag=7.5.1 \
  --set oap.replicas=1 \
  --set oap.javaOpts="-Xms1g -Xmx1g" \
  --set elasticsearch.minimumMasterNodes=1 \
  --set elasticsearch.esJavaOpts="-Xms1g -Xmx1g" \
  --set elasticsearch.replicas=1
Alternatively, install with the same settings kept in a values file:
helm install "${SKYWALKING_RELEASE_NAME}" ${REPO}/skywalking -n "${SKYWALKING_RELEASE_NAMESPACE}" -f values.yaml
Contents of values.yaml:
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for skywalking.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
serviceAccounts:
  oap:

imagePullSecrets: []

initContainer:
  image: busybox
  tag: '1.30'

oap:
  name: oap
  # When 'dynamicConfigEnabled' set to true, enable oap dynamic configuration through k8s configmap,
  # Note: The default configmap data is empty, please refer to the detailed documentation (https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/dynamic-config.md)
  # Sync period in seconds. Defaults to 60 seconds. env: SW_CONFIG_CONFIGMAP_PERIOD
  dynamicConfigEnabled: false
  image:
    repository: skywalking.docker.scarf.sh/apache/skywalking-oap-server
    tag: 8.8.1 # Must be set explicitly
    pullPolicy: IfNotPresent
  storageType: elasticsearch
  initEs: true # Whether need to initial ES
  ports:
    # add more ports here if you need, for example
    # zabbix: 10051
    grpc: 11800
    rest: 12800
  replicas: 1
  service:
    type: ClusterIP
  javaOpts: -Xmx1g -Xms1g
  antiAffinity: "soft"
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    limits:
      cpu: 2
      memory: 2Gi
    requests:
      cpu: 2
      memory: 2Gi
  # podAnnotations:
  #   example: oap-foo
  envoy:
    als:
      enabled: false
      # more envoy ALS ,please refer to https://github.com/apache/skywalking/blob/master/docs/en/setup/envoy/als_setting.md#observe-service-mesh-through-als
  istio:
    adapter:
      enabled: false
  env:
    # more env, please refer to https://hub.docker.com/r/apache/skywalking-oap-server
    # or https://github.com/apache/skywalking-docker/blob/master/6/6.4/oap/README.md#sw_telemetry

ui:
  name: ui
  replicas: 1
  image:
    repository: skywalking.docker.scarf.sh/apache/skywalking-ui
    tag: 8.8.1 # Must be set explicitly
    pullPolicy: IfNotPresent
  # podAnnotations:
  #   example: oap-foo
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  ingress:
    enabled: false
    annotations: {}
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    path: /
    hosts: []
      # - skywalking.local
    tls: []
      # - secretName: skywalking-tls
      #   hosts:
      #     - skywalking.local
  service:
    type: ClusterIP
    # clusterIP: None
    externalPort: 80
    internalPort: 8080
    ## nodePort is the port on each node on which this service is exposed when type=NodePort
    ## Default: auto-allocated port if not specified. 30080 is just an example
    ##
    # nodePort: 30080
    ## External IP addresses of service
    ## Default: nil
    ##
    # externalIPs:
    # - 192.168.0.1
    #
    ## LoadBalancer IP if service.type is LoadBalancer
    ## Default: nil
    ##
    # loadBalancerIP: 10.2.2.2
    # Annotation example: setup ssl with aws cert when service.type is LoadBalancer
    # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
    annotations: {}
    ## Limit load balancer source ips to list of CIDRs (where available)
    # loadBalancerSourceRanges: []

esInit:
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []

elasticsearch:
  enabled: true
  config: # For users of an existing elasticsearch cluster,takes effect when `elasticsearch.enabled` is false
    port:
      http: 9200
    host: elasticsearch # es service on kubernetes or host
    user: "elastic" # [optional]
    password: "elastic" # [optional]
  clusterName: "elasticsearch"
  nodeGroup: "master"
  # The service that non master groups will try to connect to when joining the cluster
  # This should be set to clusterName + "-" + nodeGroup for your master group
  masterService: ""
  # Elasticsearch roles that will be applied to this nodeGroup
  # These will be set as environment variables. E.g. node.master=true
  roles:
    master: "true"
    ingest: "true"
    data: "true"
  replicas: 1
  minimumMasterNodes: 1
  esMajorVersion: ""
  # Allows you to add any config files in /usr/share/elasticsearch/config/
  # such as elasticsearch.yml and log4j2.properties
  esConfig: {}
  # elasticsearch.yml: |
  #   key:
  #     nestedkey: value
  # log4j2.properties: |
  #   key = value
  # Extra environment variables to append to this nodeGroup
  # This will be appended to the current 'env:' key. You can use any of the kubernetes env
  # syntax here
  extraEnvs: []
  # - name: MY_ENVIRONMENT_VAR
  #   value: the_value_goes_here
  # A list of secrets and their paths to mount inside the pod
  # This is useful for mounting certificates for security and for mounting
  # the X-Pack license
  secretMounts: []
  # - name: elastic-certificates
  #   secretName: elastic-certificates
  #   path: /usr/share/elasticsearch/config/certs
  image: "docker.elastic.co/elasticsearch/elasticsearch"
  imageTag: "7.5.1"
  imagePullPolicy: "IfNotPresent"
  podAnnotations: {}
    # iam.amazonaws.com/role: es-cluster
  # additionals labels
  labels: {}
  esJavaOpts: "-Xmx4g -Xms4g"
  resources:
    requests:
      cpu: "2000m"
      memory: "6Gi"
    limits:
      cpu: "2000m"
      memory: "6Gi"
  initResources:
    limits:
      cpu: "512m"
      memory: "1024Mi"
    requests:
      cpu: "512m"
      memory: "1024Mi"
  sidecarResources:
    limits:
      cpu: "1000m"
      memory: "1048Mi"
    requests:
      cpu: "1000m"
      memory: "1048Mi"
  networkHost: "0.0.0.0"
  volumeClaimTemplate:
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 50Gi
  rbac:
    create: false
    serviceAccountName: ""
  podSecurityPolicy:
    create: false
    name: ""
    spec:
      privileged: true
      fsGroup:
        rule: RunAsAny
      runAsUser:
        rule: RunAsAny
      seLinux:
        rule: RunAsAny
      supplementalGroups:
        rule: RunAsAny
      volumes:
        - secret
        - configMap
        - persistentVolumeClaim
  persistence:
    enabled: false
    annotations: {}
  extraVolumes: ""
    # - name: extras
    #   emptyDir: {}
  extraVolumeMounts: ""
    # - name: extras
    #   mountPath: /usr/share/extras
    #   readOnly: true
  extraInitContainers: ""
    # - name: do-something
    #   image: busybox
    #   command: ['do', 'something']
  # This is the PriorityClass settings as defined in
  # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
  priorityClassName: ""
  # By default this will make sure two pods don't end up on the same node
  # Changing this to a region would allow you to spread pods across regions
  antiAffinityTopologyKey: "kubernetes.io/hostname"
  # Hard means that by default pods will only be scheduled if there are enough nodes for them
  # and that they will never end up on the same node. Setting this to soft will do this "best effort"
  antiAffinity: "soft"
  # This is the node affinity settings as defined in
  # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
  nodeAffinity: {}
  # The default is to deploy all pods serially. By setting this to parallel all pods are started at
  # the same time when bootstrapping the cluster
  podManagementPolicy: "Parallel"
  protocol: http
  httpPort: 9200
  transportPort: 9300
  service:
    labels: {}
    labelsHeadless: {}
    type: ClusterIP
    nodePort: ""
    annotations: {}
    httpPortName: http
    transportPortName: transport
  updateStrategy: RollingUpdate
  # This is the max unavailable setting for the pod disruption budget
  # The default value of 1 will make sure that kubernetes won't allow more than 1
  # of your pods to be unavailable during maintenance
  maxUnavailable: 1
  podSecurityContext:
    fsGroup: 1000
    runAsUser: 1000
  # The following value is deprecated,
  # please use the above podSecurityContext.fsGroup instead
  fsGroup: ""
  securityContext:
    capabilities:
      drop:
      - ALL
    #readOnlyRootFilesystem: true
    runAsNonRoot: true
    runAsUser: 1000
  # How long to wait for elasticsearch to stop gracefully
  terminationGracePeriod: 120
  sysctlVmMaxMapCount: 262144
  readinessProbe:
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 3
    timeoutSeconds: 5
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
  clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
  ## Use an alternate scheduler.
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  imagePullSecrets: []
  nodeSelector: {}
  tolerations: []
  # Enabling this will publically expose your Elasticsearch instance.
  # Only enable this if you have security enabled on your cluster
  ingress:
    enabled: false
    annotations: {}
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    path: /
    hosts:
      - chart-example.local
    tls: []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local
  nameOverride: ""
  fullnameOverride: ""
  # https://github.com/elastic/helm-charts/issues/63
  masterTerminationFix: true
  # Commented out: the hook script has no execute permission
  # lifecycle:
  #   preStop:
  #     exec:
  #       command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  #   postStart:
  #     exec:
  #       command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  sysctlInitContainer:
    enabled: true
  keystore: []

satellite:
  name: satellite
  replicas: 1
  enabled: false
  image:
    repository: skywalking.docker.scarf.sh/apache/skywalking-satellite
    tag: 8.8.1 # Must be set explicitly
    pullPolicy: IfNotPresent
  ports:
    grpc: 11800
    prometheus: 1234
  service:
    type: ClusterIP
  antiAffinity: "soft"
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    limits:
      cpu: 1
      memory: 1Gi
    requests:
      cpu: 1
      memory: 1Gi
  podAnnotations:
    # example: oap-foo
  env:
    # more env, please refer to https://skywalking.apache.org/docs/skywalking-satellite/latest/en/setup/readme/#satellite_configyaml

nameOverride: ""
fullnameOverride: ""
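After the install completes, a quick sanity check and a way to reach the UI without an Ingress. The service names below assume the release is called skywalking, since the chart names its services after the release (skywalking-ui, skywalking-oap); adjust them for a different release name:
kubectl get pods -n "${SKYWALKING_RELEASE_NAMESPACE}"
# expose the UI locally at http://localhost:8080
kubectl port-forward svc/skywalking-ui 8080:80 -n "${SKYWALKING_RELEASE_NAMESPACE}"
Applications report to the OAP gRPC port (11800 in the values above). For a Java service running in the same cluster, the agent would typically be pointed at the OAP service, for example (the agent path, service name and namespace are placeholders):
export SW_AGENT_NAME=demo-service
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=skywalking-oap.skywalking.svc:11800
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar app.jar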