部署步骤
1、下载go:
[root@ke-cloud ~]# wget https://golang.google.cn/dl/go1.15.5.linux-amd64.tar.gz
[root@ke-cloud ~]# tar -zxvf go1.15.5.linux-amd64.tar.gz -C /usr/local
2、配置环境变量
[root@ke-cloud ~]# vim /etc/profile
# golang env
export GOROOT=/usr/local/go
export GOPATH=/data/gopath
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin
[root@ke-cloud ~]# source /etc/profile
[root@ke-cloud ~]# mkdir -p /data/gopath && cd /data/gopath
[root@ke-cloud ~]# mkdir -p src pkg bin
3、下载kubeedge源码
[root@ke-cloud ~]# git clone https://github.com/kubeedge/kubeedge $GOPATH/src/github.com/kubeedge/kubeedge
4、编译keadm
[root@ke-cloud ~]# cd $GOPATH/src/github.com/kubeedge/kubeedge
[root@ke-cloud ~]# make all WHAT=keadm
说明:编译后的二进制文件在./_output/local/bin下,单独编译cloudcore与edgecore的方式如下:
[root@ke-cloud ~]# make all WHAT=cloudcore && make all WHAT=edgecore
5、获取token
[root@master1 kubeedge]# keadm gettoken
另一种方法:
kubectl get secret default-token-rzlvq -o=jsonpath='{.data.token}' -n kubeedge | base64 -d
[root@master1 kubeedge]# netstat -pan | grep -i listen | grep kube-apiserve
tcp6 0 0 :::6443 :::* LISTEN 12424/kube-apiserve
6、创建系统资源
cd $GOPATH/src/github.com/kubeedge/kubeedge
cd build/crds/
kubectl create -f .   # 创建该目录下的所有 CRD yaml 文件
7、生成yaml配置文件
root@ubuntu:~/cmd# mkdir -p /etc/kubeedge/config/
root@ubuntu:~/cmd# ./cloudcore --defaultconfig > /etc/kubeedge/config/cloudcore.yaml
8、后台运行cloudcore
nohup cloudcore > cloudcore.log 2>&1 &
9、生成证书
root@ubuntu:~# cd /etc/kubeedge/certs
root@ubuntu:/etc/kubeedge/certs# ls
edge.crt edge.csr edge.key stream.crt stream.csr stream.key
root@ubuntu:/etc/kubeedge/certs# openssl genrsa -out server.key 1024
root@ubuntu:/etc/kubeedge/certs# openssl rsa -in server.key -pubout -out server.crt
root@ubuntu:kubeedge# export CLOUDCOREIPS="192.168.56.105"
root@ubuntu:kubeedge# build/tools/certgen.sh stream
vim /etc/kubeedge/config/cloudcore.yaml
修改下面两行
tlsCertFile: /etc/kubeedge/certs/stream.crt #原来是server.crt
tlsPrivateKeyFile: /etc/kubeedge/certs/stream.key #原来是server.key
10、部署edge
keadm join --cloudcore-ipport=88.88.68.202:10000 --edgenode-name=dege-vito --token=0df0fb87eaaef55ff091e26f53d25432363d29b967cf3c988bf682651a016d3b.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzM4NTgyOTF9.KEL4ZAwR8sZrNHA4y36ljFUI4gFdNHd38wdthOVckO0
故障排除
Oct 11 17:40:50 server1.novalocal systemd[1]: Started edgecore.service.
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.131608 567 server.go:70] Version: v1.8.1
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.134999 567 sql.go:21] Begin to register twin db model
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.135164 567 module.go:34] Module twin registered successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.139316 567 client.go:78] "Connecting to docker on the dockerEndpoint" endpoint="unix:///var/run/docker.sock"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.139852 567 client.go:97] "Start docker client with request timeout" timeout="0s"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.141385 567 docker_service.go:242] "Hairpin mode is set" hairpinMode=hairpin-veth
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.141486 567 cni.go:239] "Unable to update cni config" err="no networks found in /etc/cni/net.d"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.145370 567 hostport_manager.go:72] "The binary conntrack is not installed, this can cause failures in network connection cleanup."
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.145396 567 hostport_manager.go:72] "The binary conntrack is not installed, this can cause failures in network connection cleanup."
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.146830 567 docker_service.go:257] "Docker cri networking managed by the network plugin" networkPluginName="kubernetes.io/no-op"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.157286 567 docker_service.go:264] "Docker Info" dockerInfo=&{ID:6W4W:2PTF:JXCJ:ZNCT:35A6:UN54:2LM2:6MPI:QTAP:27ZN:I5I2:PGW7 Containers:2 ContainersRunning:0 ContainersPaused:0 ContainersStopped:2 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem xfs] [Supports d_type true] [Native Overlay Diff true]] SystemStatus:[] Plugins:{Volume:[local] Network:[bridge host macvlan null overlay] Authorization:[] Log:[]} MemoryLimit:true SwapLimit:true KernelMemory:true KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:false IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6tables:true Debug:false NFd:14 OomKillDisable:true NGoroutines:21 SystemTime:2021-10-11T17:40:51.148873414+08:00 LoggingDriver:journald CgroupDriver:cgroupfs CgroupVersion: NEventsListener:0 KernelVersion:3.10.0-1160.25.1.el7.x86_64 OperatingSystem:CentOS Linux 7 (Core) OSVersion: OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:0xc0002ee000 NCPU:8 MemTotal:16655929344 GenericResources:[] DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:server1.novalocal Labels:[] ExperimentalBuild:false ServerVersion:1.13.1 ClusterStore: ClusterAdvertise: Runtimes:map[docker-runc:{Path:/usr/libexec/docker/docker-runc-current Args:[] Shim:} runc:{Path:docker-runc Args:[] Shim:}] DefaultRuntime:docker-runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:[] Nodes:0 Managers:0 Cluster:0xc00017e420 Warnings:[]} LiveRestoreEnabled:false Isolation: InitBinary:/usr/libexec/docker/docker-init-current ContainerdCommit:{ID: Expected:aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1} RuncCommit:{ID:66aedde759f33c190954815fb765eedc1d782dd9 Expected:9df8b306d01f59d3a8029be411de015b7304dd8f} InitCommit:{ID:fec3683b971d9c3ef73f284f176672c44b448662 Expected:949e6facb77383876aeff8a6944dde66b3089574} 
SecurityOptions:[name=seccomp,profile=/etc/docker/seccomp.json] ProductLicense: DefaultAddressPools:[] Warnings:[]}
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.157313 567 docker_service.go:277] "Setting cgroupDriver" cgroupDriver="cgroupfs"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.157379 567 edged.go:690] RemoteRuntimeEndpoint: "unix:///var/run/dockershim.sock", remoteImageEndpoint: "unix:///var/run/dockershim.sock"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.157388 567 edged.go:693] Starting the GRPC server for the docker CRI shim.
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.167943 567 remote_runtime.go:62] parsed scheme: ""
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.167960 567 remote_runtime.go:62] scheme "" not registered, fallback to default scheme
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.167994 567 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/var/run/dockershim.sock 0 }] }
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168009 567 clientconn.go:948] ClientConn switching balancer to "pick_first"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168054 567 remote_image.go:50] parsed scheme: ""
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168061 567 remote_image.go:50] scheme "" not registered, fallback to default scheme
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168070 567 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/var/run/dockershim.sock 0 }] }
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168075 567 clientconn.go:948] ClientConn switching balancer to "pick_first"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168161 567 container_manager_linux.go:991] "CPUAccounting not enabled for process" pid=567
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168169 567 container_manager_linux.go:994] "MemoryAccounting not enabled for process" pid=567
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168221 567 container_manager_linux.go:991] "CPUAccounting not enabled for process" pid=5354
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.168228 567 container_manager_linux.go:994] "MemoryAccounting not enabled for process" pid=5354
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.196258 567 kuberuntime_manager.go:222] "Container runtime initialized" containerRuntime="docker" version="1.13.1" apiVersion="1.26.0"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.196310 567 edged.go:575] --cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200076 567 container_manager_linux.go:278] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200129 567 container_manager_linux.go:283] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: ContainerRuntime:docker CgroupsPerQOS:true CgroupRoot:/ CgroupDriver:cgroupfs KubeletRootDir:/var/lib/edged ProtectKernelDefaults:false NodeAllocatableConfig:{KubeReservedCgroupName: SystemReservedCgroupName: ReservedSystemCPUs: EnforceNodeAllocatable:map[] KubeReserved:map[] SystemReserved:map[] HardEvictionThresholds:[]} QOSReserved:map[] ExperimentalCPUManagerPolicy:none ExperimentalTopologyManagerScope:container ExperimentalCPUManagerReconcilePeriod:0s ExperimentalMemoryManagerPolicy: ExperimentalMemoryManagerReservedMemory:[] ExperimentalPodPidsLimit:0 EnforceCPULimits:false CPUCFSQuotaPeriod:0s ExperimentalTopologyManagerPolicy:none}
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200152 567 topology_manager.go:120] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200164 567 container_manager_linux.go:314] "Initializing Topology Manager" policy="none" scope="container"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200172 567 container_manager_linux.go:319] "Creating device plugin manager" devicePluginEnabled=false
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200239 567 module.go:34] Module edged registered successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200249 567 module.go:34] Module websocket registered successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200257 567 module.go:34] Module eventbus registered successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200534 567 metamanager.go:45] Begin to register metaManager db model
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.200606 567 module.go:34] Module metaManager registered successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: W1011 17:40:51.200619 567 module.go:37] Module servicebus is disabled, do not register
Oct 11 17:40:51 server1.novalocal edgecore[567]: W1011 17:40:51.200631 567 module.go:37] Module edgestream is disabled, do not register
Oct 11 17:40:51 server1.novalocal edgecore[567]: W1011 17:40:51.200637 567 module.go:37] Module testManager is disabled, do not register
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `device` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `device_attr` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `device_twin` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `sub_topics` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `meta` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: table `meta_v2` already exists, skip
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202014 567 core.go:24] Starting module twin
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202035 567 core.go:24] Starting module edged
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202091 567 core.go:24] Starting module websocket
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202116 567 core.go:24] Starting module eventbus
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202137 567 core.go:24] Starting module metaManager
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202226 567 http.go:40] tlsConfig InsecureSkipVerify true
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202382 567 process.go:113] Begin to sync sqlite
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.202433 567 edged.go:294] Starting edged...
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204193 567 common.go:96] start connect to mqtt server with client id: hub-client-sub-1633945251
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204218 567 common.go:98] client hub-client-sub-1633945251 isconnected: false
Oct 11 17:40:51 server1.novalocal edgecore[567]: E1011 17:40:51.204405 567 csi_plugin.go:224] kubernetes.io/csi: CSIDriverLister not found on KubeletVolumeHost
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204459 567 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204742 567 client.go:150] finish hub-client sub
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204759 567 common.go:96] start connect to mqtt server with client id: hub-client-pub-1633945251
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.204767 567 common.go:98] client hub-client-pub-1633945251 isconnected: false
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205060 567 client.go:166] finish hub-client pub
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205068 567 eventbus.go:63] Init Sub And Pub Client for externel mqtt broker tcp://127.0.0.1:1883 successfully
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205089 567 client.go:91] edge-hub-cli subscribe topic to $hw/events/upload/#
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205190 567 client.go:91] edge-hub-cli subscribe topic to $hw/events/device/+/state/update
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205275 567 client.go:91] edge-hub-cli subscribe topic to $hw/events/device/+/twin/+
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205372 567 client.go:91] edge-hub-cli subscribe topic to $hw/events/node/+/membership/get
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205452 567 client.go:91] edge-hub-cli subscribe topic to SYS/dis/upload_records
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205536 567 client.go:91] edge-hub-cli subscribe topic to +/user/#
Oct 11 17:40:51 server1.novalocal edgecore[567]: I1011 17:40:51.205662 567 client.go:99] list edge-hub-cli-topics status, no record, skip sync
Oct 11 17:40:51 server1.novalocal edgecore[567]: F1011 17:40:51.222413 567 certmanager.go:93] Error: failed to get edge certificate from the cloudcore, error: Get "https://88.88.68.202:10002/edge.crt": x509: certificate signed by unknown authority
Oct 11 17:40:51 server1.novalocal edgecore[567]: goroutine 140 [running]:
Oct 11 17:40:51 server1.novalocal edgecore[567]: k8s.io/klog/v2.stacks(0xc0000c0001, 0xc00037a3c0, 0xcb, 0xe9)
Oct 11 17:40:51 server1.novalocal edgecore[567]: /root/codes/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:1021 +0xb9
Oct 11 17:40:51 server1.novalocal edgecore[567]: k8s.io/klog/v2.(*loggingT).output(0x4656de0, 0xc000000003, 0x0, 0x0, 0xc000374d90, 0x42ee10c, 0xe, 0x5d, 0x0)
删掉旧的 rootCA.crt 证书(edge 端缓存的旧 CA 与 cloudcore 当前证书不匹配,导致上面日志中的 x509: certificate signed by unknown authority 错误;删除后重新 join 会重新下载证书)
[root@server1 vito]# cd /etc/kubeedge/
[root@server1 kubeedge]# ll
drwxr-xr-x 2 root root 24 Oct 11 11:18 ca
-rw-r--r-- 1 root root 132 Oct 11 10:37 cloudcore.service
drwxr-xr-x 2 root root 27 Oct 11 10:46 config
-rw-r--r-- 2 root root 160 Oct 11 10:45 edgecore.service
drwxr-xr-x 4 root root 46 Sep 15 20:01 kubeedge-v1.8.1-linux-amd64
-rw-r--r-- 1 root root 48580944 Oct 11 10:37 kubeedge-v1.8.1-linux-amd64.tar.gz
[root@server1 kubeedge]# cd ca/
[root@server1 ca]# ll
-rw-r--r-- 1 root root 1984 Oct 11 17:47 rootCA.crt
[root@server1 ca]# rm rootCA.crt
rm: remove regular file ‘rootCA.crt’? y
重新部署
[root@server1 ca]# systemctl stop edgecore
[root@server1 ca]# rm /etc/systemd/system/edgecore.service
rm: remove regular file ‘/etc/systemd/system/edgecore.service’? y
[root@server1 ca]# keadm join --cloudcore-ipport=88.88.68.202:10000 --edgenode-name=dege-vito --token=0df0fb87eaaef55ff091e26f53d25432363d29b967cf3c988bf682651a016d3b.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQwMzEwOTF9.VfZFTBvdiEXAowp6sECCfGUV-hYfrpSj8O8__S3XmiY
Host has mosquit+ already installed and running. Hence skipping the installation steps !!!
Expected or Default KubeEdge version 1.8.1 is already downloaded and will checksum for it.
kubeedge-v1.8.1-linux-amd64.tar.gz checksum:
checksum_kubeedge-v1.8.1-linux-amd64.tar.gz.txt content:
kubeedge-v1.8.1-linux-amd64.tar.gz in your path checksum failed and do you want to delete this file and try to download again?
[y/N]:
n
W1011 17:59:48.113950 5753 common.go:279] failed to checksum and will continue to install.
[Run as service] service file already exisits in /etc/kubeedge//edgecore.service, skip download
kubeedge-v1.8.1-linux-amd64/
kubeedge-v1.8.1-linux-amd64/edge/
kubeedge-v1.8.1-linux-amd64/edge/edgecore
kubeedge-v1.8.1-linux-amd64/cloud/
kubeedge-v1.8.1-linux-amd64/cloud/csidriver/
kubeedge-v1.8.1-linux-amd64/cloud/csidriver/csidriver
kubeedge-v1.8.1-linux-amd64/cloud/admission/
kubeedge-v1.8.1-linux-amd64/cloud/admission/admission
kubeedge-v1.8.1-linux-amd64/cloud/cloudcore/
kubeedge-v1.8.1-linux-amd64/cloud/cloudcore/cloudcore
kubeedge-v1.8.1-linux-amd64/version
KubeEdge edgecore is running, For logs visit: journalctl -u edgecore.service -b
[root@server1 ca]# journalctl -u edgecore.service -f