Ubuntu 16.04.5 LTS
All of the following steps are performed as the root user.
Install v2ray
curl -L https://install.direct/go.sh -O && bash go.sh
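A quick sanity check that the install worked; go.sh places the binary under /usr/bin/v2ray on this release, and the -version flag should print the version banner:
/usr/bin/v2ray/v2ray -version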
Configure v2ray
cd /etc/v2ray/
cp config.json config.json.ori
vi /etc/v2ray/config.json
{
  "log": {
    "access": "/var/log/v2ray/access.log",
    "error": "/var/log/v2ray/error.log",
    "loglevel": "warning"
  },
  "inbound": {
    "port": 1080,
    "protocol": "vmess",
    "settings": {
      "clients": [
        {
          "id": "your-uuid",
          "level": 1,
          "alterId": 64
        }
      ]
    }
  },
  "outbound": {
    "protocol": "freedom",
    "settings": {}
  },
  "outboundDetour": [
    {
      "protocol": "blackhole",
      "settings": {},
      "tag": "blocked"
    }
  ],
  "routing": {
    "strategy": "rules",
    "settings": {
      "rules": [
        {
          "type": "field",
          "ip": [
            "0.0.0.0/8",
            "10.0.0.0/8",
            "100.64.0.0/10",
            "127.0.0.0/8",
            "169.254.0.0/16",
            "172.16.0.0/12",
            "192.0.0.0/24",
            "192.0.2.0/24",
            "192.168.0.0/16",
            "198.18.0.0/15",
            "198.51.100.0/24",
            "203.0.113.0/24",
            "::1/128",
            "fc00::/7",
            "fe80::/10"
          ],
          "outboundTag": "blocked"
        }
      ]
    }
  }
}
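The id field must be a valid UUID; "your-uuid" above is just a placeholder. Two easy ways to generate one on Ubuntu (uuidgen comes from the uuid-runtime package; the /proc interface needs nothing extra):
uuidgen
cat /proc/sys/kernel/random/uuid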
Test the configuration file
/usr/bin/v2ray/v2ray -test -config /etc/v2ray/config.json
You should see this output:
An unified platform for anti-censorship.
Configuration OK.
Set up the V2Ray service
systemctl enable v2ray
systemctl start v2ray
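Before moving on, it is worth a smoke test of the SOCKS5 inbound; any reachable HTTPS site works here, google.com is just an example:
systemctl status v2ray
curl --socks5 127.0.0.1:1080 -I https://www.google.com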
Install polipo
apt-get -y install polipo
vi /etc/polipo/config
# This file only needs to list configuration variables that deviate
# from the default values. See /usr/share/doc/polipo/examples/config.sample
# and "polipo -v" for variables you can tweak and further information.
logSyslog = true
logFile = /var/log/polipo/polipo.log
socksParentProxy = "127.0.0.1:1080"
socksProxyType = socks5
chunkHighMark = 50331648
objectHighMark = 16384
serverMaxSlots = 64
serverSlots = 16
serverSlots1 = 32
proxyAddress = "0.0.0.0"
proxyPort = 8123
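Restart polipo so the new config takes effect, then test the whole chain (curl -> polipo -> v2ray); again the URL is only an example:
systemctl restart polipo
curl -x http://127.0.0.1:8123 -I https://www.google.com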
Configure apt-get to go through the proxy
cat <<EOF >/etc/apt/apt.conf.d/90proxy
Acquire::http::Proxy "http://127.0.0.1:8123/";
Acquire::https::Proxy "http://127.0.0.1:8123/";
Acquire::ftp::Proxy "http://127.0.0.1:8123/";
EOF
Install kubeadm and docker
export https_proxy=http://127.0.0.1:8123
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y docker.io kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
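To confirm the packages landed and are pinned (apt-mark showhold should list kubelet, kubeadm, and kubectl):
apt-mark showhold
kubeadm version
docker --version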
Deploy kubernetes
kubeadm.yaml
The kubeadm.yaml that teacher Zhang Lei provides is out of date and needs to be converted to the new format.
cd ~
vi kubeold.yaml
Note that apiVersion must be changed to kubeadm.k8s.io/v1alpha2:
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
controllerManagerExtraArgs:
  horizontal-pod-autoscaler-use-rest-clients: "true"
  horizontal-pod-autoscaler-sync-period: "10s"
  node-monitor-grace-period: "10s"
apiServerExtraArgs:
  runtime-config: "api/all=true"
kubernetesVersion: "stable-1.11"
kubeadm config migrate --old-config kubeold.yaml --new-config kubeadm.yaml
View the new-format kubeadm.yaml
cat kubeadm.yaml
apiEndpoint:
  advertiseAddress: 172.28.13.112
  bindPort: 6443
apiVersion: kubeadm.k8s.io/v1alpha3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: yyztn6.2anrticbe0z5dyx4
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServerExtraArgs:
  runtime-config: api/all=true
apiVersion: kubeadm.k8s.io/v1alpha3
auditPolicy:
  logDir: /var/log/kubernetes/audit
  logMaxAge: 2
  path: ""
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
controllerManagerExtraArgs:
  horizontal-pod-autoscaler-sync-period: 10s
  horizontal-pod-autoscaler-use-rest-clients: "true"
  node-monitor-grace-period: 10s
etcd:
  local:
    dataDir: /var/lib/etcd
    image: ""
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.11.3
networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
unifiedControlPlaneImage: ""
Initialize k8s
cd ~
- Disable the swap partition
swapoff -a
vi /etc/fstab
Comment out the swap entry.
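If you prefer a one-liner to hand-editing, this sketch comments out every fstab line that mounts swap; review /etc/fstab afterwards to be safe:
sed -i '/\sswap\s/ s/^/#/' /etc/fstab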
Configure docker to go through the proxy
mkdir /etc/systemd/system/docker.service.d/
cd /etc/systemd/system/docker.service.d/
cat <<EOF >http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://127.0.0.1:8123/"
Environment="HTTPS_PROXY=https://127.0.0.1:8123/"
EOF
systemctl daemon-reload
systemctl restart docker
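You can verify the daemon picked up the proxy, and optionally pre-pull the control-plane images before running init (kubeadm config images pull is available from v1.11 on):
systemctl show --property=Environment docker
kubeadm config images pull --config ~/kubeadm.yaml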
- Initialize k8s
kubeadm init --config kubeadm.yaml --ignore-preflight-errors=KubeletVersion
After initialization succeeds, you will see something like:
kubeadm join 172.28.13.124:6443 --token yyztn6.2anrticbe0z5dyx4 --discovery-token-ca-cert-hash sha256:c80c38a8d6354ec1c868333e62251e67303f9aaf9baae358cc0d79a79bf49a1
Write this command down; you will need it later when worker nodes join the cluster.
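If you do lose it, the join command can be regenerated on the master at any time:
kubeadm token create --print-join-command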
- Copy the config file
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
- Check the k8s status
kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-99b9bb8bd-47mvf              0/1     Pending   0          1m
kube-system   coredns-99b9bb8bd-8gnln              0/1     Pending   0          1m
kube-system   etcd-k8s-master                      1/1     Running   0          18s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          14s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          18s
kube-system   kube-proxy-9n6hw                     1/1     Running   0          1m
kube-system   kube-scheduler-k8s-master            1/1     Running   0          11s
coredns is stuck in Pending because no network plugin has been installed yet.
- Install weave
kubectl apply -f https://git.io/weave-kube-1.6
- Check the status again
kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS             RESTARTS   AGE
kube-system   coredns-99b9bb8bd-47mvf              0/1     CrashLoopBackOff   1          8m
kube-system   coredns-99b9bb8bd-8gnln              0/1     CrashLoopBackOff   1          8m
kube-system   etcd-k8s-master                      1/1     Running            0          10s
kube-system   kube-apiserver-k8s-master            1/1     Running            0          10s
kube-system   kube-controller-manager-k8s-master   1/1     Running            0          10s
kube-system   kube-proxy-9n6hw                     1/1     Running            0          8m
kube-system   kube-scheduler-k8s-master            1/1     Running            0          8s
kube-system   weave-net-8khzw                      2/2     Running            0          23s
The status has changed to CrashLoopBackOff.
This problem tormented me for two whole days.
- Check the coredns logs
kubectl logs -f coredns-99b9bb8bd-47mvf -n kube-system
.:53
2018/09/22 07:39:37 [INFO] CoreDNS-1.2.2
2018/09/22 07:39:37 [INFO] linux/amd64, go1.11, eb51e8b
CoreDNS-1.2.2
linux/amd64, go1.11, eb51e8b
2018/09/22 07:39:37 [INFO] plugin/reload: Running configuration MD5 = f65c4821c8a9b7b5eb30fa4fbc167769
2018/09/22 07:39:38 [FATAL] plugin/loop: Seen "HINFO IN 8205794187887631643.5216586587165434789." more than twice, loop detected
I couldn't make sense of the log; after googling through a pile of sites I finally found this issue:
https://github.com/coredns/coredns/issues/1986
That issue provides the hint: the coredns Pod takes the nameserver defined in the host's /etc/resolv.conf as its upstream server. On Ubuntu that file lists nameserver 127.0.0.1, and there lies the problem: coredns queries 127.0.0.1, but 127.0.0.1 points straight back at itself, forming an endless loop. That also explains the "loop detected" message in the log above.
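You can confirm the suspicion on the host before changing anything; here it shows only the loopback nameserver:
cat /etc/resolv.conf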
- Fix /etc/resolv.conf
This file is generated automatically, so manual edits do not stick. I tried a pile of methods found online.
Modify /etc/network/interfaces:
auto enp2s0
iface enp2s0 inet dhcp
dns-nameservers 114.114.114.114
systemctl restart networking
Modify /etc/dhcp/dhclient.conf:
prepend domain-name-servers 114.114.114.114;
systemctl restart networking
None of it worked; /etc/resolv.conf still contained only nameserver 127.0.0.1.
Persistence paid off in the end: I finally found this answer:
https://askubuntu.com/questions/627899/nameserver-127-0-1-1-in-resolv-conf-wont-go-away/627900#627900
It turns out the fix is brutally simple: delete the existing /etc/resolv.conf, create a new file in its place, and write the nameserver into it.
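A minimal sketch of that fix, reusing the 114.114.114.114 server from the attempts above; rm also removes the symlink that resolvconf maintains, which is why the file stops being regenerated:
rm /etc/resolv.conf
echo "nameserver 114.114.114.114" > /etc/resolv.conf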
- Reboot the server
reboot
Sometimes even a reboot does not fix it; in that case the only option is to wipe the cluster with kubeadm reset and run the initialization steps again.
- Deploy the dashboard visualization plugin
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
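The dashboard Service is not exposed outside the cluster by default; one common way to reach it is through the API server proxy (the URL matches the kube-system deployment this manifest creates):
kubectl proxy
# then browse to http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/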
- Deploy the container storage plugin
We choose rook, but rook runs as user Pods, and by default the Master node does not allow user Pods to run, so execute the command below to relax the scheduling policy:
kubectl taint nodes --all node-role.kubernetes.io/master-
Then run the following two commands:
kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/operator.yaml
kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/cluster.yaml
- Check the rook Pods
kubectl -n rook-ceph-system get pods
NAME                                  READY   STATUS    RESTARTS   AGE
rook-ceph-agent-v4t4v                 1/1     Running   0          8m
rook-ceph-operator-78d498c68c-8rbsj   1/1     Running   0          18h
rook-discover-g92zk                   1/1     Running   0          8m
All three Pods are in the Running state.
At this point, let's look at the status of every Pod:
kubectl get pods --all-namespaces
NAMESPACE          NAME                                     READY   STATUS      RESTARTS   AGE
kube-system        coredns-99b9bb8bd-gb9cs                  1/1     Running     0          18h
kube-system        coredns-99b9bb8bd-vsg4h                  1/1     Running     0          18h
kube-system        etcd-k8s-master                          1/1     Running     0          18h
kube-system        kube-apiserver-k8s-master                1/1     Running     0          18h
kube-system        kube-controller-manager-k8s-master       1/1     Running     0          18h
kube-system        kube-proxy-9tk7r                         1/1     Running     0          18h
kube-system        kube-scheduler-k8s-master                1/1     Running     0          18h
kube-system        kubernetes-dashboard-767dc7d4d-nkvh4     1/1     Running     0          18h
kube-system        weave-net-5jwtk                          2/2     Running     0          18h
rook-ceph-system   rook-ceph-agent-v4t4v                    1/1     Running     0          13m
rook-ceph-system   rook-ceph-operator-78d498c68c-8rbsj      1/1     Running     0          18h
rook-ceph-system   rook-discover-g92zk                      1/1     Running     0          13m
rook-ceph          rook-ceph-mgr-a-7866f56456-7rxtc         1/1     Running     0          12m
rook-ceph          rook-ceph-mon-a-679754576b-wdmdk         1/1     Running     0          13m
rook-ceph          rook-ceph-mon-b-54bc6865fb-vsbg7         1/1     Running     0          12m
rook-ceph          rook-ceph-mon-c-676597bf64-8n9lp         1/1     Running     0          12m
rook-ceph          rook-ceph-osd-0-d99586d78-h72dn          1/1     Running     0          12m
rook-ceph          rook-ceph-osd-prepare-k8s-master-fdfbw   0/1     Completed   0          12m
Congratulations: you have finished building a single-node kubernetes cluster!
References:
blog.csdn.net/forever__1234/article/details/81259907
blog.51cto.com/vnimos/2053215
www.v2ex.com/t/261723