[root@master ~]# kubectl get namespace
NAME              STATUS   AGE
default           Active   45h   # objects created without a namespace land in default
kube-node-lease   Active   45h   # node heartbeat leases, introduced in v1.13
kube-public       Active   45h   # resources here are readable by everyone, including unauthenticated users
kube-system       Active   45h   # resources created by the Kubernetes system itself live here
Now let's walk through the common operations on the namespace resource:
View
# 1. List all namespaces: kubectl get ns
[root@master ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   45h
kube-node-lease   Active   45h
kube-public       Active   45h
kube-system       Active   45h
# 2. View a specific namespace: kubectl get ns <name>
[root@master ~]# kubectl get ns default
NAME      STATUS   AGE
default   Active   45h
# 3. Choose an output format: kubectl get ns <name> -o <format>
# kubernetes supports many formats; the common ones are wide, json and yaml
[root@master ~]# kubectl get ns default -o yaml
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: "2020-04-05T04:44:16Z"
  name: default
  resourceVersion: "151"
  selfLink: /api/v1/namespaces/default
  uid: 7405f73a-e486-43d4-9db6-145f1409f090
spec:
  finalizers:
  - kubernetes
status:
  phase: Active
# 4. Describe a namespace: kubectl describe ns <name>
[root@master ~]# kubectl describe ns default
Name:         default
Labels:       <none>
Annotations:  <none>
Status:       Active   # Active: the namespace is in use; Terminating: the namespace is being deleted

# ResourceQuota: resource limits applied to the namespace as a whole
# LimitRange: resource limits applied to each component inside the namespace
No resource quota.
No LimitRange resource.
Create
# Create a namespace
[root@master ~]# kubectl create ns dev
namespace/dev created
Delete
# Delete a namespace
[root@master ~]# kubectl delete ns dev
namespace "dev" deleted
# Command format: kubectl run <pod-controller-name> [options]
# --image      image to run in the Pod
# --port       port to expose
# --namespace  target namespace
[root@master ~]# kubectl run nginx --image=nginx:1.17.1 --port=80 --namespace dev
deployment.apps/nginx created
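Note: the deployment.apps/nginx output shows this transcript comes from an older cluster; since Kubernetes v1.18, kubectl run creates a bare Pod rather than a Deployment. On a newer cluster the equivalent would be:

kubectl create deployment nginx --image=nginx:1.17.1 -n dev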
View Pod information
# View basic Pod information
[root@master ~]# kubectl get pods -n dev
NAME                     READY   STATUS    RESTARTS   AGE
nginx-5ff7956ff6-fg2db   1/1     Running   0          43s
# View detailed Pod information
[root@master ~]# kubectl describe pod nginx-5ff7956ff6-fg2db -n dev
Name: nginx-5ff7956ff6-fg2db
Namespace: dev
Priority: 0
Node: node1/192.168.109.101
Start Time: Wed, 08 Apr 2020 09:29:24 +0800
Labels: pod-template-hash=5ff7956ff6
run=nginx
Annotations:
Status: Running
IP: 10.244.1.23
IPs:
IP: 10.244.1.23
Controlled By: ReplicaSet/nginx-5ff7956ff6
Containers:
nginx:
Container ID: docker://4c62b8c0648d2512380f4ffa5da2c99d16e05634979973449c98e9b829f6253c
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Wed, 08 Apr 2020 09:30:01 +0800
Ready: True
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-hwvvw (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-hwvvw:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-hwvvw
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type    Reason     Age    From               Message
----    ------     ----   ----               -------
Normal  Scheduled         default-scheduler  Successfully assigned dev/nginx-5ff7956ff6-fg2db to node1
Normal  Pulling    4m11s  kubelet, node1     Pulling image "nginx:1.17.1"
Normal  Pulled     3m36s  kubelet, node1     Successfully pulled image "nginx:1.17.1"
Normal  Created    3m36s  kubelet, node1     Created container nginx
Normal  Started    3m36s  kubelet, node1     Started container nginx
Access the Pod
# Get the Pod IP
[root@master ~]# kubectl get pods -n dev -o wide
NAME                     READY   STATUS    RESTARTS   AGE    IP            NODE    ...
nginx-5ff7956ff6-fg2db   1/1     Running   0          190s   10.244.1.23   node1   ...

# Access the Pod
[root@master ~]# curl http://10.244.1.23:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
</head>
<body>
...
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
Delete a specific Pod
# Delete the specified Pod
[root@master ~]# kubectl delete pod nginx-5ff7956ff6-fg2db -n dev
pod "nginx-5ff7956ff6-fg2db" deleted
# The delete is reported as successful, but querying again shows a newly created Pod
[root@master ~]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginx-5ff7956ff6-jj4ng 1/1 Running 0 21s
# This is because the Pod was created by a Pod controller. The controller watches its Pods
# and immediately recreates any Pod that dies. To really delete the Pod, delete the controller.
# First, list the Pod controllers in the current namespace
[root@master ~]# kubectl get deploy -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 9m7s
# Next, delete the Pod controller
[root@master ~]# kubectl delete deploy nginx -n dev
deployment.apps "nginx" deleted
# After a moment, query the Pods again: the Pod is gone
[root@master ~]# kubectl get pods -n dev
No resources found in dev namespace.
Declarative configuration
Create a file pod-nginx.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
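Create and delete the Pod from this file with the usual pair of commands:

kubectl create -f pod-nginx.yaml
kubectl delete -f pod-nginx.yaml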
# Add a label to a pod
[root@master ~]# kubectl label pod nginx-pod version=1.0 -n dev
pod/nginx-pod labeled
# Update an existing label (requires --overwrite)
[root@master ~]# kubectl label pod nginx-pod version=2.0 -n dev --overwrite
pod/nginx-pod labeled
# Show labels
[root@master ~]# kubectl get pod nginx-pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-pod 1/1 Running 0 10m version=2.0
# Filter by label
[root@master ~]# kubectl get pod -n dev -l version=2.0 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-pod 1/1 Running 0 17m version=2.0
[root@master ~]# kubectl get pod -n dev -l version!=2.0 --show-labels
No resources found in dev namespace.

# Remove a label
[root@master ~]# kubectl label pod nginx-pod version- -n dev
pod/nginx-pod labeled
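Label selectors also support set-based expressions; for example (the values here are illustrative):

kubectl get pod -n dev -l 'version in (1.0, 2.0)' --show-labels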
Declarative approach
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    version: "3.0"
    env: "test"
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
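The pod-base.yaml applied below is not shown in this excerpt. A minimal sketch that matches the described behavior (two containers, one of which keeps exiting and being restarted); the busybox container is an assumption based on the image used later in this section:

apiVersion: v1
kind: Pod
metadata:
  name: pod-base
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  - name: busybox          # assumption: no long-running command, so it exits and is restarted
    image: busybox:1.30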
# Create the Pod
[root@master pod]# kubectl apply -f pod-base.yaml
pod/pod-base created
# Check the Pod status
# READY 1/2: the Pod has 2 containers, 1 ready and 1 not ready
# RESTARTS: restart count; one container keeps failing, so the Pod keeps restarting it
[root@master pod]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod-base 1/2 Running 4 95s
# Use describe to see the details
# A basic Pod is now up and running, even though it has a problem for the moment
[root@master pod]# kubectl describe pod pod-base -n dev
Image pull policy
Create a file pod-imagepullpolicy.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-imagepullpolicy
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    imagePullPolicy: Always  # image pull policy: Always, IfNotPresent or Never
  - name: busybox
    image: busybox:1.30
# Create the Pod
[root@master pod]# kubectl create -f pod-imagepullpolicy.yaml
pod/pod-imagepullpolicy created
# Check the Pod details
# Note the explicit Pulling image "nginx:1.17.1" step for the nginx container
[root@master pod]# kubectl describe pod pod-imagepullpolicy -n dev
......
Events:
Type    Reason     Age               From               Message
----    ------     ----              ----               -------
Normal  Scheduled                    default-scheduler  Successfully assigned dev/pod-imagepullpolicy to node1
Normal  Pulling    32s               kubelet, node1     Pulling image "nginx:1.17.1"
Normal  Pulled     26s               kubelet, node1     Successfully pulled image "nginx:1.17.1"
Normal  Created    26s               kubelet, node1     Created container nginx
Normal  Started    25s               kubelet, node1     Started container nginx
Normal  Pulled     7s (x3 over 25s)  kubelet, node1     Container image "busybox:1.30" already present on machine
Normal  Created    7s (x3 over 25s)  kubelet, node1     Created container busybox
Normal  Started    7s (x3 over 25s)  kubelet, node1     Started container busybox
Method 1: Exec
Create pod-liveness-exec.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-exec
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      exec:
        command: ["/bin/cat", "/tmp/hello11.txt"]  # run a command that reads a file (per the events below, this file does not exist)
Create the pod and observe the effect:
# Create the Pod
[root@master ~]# kubectl create -f pod-liveness-exec.yaml
pod/pod-liveness-exec created
# Check the Pod details
[root@master ~]# kubectl describe pods pod-liveness-exec -n dev
......
Normal Created 20s (x2 over 50s) kubelet, node1 Created container nginx
Normal Started 20s (x2 over 50s) kubelet, node1 Started container nginx
Normal Killing 20s kubelet, node1 Container nginx failed liveness probe, will be restarted
Warning Unhealthy 0s (x5 over 40s) kubelet, node1 Liveness probe failed: cat: can't open '/tmp/hello11.txt': No such file or directory
# The events show that health checks begin as soon as the nginx container starts.
# When a check fails, the container is killed and then restarted (that's the restart policy at work, covered later).
# After a while, check the pod again: RESTARTS is no longer 0 and keeps growing.
[root@master ~]# kubectl get pods pod-liveness-exec -n dev
NAME READY STATUS RESTARTS AGE
pod-liveness-exec 0/1 CrashLoopBackOff 2 3m19s
# Of course, if you change the command to a file that exists, e.g. /tmp/hello.txt, and try again, it behaves normally......
Method 2: TCPSocket
Create pod-liveness-tcpsocket.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-tcpsocket
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 8080  # try to connect to port 8080
Create the pod and observe the effect:
# Create the Pod
[root@master ~]# kubectl create -f pod-liveness-tcpsocket.yaml
pod/pod-liveness-tcpsocket created
# Check the Pod details
[root@master ~]# kubectl describe pods pod-liveness-tcpsocket -n dev
......
Normal Scheduled 31s default-scheduler Successfully assigned dev/pod-liveness-tcpsocket to node2
Normal Pulled kubelet, node2 Container image "nginx:1.17.1" already present on machine
Normal Created kubelet, node2 Created container nginx
Normal Started kubelet, node2 Started container nginx
Warning Unhealthy (x2 over ) kubelet, node2 Liveness probe failed: dial tcp 10.244.2.44:8080: connect: connection refused
# The events show the probe trying port 8080 and failing.
# After a while, check the pod again: RESTARTS is no longer 0 and keeps growing.
[root@master ~]# kubectl get pods pod-liveness-tcpsocket -n dev
NAME READY STATUS RESTARTS AGE
pod-liveness-tcpsocket 0/1 CrashLoopBackOff 2 3m19s
# Of course, if you change it to a port that is actually open, e.g. 80, and try again, it behaves normally......
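Method 3: HTTPGet
The pod-liveness-httpget.yaml manifest is missing from this excerpt. A minimal sketch consistent with the 404 failures in the events below; the /hello path is an assumption (any path nginx does not serve reproduces the failure):

apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:        # probe with an HTTP GET request
        scheme: HTTP
        port: 80
        path: /hello  # assumption: a path that returns 404

Create the pod and observe the effect: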
# Create the Pod
[root@master ~]# kubectl create -f pod-liveness-httpget.yaml
pod/pod-liveness-httpget created
# Check the Pod details
[root@master ~]# kubectl describe pod pod-liveness-httpget -n dev
.......
Normal Pulled 6s (x3 over 64s) kubelet, node1 Container image "nginx:1.17.1" already present on machine
Normal Created 6s (x3 over 64s) kubelet, node1 Created container nginx
Normal Started 6s (x3 over 63s) kubelet, node1 Started container nginx
Warning Unhealthy 6s (x6 over 56s) kubelet, node1 Liveness probe failed: HTTP probe failed with statuscode: 404
Normal Killing 6s (x2 over 36s) kubelet, node1 Container nginx failed liveness probe, will be restarted
# The events show the probe requesting a path that does not exist and getting a 404.
# After a while, check the pod again: RESTARTS is no longer 0 and keeps growing.
[root@master ~]# kubectl get pod pod-liveness-httpget -n dev
NAME READY STATUS RESTARTS AGE
pod-liveness-httpget 1/1 Running 5 3m17s
# Of course, if you change path to one that can be served, e.g. /, and try again, it behaves normally......
NodeName
Create pod-nodename.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: node1  # schedule this Pod directly onto node1
# Create the Pod
[root@master ~]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created
# Check the NODE column: the Pod was indeed scheduled onto node1
[root@master ~]# kubectl get pods pod-nodename -n dev -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP            NODE    ......
pod-nodename   1/1     Running   0          56s   10.244.1.87   node1   ......

# Next, delete the pod and change nodeName to node3 (there is no node3 in the cluster)
[root@master ~]# kubectl delete -f pod-nodename.yaml
pod "pod-nodename" deleted
[root@master ~]# vim pod-nodename.yaml
[root@master ~]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created
# Check again: the Pod is now assigned to node3, but since node3 does not exist, the pod can never run
[root@master ~]# kubectl get pods pod-nodename -n dev -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP       NODE    ......
pod-nodename   0/1     Pending   0          6s    <none>   node3   ......
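Besides nodeName, a Pod can also be steered via node labels with nodeSelector. A minimal sketch, assuming a node has been labeled first (e.g. kubectl label nodes node1 nodeenv=pro); the pod name here is hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeselector   # hypothetical name for this sketch
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeSelector:
    nodeenv: pro   # only nodes carrying the label nodeenv=pro are eligible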
Create pod-nodeaffinity-preferred.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-preferred
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:          # affinity settings
    nodeAffinity:    # node affinity
      preferredDuringSchedulingIgnoredDuringExecution:  # soft limit
      - weight: 1
        preference:
          matchExpressions:  # prefer nodes whose nodeenv label value is in ["xxx","yyy"] (no such node in this cluster)
          - key: nodeenv
            operator: In
            values: ["xxx","yyy"]
# Create the pod
[root@master ~]# kubectl create -f pod-nodeaffinity-preferred.yaml
pod/pod-nodeaffinity-preferred created
# Check the pod status (it runs fine: the preference is only a soft limit)
[root@master ~]# kubectl get pod pod-nodeaffinity-preferred -n dev
NAME READY STATUS RESTARTS AGE
pod-nodeaffinity-preferred 1/1 Running 0 40s
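The hard-limit counterpart uses requiredDuringSchedulingIgnoredDuringExecution: if no node matches, the Pod stays Pending instead of being scheduled anyway. A sketch of just the affinity stanza, reusing the nodeenv key from above:

  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:  # hard limit
        nodeSelectorTerms:
        - matchExpressions:
          - key: nodeenv
            operator: In
            values: ["xxx","yyy"]  # with no matching node, scheduling fails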
1) First create a reference pod, pod-podaffinity-target.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-target
  namespace: dev
  labels:
    podenv: pro        # set a label
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: node1      # pin the target pod explicitly to node1
# Start the target pod
[root@master ~]# kubectl create -f pod-podaffinity-target.yaml
pod/pod-podaffinity-target created
# Check the pod status
[root@master ~]# kubectl get pods pod-podaffinity-target -n dev
NAME READY STATUS RESTARTS AGE
pod-podaffinity-target 1/1 Running 0 4s
2) Create pod-podaffinity-required.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:        # affinity settings
    podAffinity:   # pod affinity
      requiredDuringSchedulingIgnoredDuringExecution:  # hard limit
      - labelSelector:
          matchExpressions:  # match pods whose podenv label value is in ["xxx","yyy"]
          - key: podenv
            operator: In
            values: ["xxx","yyy"]
        topologyKey: kubernetes.io/hostname
# Start the pod
[root@master ~]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
# Check the pod status: it is not running
[root@master ~]# kubectl get pods pod-podaffinity-required -n dev
NAME READY STATUS RESTARTS AGE
pod-podaffinity-required 0/1 Pending 0 9s
# Check the details
[root@master ~]# kubectl describe pods pod-podaffinity-required -n dev
......
Events:
Type     Reason            Age   From               Message
----     ------            ----  ----               -------
Warning  FailedScheduling        default-scheduler  0/3 nodes are available: 2 node(s) didn't match pod affinity rules, 1 node(s) had taints that the pod didn't tolerate.

# Next, change values: ["xxx","yyy"] to values: ["pro","yyy"]
# Meaning: the new Pod must share a node with a pod labeled podenv=pro or podenv=yyy
[root@master ~]# vim pod-podaffinity-required.yaml

# Then recreate the pod and check the result
[root@master ~]# kubectl delete -f pod-podaffinity-required.yaml
pod "pod-podaffinity-required" deleted
[root@master ~]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
# This time the Pod runs normally
[root@master ~]# kubectl get pods pod-podaffinity-required -n dev
NAME READY STATUS RESTARTS AGE LABELS
pod-podaffinity-required 1/1 Running 0 6s
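podAntiAffinity is configured the same way but keeps the new Pod away from nodes that run matching Pods. A sketch of the stanza, reusing the podenv=pro label of the target pod above:

  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: podenv
            operator: In
            values: ["pro"]
        topologyKey: kubernetes.io/hostname  # do not co-locate with pods labeled podenv=pro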
Taints
# With taints on the schedulable node, newly created pods all stay Pending
[root@master ~]# kubectl run taint3 --image=nginx:1.17.1 -n dev
[root@master ~]# kubectl get pods -n dev -o wide
NAME                      READY   STATUS    RESTARTS   AGE   IP    NODE   NOMINATED
taint1-7665f7fd85-htkmp   0/1     Pending   0          35s
taint2-544694789-bn7wb    0/1     Pending   0          35s
taint3-6d78dbd749-tktkq   0/1     Pending   0          6s
Tolerations
Create pod-toleration.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  tolerations:           # add a toleration
  - key: "tag"           # key of the taint to tolerate
    operator: "Equal"    # operator
    value: "heima"       # value of the taint to tolerate
    effect: "NoExecute"  # rule to tolerate; must match the effect of the taint on the node
# Pod before the toleration was added
[root@master ~]# kubectl get pods -n dev -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP    NODE   NOMINATED
pod-toleration   0/1     Pending   0          3s

# Pod after the toleration was added
[root@master ~]# kubectl get pods -n dev -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED
pod-toleration   1/1     Running   0          3s    10.244.1.62   node1
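For reference, a taint matching the toleration above could have been applied and removed like this (node name and key/value are taken from the example; the exact taint used originally is an assumption):

# apply the taint
kubectl taint nodes node1 tag=heima:NoExecute
# remove it again
kubectl taint nodes node1 tag:NoExecute-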