1. Configure the network (reusing an existing network)
- Create a Docker bridge network
# Create a bridge network named clickhouse with gateway 172.26.0.1 and subnet 172.26.0.0/16
docker network create -d bridge --subnet=172.26.0.0/16 --gateway=172.26.0.1 clickhouse
Note: this setup failed to deploy under WSL.
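To confirm the network was created with the intended subnet and gateway, a quick check with the standard docker CLI (output shape may vary slightly by Docker version):
# Print only the IPAM settings of the clickhouse network
docker network inspect clickhouse --format '{{json .IPAM.Config}}'
# Expected to show something like: [{"Subnet":"172.26.0.0/16","Gateway":"172.26.0.1"}]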
2. Start/stop commands
In the directory containing docker-compose.yml:
Build and start: docker-compose up
Stop: docker-compose stop
Start again: docker-compose start
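For day-to-day use it is often more convenient to run detached and tail the logs of a single service (standard docker-compose flags; the service names are the ones defined later in docker-compose.yml):
# Start in the background
docker-compose up -d
# Follow the logs of one service
docker-compose logs -f consul1
# Stop and remove the containers (the external clickhouse network is left in place)
docker-compose down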
3. Single-datacenter cluster (a multi-datacenter setup is similar; see the note after the table)
Data center | Container IP | Port | Agent type | Node name |
---|---|---|---|---|
dc1 | 172.26.0.120 | 8500 | server | consul-server1 |
dc1 | 172.26.0.121 | 8500 | server | consul-server2 |
dc1 | 172.26.0.122 | 8500 | server | consul-server3 |
dc1 | 172.26.0.123 | 7110 | client | consul-client1 |
dc2 | 172.26.0.124 | 8500 | server | consul-server4 |
dc2 | 172.26.0.125 | 8500 | server | consul-server5 |
dc2 | 172.26.0.126 | 8500 | server | consul-server6 |
dc2 | 172.26.0.127 | 7111 | client | consul-client2 |
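Only the dc1 half of the table is configured in this note; the dc2 rows are listed for reference. If the second datacenter were brought up with analogous configs, its servers would typically be linked to dc1 over the WAN gossip pool, roughly like this (a sketch, not covered by the files below):
# Run from inside any dc2 server container, pointing at a dc1 server address
consul join -wan 172.26.0.120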
4. Configuration files
Place the following JSON configuration files in the F:/DockerData/consul-cluster directory (two quick sanity checks on these files are sketched after the last one).
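It can also help to create the host directories used by the bind mounts in the docker-compose.yml below up front, so the paths are predictable (a convenience sketch using the Git-Bash/WSL-style /f/... paths from the compose file; adjust to your own layout):
mkdir -p /f/DockerData/consul-cluster
mkdir -p /f/DockerData/consul1/log /f/DockerData/consul2/log /f/DockerData/consul3/log /f/DockerData/consul4/log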
- consul-server1.json
{
  "datacenter": "dc1",
  "node_name": "consul-server1",
  "bootstrap_expect": 3,
  "encrypt": "qV5WVLHhFnwEle8l/Edi/Q==",
  "data_dir": "/consul/data",
  "bind_addr": "172.26.0.120",
  "server": true,
  "enable_script_checks": true,
  "log_file": "/consul/log/",
  "log_level": "INFO",
  "log_rotate_bytes": 100000000,
  "log_rotate_duration": "24h",
  "start_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_interval": "30s",
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "enable_token_persistence": true,
    "tokens": {
      "master": "hello",
      "agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
    }
  }
}
- consul-server2.json
{
  "datacenter": "dc1",
  "node_name": "consul-server2",
  "encrypt": "qV5WVLHhFnwEle8l/Edi/Q==",
  "data_dir": "/consul/data",
  "bind_addr": "172.26.0.121",
  "server": true,
  "enable_script_checks": true,
  "log_file": "/consul/log/",
  "log_level": "INFO",
  "log_rotate_bytes": 100000000,
  "log_rotate_duration": "24h",
  "start_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_interval": "30s",
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "enable_token_persistence": true,
    "tokens": {
      "master": "hello",
      "agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
    }
  }
}
- consul-server3.json
{
  "datacenter": "dc1",
  "node_name": "consul-server3",
  "encrypt": "qV5WVLHhFnwEle8l/Edi/Q==",
  "data_dir": "/consul/data",
  "bind_addr": "172.26.0.122",
  "server": true,
  "enable_script_checks": true,
  "log_file": "/consul/log/",
  "log_level": "INFO",
  "log_rotate_bytes": 100000000,
  "log_rotate_duration": "24h",
  "start_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_interval": "30s",
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "enable_token_persistence": true,
    "tokens": {
      "master": "hello",
      "agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
    }
  }
}
- consul-client1.json
{
  "datacenter": "dc1",
  "primary_datacenter": "dc1",
  "advertise_addr": "172.26.0.123",
  "start_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "retry_join": [
    "172.26.0.120",
    "172.26.0.121",
    "172.26.0.122"
  ],
  "bind_addr": "172.26.0.123",
  "node_name": "consul-client1",
  "client_addr": "0.0.0.0",
  "connect": {
    "enabled": true
  },
  "data_dir": "/consul/data",
  "log_file": "/consul/log/",
  "log_level": "INFO",
  "log_rotate_bytes": 100000000,
  "log_rotate_duration": "24h",
  "encrypt": "qV5WVLHhFnwEle8l/Edi/Q==",
  "ui": true,
  "enable_script_checks": false,
  "enable_local_script_checks": true,
  "disable_remote_exec": true,
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "enable_token_persistence": true,
    "tokens": {
      "agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
    }
  }
}
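Two optional checks before wiring these files into docker-compose: the encrypt value is the gossip encryption key and must be identical on every agent, and a config file can be syntax-checked with consul validate. Both can be run through the same image by overriding the entrypoint (the mounted path below is illustrative):
# Generate a fresh gossip key to use as the "encrypt" value on all nodes
docker run --rm --entrypoint consul consul:latest keygen
# Validate a config file before mounting it into a container
docker run --rm --entrypoint consul -v /f/DockerData/consul-cluster/consul-server1.json:/tmp/consul.json consul:latest validate /tmp/consul.json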
5. Write docker-compose.yml
version: "3.7"
services:
  consul1:
    image: consul:latest
    container_name: consul-server1
    #restart: always
    command: agent
    volumes:
      - /f/DockerData/consul1:/consul/data
      - /f/DockerData/consul1/log:/consul/log
      - /f/DockerData/consul-cluster/consul-server1.json:/consul/config/consul.json
    networks:
      clickhouse:
        ipv4_address: 172.26.0.120
  consul2:
    image: consul:latest
    container_name: consul-server2
    #restart: always
    command: agent
    volumes:
      - /f/DockerData/consul2:/consul/data
      - /f/DockerData/consul2/log:/consul/log
      - /f/DockerData/consul-cluster/consul-server2.json:/consul/config/consul.json
    networks:
      clickhouse:
        ipv4_address: 172.26.0.121
  consul3:
    image: consul:latest
    container_name: consul-server3
    #restart: always
    command: agent
    volumes:
      - /f/DockerData/consul3:/consul/data
      - /f/DockerData/consul3/log:/consul/log
      - /f/DockerData/consul-cluster/consul-server3.json:/consul/config/consul.json
    networks:
      clickhouse:
        ipv4_address: 172.26.0.122
  consul4:
    image: consul:latest
    container_name: consul-client1
    #restart: always
    command: agent -ui
    ports:
      - 8500:8500
    volumes:
      - /f/DockerData/consul4:/consul/data
      - /f/DockerData/consul4/log:/consul/log
      - /f/DockerData/consul-cluster/consul-client1.json:/consul/config/consul.json
    networks:
      clickhouse:
        ipv4_address: 172.26.0.123
networks:
  clickhouse:
    external: true
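After docker-compose up, cluster state can be checked from inside any container; with ACLs in default-deny mode the master token is needed to see the full member list (a sketch using the example tokens from this note):
# List cluster members as seen by consul-server1
docker exec consul-server1 consul members -token hello
# Expect consul-server1/2/3 as type server and consul-client1 as type client, all alive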
6. Configure the agent token (ACL)
- After running docker-compose up, enter the consul-server1 container and run the following command to generate an agent token:
curl --request PUT --header "X-Consul-Token: hello" --data '{"Name": "Agent Token","Type": "client","Rules": "node \"\" { policy = \"write\" } service \"\" { policy = \"read\" }"}' http://127.0.0.1:8500/v1/acl/create
{"ID":"64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"}
- Note: on the cluster's first start the tokens blocks contain no agent entry yet; hello is the master token configured on the servers.
- Add the generated token to the acl block of every node (each node needs it), then restart the Consul services one by one (a restart-free alternative is sketched after the snippets below).
server:
"acl":{
"enabled":true,
"default_policy":"deny",
"enable_token_persistence":true,
"tokens":{
"master":"hello",
"agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
}
}
client:
"acl":{
"enabled":true,
"default_policy":"deny",
"enable_token_persistence":true,
"tokens":{
"agent": "64093b2a-89a7-4dd2-4f3a-c6f8f5631f31"
}
}
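As an alternative to editing each JSON file and restarting, Consul 1.4+ can also apply the agent token at runtime; with enable_token_persistence already set in the configs above, the token survives restarts (a sketch using this note's example values):
# Apply the agent token on one node without a restart (repeat per node)
docker exec consul-client1 consul acl set-agent-token -token hello agent 64093b2a-89a7-4dd2-4f3a-c6f8f5631f31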
7. Log in to the UI
- Open 127.0.0.1:8500, go to the ACL tab, and log in with the agent token.
- The master token (full administrative privileges) is: hello
- After logging in with the master token you can manage tokens, roles, and policies.
8. References
- https://www.jianshu.com/p/57e77dec1da8
- https://blog.csdn.net/li450126014/article/details/105951195/ (detailed walkthrough)
- https://www.cnblogs.com/cao-lei/p/13048118.html (application demo)
- https://www.cnblogs.com/cao-lei/p/13042750.html
- https://blog.csdn.net/qq_24384579/article/details/86480522