docker-compose 单机合集

文章目录

    • docker安装
    • docker-compose安装
    • 1:mongodb
    • 2:redis
    • 3: nacos
    • 4:kafka , zookeeper , kafka-manager
    • 5:mysql
    • 6: redis
    • 7:es kibana es-head
    • pgsql
    • kong
    • konga
    • jenkins
    • consul
    • portainer管理面板

所有compose文件统一命名 docker-compose.yaml
所有启动 docker-compose up -d
停止 docker-compose down

docker安装

1、安装一些必要的系统工具
yum -y install yum-utils device-mapper-persistent-data lvm2
2、添加软件源信息
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
更新 yum 缓存
yum makecache fast
安装 Docker-ce:
yum -y install docker-ce
启动 Docker 后台服务
systemctl start docker
自启动
systemctl enable docker

docker-compose安装

安装 Docker-Compose
通过访问 https://github.com/docker/compose/releases/latest 得到最新的 docker-compose 版本
下载慢可以自己迅雷下载上传上去

1: cd    /usr/local/bin
2: curl -L https://github.com/docker/compose/releases/download/1.20.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
3: chmod a+x /usr/local/bin/docker-compose
4: docker-compose --version

1:mongodb

version: '2'
services:
  mongo:
    image: mongo:4.4.0
    restart: always
    environment:
      - TZ=Asia/Shanghai
    ports:
      # quote port mappings so YAML never type-guesses "a:b" values
      - "27017:27017"
    volumes:
      - ./data/db:/data/db  # data directory
      - ./data/log:/var/log/mongodb  # log directory
      - ./data/config:/etc/mongo  # config directory

2:redis

将redis.conf放到conf目录下

version: '2'
services:
  redis:
    image: redis:5
    container_name: redis
    hostname: redis
    restart: always
    environment:
      - TZ=Asia/Shanghai
    ports:
      # quoted — unquoted host:container pairs risk YAML 1.1 sexagesimal parsing
      - "6379:6379"
    volumes:
      # expects redis.conf under ./conf (see the redis.conf section below)
      - ./conf/redis.conf:/etc/redis/redis.conf
      - ./data:/data
    command:
      redis-server /etc/redis/redis.conf

redis.conf


bind 0.0.0.0

protected-mode no

port 6379

tcp-backlog 511

timeout 0

tcp-keepalive 300

daemonize no

supervised no

pidfile /var/run/redis_6379.pid

loglevel notice

logfile ""

databases 16

always-show-logo yes


save 900 1
save 300 10
save 60 10000

stop-writes-on-bgsave-error yes

rdbcompression yes

rdbchecksum yes

dbfilename dump.rdb

dir ./

replica-serve-stale-data yes

replica-read-only yes

repl-diskless-sync no

repl-diskless-sync-delay 5

repl-disable-tcp-nodelay no

replica-priority 100


requirepass root

lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no


appendonly no

appendfilename "appendonly.aof"

appendfsync everysec
no-appendfsync-on-rewrite no


auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

aof-load-truncated yes

aof-use-rdb-preamble yes

lua-time-limit 5000

slowlog-log-slower-than 10000

slowlog-max-len 128

latency-monitor-threshold 0

notify-keyspace-events ""

hash-max-ziplist-entries 512
hash-max-ziplist-value 64

list-max-ziplist-size -2

list-compress-depth 0

set-max-intset-entries 512

zset-max-ziplist-entries 128
zset-max-ziplist-value 64

hll-sparse-max-bytes 3000

stream-node-max-bytes 4096
stream-node-max-entries 100

activerehashing yes

client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

hz 10

dynamic-hz yes

aof-rewrite-incremental-fsync yes

rdb-save-incremental-fsync yes



3: nacos

将application.properties放到当前目录下 nacos/conf/下

version: "2"
services:
  nacos:
    image: nacos/nacos-server:v2.0.4
    container_name: nacos-container
    volumes:
      # persist standalone-mode logs on the host
      - ./nacos/standalone-logs/:/home/nacos/logs
      # external config — requires application.properties under ./nacos/conf/
      - ./nacos/conf/application.properties:/home/nacos/conf/application.properties
    ports:
      - "8848:8848"  # HTTP console / API
      - "9848:9848"  # NOTE(review): presumably the gRPC channel used by Nacos 2.x clients — confirm
      - "9555:9555"
    restart: always

application.properties

spring.datasource.platform=mysql
db.num=1
db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user=root
db.password=root

nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000

management.endpoints.web.exposure.include=*

management.metrics.export.elastic.enabled=false
management.metrics.export.influx.enabled=false

server.tomcat.accesslog.enabled=true
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i

server.tomcat.basedir=

nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**

nacos.core.auth.system.type=nacos
nacos.core.auth.enabled=false
nacos.core.auth.default.token.expire.seconds=18000
nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
nacos.core.auth.caching.enabled=true
nacos.core.auth.enable.userAgentAuthWhite=false
nacos.core.auth.server.identity.key=serverIdentity
nacos.core.auth.server.identity.value=security

nacos.istio.mcp.server.enabled=false

4:kafka , zookeeper , kafka-manager


version: '3'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper_container
    volumes:
      - ./zkdata:/data
    ports:
      - "2181:2181"
    restart: always
  kafka:
    image: wurstmeister/kafka
    container_name: kafka_container
    volumes:
      - ./kfdata:/kafka
    ports:
      - "39092:9092"
    environment:
      # replace 192.168.0.24 with the host's reachable IP
      - KAFKA_ZOOKEEPER_CONNECT=192.168.0.24:2181
      # address advertised to clients; must be reachable from outside the container
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.24:9092
      - KAFKA_LISTENERS=PLAINTEXT://:9092
      - KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
    restart: always
  kafka-manager:
    image: kafkamanager/kafka-manager:2.0.0.2
    container_name: kafka-manager_container
    environment:
      # quoted: the value contains a colon
      ZK_HOSTS: "192.168.0.24:2181"
    ports:
      - "19000:9000"
    # added for consistency with the other services in this stack
    restart: always

5:mysql

mkdir -p ./mysql/{mydir,datadir,conf,source}

version: '3'
services:
  mysql:
    restart: always
    image: mysql:5.7.18
    container_name: mysql_container
    volumes:
      - ./mysql/mydir:/mydir
      - ./mysql/datadir:/var/lib/mysql  # data directory
      # expects my.cnf under ./mysql/conf (see the my.cnf section below)
      - ./mysql/conf/my.cnf:/etc/my.cnf
    environment:
      - "MYSQL_ROOT_PASSWORD=root"
      - "TZ=Asia/Shanghai"
    ports:
      # quoted to keep the mapping a string under any YAML parser
      - "3306:3306"

my.cnf

[mysqld]
user=mysql
default-storage-engine=INNODB
character-set-server=utf8
character-set-client-handshake=FALSE
collation-server=utf8_unicode_ci
init_connect='SET NAMES utf8'
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8

6: redis

version: '2'
services:
  # normalized to 2-space indentation, matching every other snippet in this file
  redis:
    image: redis:5.0.0
    container_name: redis
    # no config file mounted here — password is set directly on the command line
    command: redis-server --requirepass root
    ports:
      - "6379:6379"
    volumes:
      - ./data:/data

7:es kibana es-head

services:
  elasticsearch:
    image: elasticsearch:7.17.1
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - TZ=Asia/Shanghai
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - ./elasticsearch/logs:/usr/share/elasticsearch/logs
      - ./elasticsearch/data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    healthcheck:
      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 3
  kibana:
    # pinned to the same minor version as elasticsearch (was 7.17.2 vs 7.17.1);
    # Kibana refuses to connect to an Elasticsearch of a different version
    image: kibana:7.17.1
    container_name: kibana
    ports:
      - "5601:5601"
    volumes:
      - ./kibana/kibana.yml:/usr/share/kibana/config/kibana.yml:rw
  es-head:
    image: tobias74/elasticsearch-head:latest
    container_name: es-head
    restart: always
    ports:
      - "9100:9100"

elasticsearch.yml

# 集群名称
cluster.name: elasticsearch-cluster
# 节点名称
node.name: es-node-1
network.bind_host: 0.0.0.0
# 绑定host,0.0.0.0代表当前节点的ip
network.host: 0.0.0.0
# 设置其它节点和该节点交互的ip地址,如果不设置它会自动判断,值必须是个真实的ip地址(本机ip)
network.publish_host: 0.0.0.0
# 设置对外服务的http端口,默认为9200
http.port: 9200
# 设置节点间交互的tcp端口,默认是9300
transport.tcp.port: 9300
# 是否支持跨域,默认为false
http.cors.enabled: true
# 当设置允许跨域,默认为*,表示支持所有域名,如果我们只是允许某些网站能访问,那么可以使用正则表达式。比如只允许本地地址。 /https?:\/\/localhost(:[0-9]+)?/
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# 表示这个节点是否可以充当主节点
node.master: true
# 是否充当数据节点
node.data: true
# 所有主从节点ip:port
#discovery.seed_hosts: ["192.168.200.135:9300"]  #本地只有一个节点,无法正常启动,先注释
# 这个参数决定了在选主过程中需要 有多少个节点通信  预防脑裂 N/2+1
discovery.zen.minimum_master_nodes: 1
#初始化主节点
#cluster.initial_master_nodes: ["es-node-1"]  #本地只有一个节点,无法正常启动,先注释

kibana.yml

server.name: kibana
# kibana的主机地址 0.0.0.0可表示监听所有IP
server.host: "0.0.0.0"
# kibana访问es的URL
elasticsearch.hosts: [ "http://192.168.133.1:9200" ]
# 显示登陆页面
xpack.monitoring.ui.container.elasticsearch.enabled: true
# 语言
i18n.locale: "zh-CN"

pgsql

version: "3.1"
services:
  db_test:
    image: postgres:9.6
    environment:
      POSTGRES_PASSWORD: root
      POSTGRES_USER: root
      POSTGRES_DB: dev
      TZ: Asia/Shanghai
    ports:
      # quoted port mapping (string, never number-guessed)
      - "5432:5432"
    volumes:
      - ./data:/var/lib/postgresql/data
    restart: always

kong

version: "3"
services:
  # one-shot job: creates/updates the Kong schema in Postgres, then exits
  kong-migration:
    image: kong:latest
    command: "kong migrations bootstrap"
    restart: on-failure
    environment:
      KONG_PG_HOST: xxxIP  # replace xxxIP with the Postgres host address
      KONG_DATABASE: postgres
      KONG_PG_USER: root
      KONG_PG_PASSWORD: root
  kong:
    image: kong:latest
    restart: always
    environment:
      KONG_PG_HOST: xxxIP  # replace xxxIP with the Postgres host address
      KONG_DATABASE: postgres
      KONG_PG_USER: root
      KONG_PG_PASSWORD: root
      KONG_CASSANDRA_CONTACT_POINTS: xxxIP
      KONG_PROXY_LISTEN: "0.0.0.0:8000"
      KONG_PROXY_LISTEN_SSL: "0.0.0.0:8443"
      KONG_ADMIN_LISTEN: "0.0.0.0:8001"
      TZ: Asia/Shanghai
    healthcheck:
      # probes the admin API; replace xxxIP accordingly
      test: ["CMD", "curl", "-f", "http://xxxIP:8001"]
      interval: 5s
      timeout: 2s
      retries: 15
    ports:
      - "8001:8001"
      - "8000:8000"
      - "8443:8443"
      - "8444:8444"

konga

version: "3"
services:
  # one-shot job that prepares the konga database (was misspelled "monga-prepare")
  konga-prepare:
    image: pantsel/konga:latest
    command: "-c prepare -a postgres -u postgresql://root:root@xxxIP:5432/konga"
    restart: on-failure
  konga:
    image: pantsel/konga:latest
    restart: always
    environment:
      DB_ADAPTER: postgres
      # placeholder unified to xxxIP (was XXXIP here but xxxIP everywhere else)
      DB_URI: postgres://root:root@xxxIP:5432/konga
      NODE_ENV: production
      TZ: Asia/Shanghai
    ports:
      - "1337:1337"

jenkins

version: '2.3'
services:
  jenkinsci:
    image: jenkins/jenkins:lts-jdk11
    container_name: jenkins
    restart: always
    privileged: true
    ports:
      # quoted port mappings (compose best practice)
      - "8080:8080"   # web UI
      - "50000:50000" # inbound agent port
    volumes:
      - ./data:/var/jenkins_home
      - /etc/localtime:/etc/localtime:ro  # sync container clock with host

启动后
进入data下
修改hudson.model.UpdateCenter.xml文件中的url为
https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
重启容器
刷新页面 出现输入密码的时候,进入jenkins挂载目录下的updates
执行
sed -i 's#http://updates.jenkins.io/download#https://mirrors.tuna.tsinghua.edu.cn/jenkins#g' default.json && sed -i 's#http://www.google.com#https://www.baidu.com#g' default.json
再次重启容器
然后执行后续处理

consul

version: '2'
services:
  consul1:
    image: consul:1.9.17
    network_mode: bridge
    container_name: consul1
    # single-node server with the UI enabled; the original passed -client twice
    # ("-client=0.0.0.0" and "-client 0.0.0.0") — the duplicate is removed
    command: "agent -server -node=node1 -bind=0.0.0.0 -client=0.0.0.0 -bootstrap-expect=1 -ui"
    # re-indented with spaces: the original used tabs, which YAML forbids
    restart: always
    ports:
      - "8500:8500"
      - "8300:8300"
      - "8301:8301"
      - "8302:8302"
      - "8600:8600"

portainer管理面板

version: '2'
services:
  portainer:
    image: portainer/portainer-ce
    container_name: portainer
    ports:
      # quoted port mappings (compose best practice)
      - "9000:9000"  # web UI
      - "8000:8000"  # edge agent tunnel
    restart: always
    volumes:
      # gives Portainer control of the local Docker daemon
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/data

你可能感兴趣的:(中间件安装,docker,docker,mongodb,redis,kafka,docker-compose)