Deploying Common Middleware with docker-compose

Contents

1. Deploying MySQL with docker-compose

2. Deploying Nginx with docker-compose

3. Deploying Redis with docker-compose (with master-slave)

4. Deploying RabbitMQ with docker-compose (with clustering)

5. Deploying MinIO with docker-compose (old and new versions)

6. Deploying Kuboard with docker-compose

7. Deploying FastDFS with docker-compose

8. Deploying MongoDB with docker-compose

9. Deploying a single-host Redis cluster with docker-compose

10. Deploying Easy Mock with docker-compose

11. Deploying standalone Nacos with docker-compose


1. Deploying MySQL with docker-compose

[root@harbor mysql_cluster]# tree
.
├── conf
│   └── my.cnf
├── init.sql
├── mysqldb
└── mysql.yaml

cat  mysql.yaml

version: '3'
services:
    mysql:
        network_mode: "bridge"
        environment:
            MYSQL_ROOT_PASSWORD: "yourpassword"
            MYSQL_USER: 'test'
            MYSQL_PASSWORD: 'yourpassword'
        image: "mysql:5.7.36"
        restart: always
        volumes:
            - "./mysqldb:/var/lib/mysql"
            - "./conf/my.cnf:/etc/my.cnf"
            #- "./init:/docker-entrypoint-initdb.d/"
        ports:
            - "13306:3306"

cat conf/my.cnf

[mysqld]
symbolic-links=0
skip-host-cache
skip-name-resolve
default-time-zone = '+8:00'
sql_mode ='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION'
lower_case_table_names=1
user=mysql
server-id=1
autocommit=1
collation-server=utf8mb4_unicode_ci
character_set_server=utf8mb4
max_connections=8000
max_connect_errors=1000
join_buffer_size=128M
tmp_table_size=64M
max_allowed_packet=64M
interactive_timeout=1200
wait_timeout=600
read_buffer_size=16M
read_rnd_buffer_size=8M
sort_buffer_size=8M

binlog_format=row
max_binlog_size=1G
sync_binlog=5
slow_query_log=1
long_query_time=6
log_queries_not_using_indexes=1
log_slow_admin_statements=1
log_throttle_queries_not_using_indexes=10
expire_logs_days=90
min_examined_row_limit=100

innodb_buffer_pool_size=2G
innodb_buffer_pool_instances=8
innodb_buffer_pool_load_at_startup=1
innodb_buffer_pool_dump_at_shutdown=1
innodb_lock_wait_timeout=30

log_timestamps=system
innodb_log_file_size=1G
innodb_log_buffer_size=64M

innodb_file_per_table= 1
plugin-load=rpl_semi_sync_master=semisync_master.so
rpl_semi_sync_master_enabled=1
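
This my.cnf also loads the semi-synchronous replication master plugin, so once the container is running it is worth confirming the plugin actually loaded. A minimal sketch, using the root password from mysql.yaml and resolving the container ID through compose (the service has no fixed container_name):

docker exec -it $(docker-compose -f mysql.yaml ps -q mysql) \
  mysql -uroot -p'yourpassword' -e "SHOW GLOBAL STATUS LIKE 'Rpl_semi_sync%';"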

cat init.sql

create database test;
use test;
create table user
(
id int auto_increment primary key,
username varchar(64) unique not null,
email varchar(120) unique not null,
password_hash varchar(128) not null,
avatar varchar(128) not null
);
insert into user values(1, "zhangsan","[email protected]","passwd","avaterpath");
insert into user values(2, "lisi","[email protected]","passwd","avaterpath");

Start it: docker-compose -f mysql.yaml up -d
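
After it starts, a quick sanity check (a sketch; the service has no container_name, so go through compose or the published port 13306, and the host-side check assumes a local mysql client is installed):

docker-compose -f mysql.yaml ps
docker-compose -f mysql.yaml exec mysql mysql -uroot -p'yourpassword' -e "SELECT VERSION();"
mysql -h127.0.0.1 -P13306 -uroot -p'yourpassword' -e "SHOW DATABASES;"   # from the host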

2. Deploying Nginx with docker-compose

[root@harbor service]# tree nginx/
nginx/
├── cert
├── conf
│   └── nginx.conf
├── conf.d
├── docker-compose-nginx.yml
└── html

vim conf/nginx.conf

worker_processes  1;
events {
    worker_connections  1024;
}
 
http {
    include       mime.types;
    default_type  application/octet-stream;
 
    sendfile        on;
    tcp_nopush     on;
    client_max_body_size 500m; 
 
    keepalive_timeout  200;
 
    gzip  on;
    gzip_vary on;
    gzip_min_length 1k;
    gzip_comp_level 4;
    gzip_types text/plain application/x-javascript text/css application/xml text/javascript application/javascript;
    gzip_disable "MSIE [1-6]\.";
    upstream dist {
    server  10.99.213.104:443;
    server  10.99.213.105:443;
    }    

    server {
        listen       8640 ssl default_server;
        ssl_certificate      /etc/nginx/cert/xxx.pem;
        ssl_certificate_key  /etc/nginx/cert/xxx.key;
        location ^~ / {
                 proxy_http_version 1.1;
                 proxy_connect_timeout 60s;
                 proxy_read_timeout 60s;
                 proxy_send_timeout 60s;
                 proxy_buffer_size 512k;
                 proxy_buffering  on;
                 proxy_buffers   8 512k;
                 proxy_busy_buffers_size 1024k;
                 proxy_max_temp_file_size 100M;
                 proxy_pass https://dist;
                 proxy_redirect              off;
                 proxy_set_header            Host $host:$server_port;
                 proxy_set_header            X-real-ip $remote_addr;
                 proxy_set_header            X-Forwarded-For $proxy_add_x_forwarded_for;
                 proxy_set_header Upgrade $http_upgrade;
                 proxy_set_header Connection 'upgrade';
                 proxy_set_header Origin "";
                 add_header X-Frame-Options sameorigin always;
         }


     }
}

vim docker-compose-nginx.yml

version: '3.1'
services:
    nginx:
        image: nginx:1.21.6     # image
        container_name: nginx_lishanbin    # container name
        restart: always     # restart automatically
        ports:     # port mappings (host:container)
            - '5880:80'
            - '6443:443'
        volumes:      # directory mappings (host:container)
            - ./conf/nginx.conf:/etc/nginx/nginx.conf
            - ./conf.d:/etc/nginx/conf.d
            - ./html:/usr/share/nginx/html
            - ./cert:/etc/nginx/cert
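
Before relying on the proxy, validate the mounted nginx.conf and reload without downtime; a sketch using the container_name from the compose file:

docker-compose -f docker-compose-nginx.yml up -d
docker exec nginx_lishanbin nginx -t
docker exec nginx_lishanbin nginx -s reload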

3. Deploying Redis with docker-compose (with master-slave)

[root@harbor redis]# tree .
.
├── conf
│   └── redis.conf
├── data
│   ├── appendonly.aof
│   └── dump.rdb
└── redis.yaml

cat conf/redis.conf

requirepass test@dbuser2018
appendonly yes

cat redis.yaml

version: '3'
services:
  master:
    image: redis:6.2.6
    container_name: redis-master
    restart: always
    command: redis-server --port 6379 --requirepass test@dbuser2018  --appendonly yes
    ports:
      - 6379:6379
    volumes:
      - ./data:/data
      - ./conf/redis.conf:/etc/redis/redis.conf:rw
 
  slave1:
    image: redis:6.2.6
    container_name: redis-slave-1
    restart: always
    command: redis-server --slaveof 10.45.219.81 6379 --port 6380  --requirepass test@dbuser2018 --masterauth test@dbuser2018  --appendonly yes
    ports:
      - 6380:6380
    volumes:
      - ./data:/data
 
 
  slave2:
    image: redis:6.2.6
    container_name: redis-slave-2
    restart: always
    command: redis-server --slaveof 10.45.219.82 6379 --port 6381  --requirepass test@dbuser2018 --masterauth test@dbuser2018  --appendonly yes
    ports:
      - 6381:6381
    volumes:
      - ./data:/data
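
Once the three containers are up, confirm the slaves attached to the master (a sketch; the password is the requirepass value used in the command lines above):

docker exec -it redis-master redis-cli -a 'test@dbuser2018' info replication

The output should report role:master and list the connected slaves.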

4. Deploying RabbitMQ with docker-compose (with clustering)

Image selection:
rabbitmq:3-management is chosen because it ships with the web management UI.
With a plain tag such as rabbitmq:3.7.8 you have to enter the container and enable the plugin yourself:

docker exec -it rabbitmq bash
rabbitmq-plugins enable rabbitmq_management

[root@harbor rabbitmq]# tree .
.
├── log
├── data
└── rabbitmq.yaml

vim rabbitmq.yaml

version: '3'
 
services:
  rabbitmq1:
    image: rabbitmq:3.8.3-management
    container_name: rabbitmq1
    restart: always
    hostname: rabbitmq1
    extra_hosts:
      - "rabbitmq1:192.168.216.227"
      - "rabbitmq2:192.168.207.97"
    ports:
      - 4369:4369     # epmd / Erlang node discovery port
      - 25672:25672   # inter-node communication port
      - 15672:15672   # management UI port
      - 5672:5672     # AMQP client port
      - 5671:5671
    volumes:
      - ./data:/var/lib/rabbitmq
      - ./log:/var/log/rabbitmq/log
      #- /etc/hosts:/etc/hosts
      - /etc/localtime:/etc/localtime
       
    environment:
      - RABBITMQ_DEFAULT_USER=root
      - RABBITMQ_DEFAULT_PASS=root
      - RABBITMQ_ERLANG_COOKIE=CURIOAPPLICATION
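
To bring the node up and reach the management UI (a sketch; 15672 is mapped above and the login is the RABBITMQ_DEFAULT_USER/PASS pair, root / root):

docker-compose -f rabbitmq.yaml up -d
docker exec -it rabbitmq1 rabbitmqctl status

Then browse to http://<host>:15672.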

Cluster node types: each node has its own storage type; here node 1 is a disk node while nodes 2 and 3 are RAM nodes.

By default a RabbitMQ node starts as a disk node; to join the cluster as a RAM node, add the --ram flag.

To change a node's type afterwards, use:

# rabbitmqctl change_cluster_node_type disc   (or ram)

Create the cluster:

On the disk node (node 1, the seed node), run:

rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl start_app

On the RAM nodes (nodes 2 and 3), run:

rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl join_cluster --ram rabbit@rabbitmq1
rabbitmqctl start_app 

Check the cluster status:

rabbitmqctl cluster_status
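
Clustering alone does not replicate queue contents; if the goal of this setup is mirrored queues, a classic HA policy can optionally be applied on any node, a sketch:

rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all","ha-sync-mode":"automatic"}'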


5. Deploying MinIO with docker-compose (old and new versions)

Old version:

version: '3.7'
services:
  minio:
    image: registry.cn-hangzhou.aliyuncs.com/lishanbin/minio:RELEASE.2021-06-17T00-10-46Z
    container_name: minio
    ports:
      - "9000:9000"
    restart: always
    command: server /data 
    environment:
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
    logging:
      options:
        max-size: "50M" # 最大文件上传限制
        max-file: "10"
      driver: json-file
    volumes:
      - ./data:/data # data directory on the host
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3

New version:

version: '3.7'
services:
  minio:
    image: minio/minio
    container_name: minio
    ports:
      - "9000:9000"
      - "9009:9009"
    restart: always
    command: server /data --console-address ":9009"
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: minio123 
    logging:
      options:
        max-size: "50M" # 最大文件上传限制
        max-file: "10"
      driver: json-file
    volumes:
      - ./data:/data # data directory on the host
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
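
To start it and confirm the same health endpoint used by the healthcheck above (a sketch; the compose file name is assumed):

docker-compose -f minio.yaml up -d
curl -f http://localhost:9000/minio/health/live

The web console is then available at http://<host>:9009 with the MINIO_ROOT_USER / MINIO_ROOT_PASSWORD credentials.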

6. Deploying Kuboard with docker-compose

version: '3'
services:
    kuboard:
        network_mode: "bridge"
        environment:
            KUBOARD_ENDPOINT: 'http://kuboard.my-company.com:80'
            KUBOARD_AGENT_SERVER_UDP_PORT: '10081'
            KUBOARD_AGENT_SERVER_TCP_PORT: '10081'
        image: "eipwork/kuboard:v3.3.0.7"
        restart: always
        volumes:
            - "./data:/data"
        ports:
            -  5880:80/tcp
            -  10081:10081/tcp
            -  10081:10081/udp
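
To start it and open the UI (a sketch; the compose file name is assumed):

docker-compose -f kuboard.yaml up -d

The Kuboard web UI is then reachable at http://<host>:5880, since port 5880 is mapped to the container's port 80 above.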

7. Deploying FastDFS with docker-compose

vim fastdfs.yaml

version: '3'
 
services:
  fastdfs:
    image: registry.cn-hangzhou.aliyuncs.com/lishanbin/fastdfs:0.0.8
    network_mode: host
    restart: always
    ports:
      - 8888:8888     
      - 22122:22122   
      - 23000:23000   
    volumes:
      - ./data:/home/dfs/data
      #- ./logs:/home/dfs/logs
      - /etc/localtime:/etc/localtime
       
    environment:
      - FASTDFS_IPADDR=10.11.12.13

Verification:

Inside the container, upload a test file:
/usr/bin/fdfs_upload_file  /etc/fdfs/client.conf  /tmp/demo.jpg 
group1/M00/00/00/rBEAAWJC1jKASTOAAAHGFXQIrhE048.jpg

Then open: http://192.168.206.227:8888/group1/M00/00/00/rBEAAWJC1jKASTOAAAHGFXQIrhE048.jpg
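
The bundled client tools can also inspect the stored file; a sketch, using the file ID returned by fdfs_upload_file above:

/usr/bin/fdfs_file_info /etc/fdfs/client.conf group1/M00/00/00/rBEAAWJC1jKASTOAAAHGFXQIrhE048.jpg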

8. Deploying MongoDB with docker-compose

version: '2'
services:
    mongodb:
        image: mongo:4.4
        container_name: "mongodb"
        network_mode: "bridge"
        restart: always
        environment:
           # - MONGO_DATA_DIR=/data/db
           # - MONGO_LOG_DIR=/data/logs
            - MONGO_INITDB_ROOT_USERNAME=admin
            - MONGO_INITDB_ROOT_PASSWORD=985211@Cmiot
        volumes:
            - /data/service/mongo/data:/data/db
        ports:
            - 27017:27017
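
A quick connectivity check with the credentials from the environment block (a sketch; the mongo:4.4 image still ships the legacy mongo shell):

docker exec -it mongodb mongo -u admin -p '985211@Cmiot' --authenticationDatabase admin --eval 'db.adminCommand({ping: 1})'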

9. Deploying a single-host Redis cluster with docker-compose

mkdir 700{1..6}
mkdir 700{1..6}/data
mkdir 700{1..6}/config
touch 700{1..6}/config/redis.conf

cat 7001/config/redis.conf

requirepass xxxx
cluster-enabled yes
port 7001
masterauth xxxx
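
Each node needs its own redis.conf with its own port; a small loop can generate all six (a sketch based on the 7001 config above; cluster-config-file and appendonly are optional additions):

for p in $(seq 7001 7006); do
cat > $p/config/redis.conf <<EOF
requirepass xxxx
masterauth xxxx
cluster-enabled yes
cluster-config-file nodes-$p.conf
port $p
appendonly yes
EOF
done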

cat redis.yaml

version: '3'

services:
 redis1:
  image: redis:6.0.9
  restart: always  
  network_mode: host
  volumes:
   - /data/service/redis/7001/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7001/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode
  

 redis2:
  image: redis:6.0.9
  network_mode: host
  restart: always
  volumes:
   - /data/service/redis/7002/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7002/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode

 redis3:
  image: redis:6.0.9
  network_mode: host
  restart: always
  volumes:
   - /data/service/redis/7003/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7003/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode

 redis4:
  image: redis:6.0.9
  network_mode: host
  restart: always
  volumes:
   - /data/service/redis/7004/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7004/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode

 redis5:
  image: redis:6.0.9
  network_mode: host
  restart: always
  volumes:
   - /data/service/redis/7005/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7005/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode

 redis6:
  image: redis:6.0.9
  network_mode: host
  restart: always
  volumes:
   - /data/service/redis/7006/config/redis.conf:/etc/redis/redis.conf
   - /data/service/redis/7006/data:/data
  environment:
   - TZ=Asia/Shanghai
   - LANG=en_US.UTF-8
  command: ["redis-server", "/etc/redis/redis.conf"]
  privileged: true    # run the container in privileged mode

Create the cluster:

redis-cli -h 10.11.12.13 -p 7001 --cluster create 10.11.12.13:7001 10.11.12.13:7002 10.11.12.13:7003 10.11.12.13:7004 10.11.12.13:7005 10.11.12.13:7006 --cluster-replicas 1 -a xxx
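
Then verify the cluster state (a sketch; -c enables cluster-aware redirection):

redis-cli -c -h 10.11.12.13 -p 7001 -a xxx cluster info
redis-cli -c -h 10.11.12.13 -p 7001 -a xxx cluster nodes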

10. Deploying Easy Mock with docker-compose

version: '3'

services:
  mongodb:
    image: registry.cn-shenzhen.aliyuncs.com/lishanbin/mongo:5.0
    volumes:
      # ./data/db is where the database files are stored; change to a suitable local path
      - './data/db:/data/db'
    networks:
      - easy-mock
    restart: always

  redis:
    image: registry.cn-shenzhen.aliyuncs.com/lishanbin/redis:4.0.6
    command: redis-server  --appendonly yes
    volumes:
      # ./data/redis is where the redis data files are stored; change to a suitable local path
      - './data/redis:/data'
    networks:
      - easy-mock
    restart: always

  web:
    image: easymock/easymock:1.6.0
    command: /bin/bash -c "npm start"
    ports:
      - 7300:7300
    volumes:
      # log directory; change to a suitable local path
      - './logs:/home/easy-mock/easy-mock/logs'
      # config file; replace with your own local config if needed
      # - './production.json:/home/easy-mock/easy-mock/config/production.json'
    networks:
      - easy-mock

networks:
  easy-mock:
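
To start the stack and open the UI (a sketch; the compose file name is assumed):

docker-compose -f easymock.yaml up -d

Easy Mock is then available at http://<host>:7300.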

11. Deploying standalone Nacos with docker-compose

vim nacos.yml

version: "3"
services:
  nacos:
    image: nacos-server:2.0.4
    container_name: nacos
    privileged: true
    volumes:
     # - ./cluster-logs:/home/nacos/logs
      - ./init.d/custom.properties:/home/nacos/init.d/custom.properties
    ports:
      - "7848:7848"
      - "8848:8848"
      - "9848:9848"
      - "9555:9555"
      - "9849:9849"
    env_file:
      - ./env/nacos-ip.env
    restart: on-failure

cat env/nacos-ip.env

#nacos dev env
# use a hostname if it resolves, otherwise use the ip (ip is the default)
PREFER_HOST_MODE=ip
MODE=standalone
# with multiple NICs, pin the ip (or interface) to use
NACOS_SERVER_IP=192.168.0.60
# other nodes in the cluster [ip1:port ip2:port ip3:port]
NACOS_SERVERS=192.168.0.60
# nacos web port; defaults to 8848 if omitted
NACOS_APPLICATION_PORT=8848
# datasource platform: only mysql is supported, otherwise leave empty for embedded storage
SPRING_DATASOURCE_PLATFORM=mysql
MYSQL_SERVICE_HOST=192.168.0.60
MYSQL_SERVICE_DB_NAME=nacos_config
MYSQL_SERVICE_PORT=3306
MYSQL_SERVICE_USER=root
MYSQL_SERVICE_PASSWORD=xxx
MYSQL_DATABASE_NUM=1
# JVM tuning parameters
JVM_XMS=2g
JVM_XMX=2g
JVM_XMN=2g
JVM_MS=128m
JVM_MMS=320m

cat init.d/custom.properties

spring.security.enabled=false
#management.security=false
#security.basic.enabled=false
#nacos.security.ignore.urls=/**
#management.metrics.export.elastic.host=http://localhost:9200
# metrics for prometheus
management.endpoints.web.exposure.include=*
 
# metrics for elastic search
#management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200
 
# metrics for influx
#management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true
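
Nacos persists its configuration in the MySQL database referenced by nacos-ip.env, so the nacos_config schema has to be created first with the SQL script shipped in the matching Nacos release (nacos-mysql.sql). After that, start and verify (a sketch):

docker-compose -f nacos.yml up -d
docker logs -f nacos        # wait for a line like "Nacos started successfully"

The console is then reachable at http://192.168.0.60:8848/nacos (default login nacos / nacos).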
