黑马秒杀环境部署

Nacos 安装

一:下载镜像

# Pull the Nacos server image (no tag = latest; pin a version for reproducible deploys — TODO confirm desired version)
docker pull nacos/nacos-server

二:启动

    docker run -d -p 8848:8848 -e MODE=standalone --restart always --name nacos nacos/nacos-server

zookeeper安装

下载zookeeper镜像

# Pull the official Zookeeper image
docker pull zookeeper

创建镜像文件夹

# Create the host directory that will back the zookeeper data volume.
# mkdir -p creates missing parents (/data) and succeeds if the directory
# already exists — the original `cd /data && mkdir zookeeper` fails when
# /data is absent or the directory was already created.
mkdir -p /data/zookeeper

启动服务

# Run Zookeeper: Shanghai timezone, client port 2181 published, data persisted to /data/zookeeper
docker run --detach \
  --env TZ="Asia/Shanghai" \
  --publish 2181:2181 \
  --volume /data/zookeeper:/data \
  --name zookeeper \
  --restart always \
  zookeeper

-e TZ="Asia/Shanghai" # 指定上海时区 
-d # 表示在后台持续运行容器
-p 2181:2181 # 对端口进行映射,将本地2181端口映射到容器内部的2181端口
--name # 设置创建的容器名称
-v # 将本地目录(文件)挂载到容器指定目录;
--restart always #始终重新启动zookeeper
附:三个常用端口(可以修改)

1、2181:对client端提供服务

2、3888:选举leader使用

3、2888:集群内机器通讯使用(Leader监听此端口)

查看启动情况

# Verify the containers are up
docker ps

 docker kafka安装

1、下载镜像

# Pull the wurstmeister Kafka image
docker pull wurstmeister/kafka

3、启动kafka(如果服务器在云上,换成外网ip(192.168.1.1))

# Run Kafka, registering against the zookeeper at 192.168.1.1:2181.
# KAFKA_ADVERTISED_LISTENERS must be an address clients can reach (use the
# public IP when the server is in the cloud); KAFKA_LISTENERS binds all interfaces.
# --restart always added for consistency with the nacos/zookeeper containers.
docker run -d --name kafka \
--restart always \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=192.168.1.1:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.1.1:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 wurstmeister/kafka

4、进入容器执行命令
# Enter the Kafka container (replace 容器id with the real container id from `docker ps`)
docker exec -it 容器id bash
cd /opt/kafka_2.13-2.8.1/bin

# Create a topic (replace zookeeper地址 with the zookeeper host;
# --zookeeper is deprecated in Kafka 2.8 but still accepted by kafka-topics.sh)
kafka-topics.sh --create --zookeeper zookeeper地址:2181 --replication-factor 1 --partitions 1 --topic kafkatest
# Produce messages
kafka-console-producer.sh --broker-list localhost:9092 --topic kafkatest
# Consume messages — NOTE: kafka-console-consumer.sh dropped --zookeeper in Kafka 2.0+,
# so on this 2.8.1 image the broker address must be given via --bootstrap-server
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic kafkatest --from-beginning

docker 安装es集群

# vm.max_map_count = maximum number of memory map areas a single process may use
# (required by Elasticsearch; this is a mmap limit, not a thread limit)
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
# Apply the setting immediately
/sbin/sysctl -p
# Create the config directories for all three nodes (the original only created node01)
mkdir -p /itcast/tanhua/es-cluster/node01 /itcast/tanhua/es-cluster/node02 /itcast/tanhua/es-cluster/node03

# Copy jvm.options from the course material directory into node01, node02 and node03
# Create elasticsearch.yml in the node01 directory with the following content:
cluster.name: es-tanhua-cluster
node.name: node01
# this node is master-eligible and holds data
node.master: true
node.data: true
network.host: 192.168.31.81
http.port: 9200
# seed hosts for zen discovery (ES 6.x)
discovery.zen.ping.unicast.hosts: ["192.168.31.81"]
# require 2 of 3 master-eligible nodes to form a quorum (split-brain protection)
discovery.zen.minimum_master_nodes: 2
# allow cross-origin HTTP requests (e.g. from a browser-based ES client)
http.cors.enabled: true
http.cors.allow-origin: "*"
# Create elasticsearch.yml in the node02 directory with the following content
# (same as node01 except node.name and http.port):
cluster.name: es-tanhua-cluster
node.name: node02
node.master: true
node.data: true
network.host: 192.168.31.81
http.port: 9201
discovery.zen.ping.unicast.hosts: ["192.168.31.81"]
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
# Create elasticsearch.yml in the node03 directory with the following content
# (same as node01 except node.name and http.port):
cluster.name: es-tanhua-cluster
node.name: node03
node.master: true
node.data: true
network.host: 192.168.31.81
http.port: 9202
discovery.zen.ping.unicast.hosts: ["192.168.31.81"]
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
# Create the three ES containers on the host network, mounting each node's
# elasticsearch.yml and jvm.options plus a named data volume.
# Fixes vs. the original text: the es-node03 command was truncated (its
# `docker create ... --name es-node03` prefix was missing), host paths were
# garbled by line-wrapping (elasticsea/rch.yml), and the mount paths used
# "escluster" while the mkdir step used "es-cluster" — unified here.
docker create --restart=always --name es-node01 --net host \
  -v /itcast/tanhua/es-cluster/node01/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  -v /itcast/tanhua/es-cluster/node01/jvm.options:/usr/share/elasticsearch/config/jvm.options \
  -v es-cluster-node01-data:/usr/share/elasticsearch/data \
  elasticsearch:6.5.4

docker create --restart=always --name es-node02 --net host \
  -v /itcast/tanhua/es-cluster/node02/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  -v /itcast/tanhua/es-cluster/node02/jvm.options:/usr/share/elasticsearch/config/jvm.options \
  -v es-cluster-node02-data:/usr/share/elasticsearch/data \
  elasticsearch:6.5.4

docker create --restart=always --name es-node03 --net host \
  -v /itcast/tanhua/es-cluster/node03/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  -v /itcast/tanhua/es-cluster/node03/jvm.options:/usr/share/elasticsearch/config/jvm.options \
  -v es-cluster-node03-data:/usr/share/elasticsearch/data \
  elasticsearch:6.5.4
# Start all three ES containers at once
docker start es-node01 es-node02 es-node03
# Or start them one at a time and follow each node's log
docker start es-node01 && docker logs -f es-node01
docker start es-node02 && docker logs -f es-node02
docker start es-node03 && docker logs -f es-node03
 

jvm.options

# Heap: min = max = 128 MB (small demo sizing; production needs far more)
-Xms128m
-Xmx128m
# CMS collector, initiated strictly at 75% old-gen occupancy
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
# Pre-touch all heap pages at startup
-XX:+AlwaysPreTouch
-Xss1m
-Djava.awt.headless=true
-Dfile.encoding=UTF-8
-Djna.nosys=true
# Keep full stack traces even for hot, repeated exceptions
-XX:-OmitStackTraceInFastThrow
# Netty tuning flags (Elasticsearch defaults)
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
-Djava.io.tmpdir=${ES_TMPDIR}
# Dump heap on OutOfMemoryError; paths are relative to the ES working directory
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=data
-XX:ErrorFile=logs/hs_err_pid%p.log
# NOTE: "8:" prefixed lines apply only on JDK 8, "9-:" on JDK 9+, "10-:" on JDK 10+
# (Elasticsearch jvm.options version-conditional syntax)
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:logs/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
9-:-Djava.locale.providers=COMPAT
10-:-XX:UseAVX=2

你可能感兴趣的:(ssh,docker,java,容器)