This guide uses CentOS 7.
Run the commands below as root to avoid repeating sudo everywhere:
su - root
Reference: the Docker installation docs.
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
sudo yum install -y yum-utils
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install docker-ce docker-ce-cli containerd.io
sudo systemctl start docker
docker -v
sudo docker images
sudo systemctl enable docker
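To sanity-check the installation, the standard hello-world smoke test works here:
# Pull and run a test container; a greeting message means Docker is working
sudo docker run hello-world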
Reference: Aliyun's image accelerator service.
# Create the config directory
sudo mkdir -p /etc/docker
# Write the daemon config and set the registry mirror
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://vw9qapdy.mirror.aliyuncs.com"]
}
EOF
# Reload the systemd daemon
sudo systemctl daemon-reload
# Restart Docker
sudo systemctl restart docker
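To confirm the mirror is active, docker info lists the configured mirrors:
sudo docker info | grep -A 1 "Registry Mirrors"
# The aliyuncs.com mirror URL should appear in the output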
sudo docker pull mysql:5.7
sudo docker run -p 3306:3306 --name mysql \
-v /mydata/mysql/log:/var/log/mysql \
-v /mydata/mysql/data:/var/lib/mysql \
-v /mydata/mysql/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root \
-d mysql:5.7
Parameters:
-p 3306:3306: map container port 3306 to host port 3306
-v: mount the host's log, data, and config directories into the container
-e MYSQL_ROOT_PASSWORD=root: set the root password
-d: run detached, using the mysql:5.7 image
List the containers Docker started:
docker ps
cd /mydata/mysql/conf
# Create and edit my.cnf
vi my.cnf
Paste in the following content:
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
[mysqld]
init_connect='SET collation_connection = utf8_unicode_ci'
init_connect='SET NAMES utf8'
character-set-server=utf8
collation-server=utf8_unicode_ci
skip-character-set-client-handshake
skip-name-resolve
docker restart mysql
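To check that the UTF-8 settings took effect, query the server variables from inside the container (this assumes the root password set above):
docker exec mysql mysql -uroot -proot -e "SHOW VARIABLES LIKE 'character%';"
# character_set_server should report utf8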
docker pull redis
mkdir -p /mydata/redis/conf
touch /mydata/redis/conf/redis.conf
docker run -p 6379:6379 --name redis \
-v /mydata/redis/data:/data \
-v /mydata/redis/conf/redis.conf:/etc/redis/redis.conf \
-d redis redis-server /etc/redis/redis.conf
For more Redis options, see the Redis configuration reference.
echo "appendonly yes" >> /mydata/redis/conf/redis.conf
# Restart for the change to take effect
docker restart redis
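A quick check that Redis is up and AOF persistence is enabled:
docker exec redis redis-cli ping
# Expected reply: PONG
docker exec redis redis-cli config get appendonly
# Expected reply: appendonly / yes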
# Auto-start MySQL with Docker
docker update mysql --restart=always
# Auto-start Redis with Docker
docker update redis --restart=always
# Stores and indexes data
docker pull elasticsearch:7.4.2
# Visualizes the indexed data
docker pull kibana:7.4.2
# Create the config directory
mkdir -p /mydata/elasticsearch/config
# Create the data directory
mkdir -p /mydata/elasticsearch/data
# Make everything under /mydata/elasticsearch/ readable and writable
chmod -R 777 /mydata/elasticsearch/
# Allow any host to reach elasticsearch
echo "http.host: 0.0.0.0" >/mydata/elasticsearch/config/elasticsearch.yml
The trailing \ continues the command onto the next line; note the space before it.
docker run --name elasticsearch -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms64m -Xmx512m" \
-v /mydata/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /mydata/elasticsearch/data:/usr/share/elasticsearch/data \
-v /mydata/elasticsearch/plugins:/usr/share/elasticsearch/plugins \
-d elasticsearch:7.4.2
-p 9200:9200 -p 9300:9300: exposes two ports, 9200 for HTTP REST API requests and 9300 for inter-node communication when ES runs as a distributed cluster
-e "discovery.type=single-node": runs ES as a single node
-e ES_JAVA_OPTS="-Xms64m -Xmx512m": caps the JVM heap at startup; without it ES may consume all available system memory
-d elasticsearch:7.4.2: the image to start, detached
Open IP:9200 in a browser; a JSON response means ES started successfully.
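The same check from the shell, assuming ES is bound on localhost:
curl http://localhost:9200
# A JSON document with cluster name and version info confirms ES is up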
# Docker starts on boot, so ES now starts on boot too
docker update elasticsearch --restart=always
docker run --name kibana \
-e ELASTICSEARCH_HOSTS=http://192.168.163.131:9200 \
-p 5601:5601 \
-d kibana:7.4.2
-e ELASTICSEARCH_HOSTS=http://192.168.163.131:9200: set this to your own VM's IP address
Open 192.168.163.131:5601 in a browser to test.
# Docker starts on boot, so Kibana now starts on boot too
docker update kibana --restart=always
Preparation: the ES plugins directory is mounted at /mydata/elasticsearch/plugins. The IK analyzer version must match ES (7.4.2 here); download it from the GitHub Releases page or a mirror.
# Enter the mounted plugin directory /mydata/elasticsearch/plugins
cd /mydata/elasticsearch/plugins
# Install the wget download tool
yum install -y wget
# Download the matching IK analyzer version (7.4.2 here)
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.4.2/elasticsearch-analysis-ik-7.4.2.zip
The IK analyzer archive is now in the mounted plugins directory. Enter the ES container to confirm it is visible there:
# Enter the container
docker exec -it elasticsearch /bin/bash
# List the ES plugin directory
ls /usr/share/elasticsearch/plugins
# elasticsearch-analysis-ik-7.4.2.zip should be listed
So from here on we only need to work in the mounted directory /mydata/elasticsearch/plugins.
# Go to the mounted plugin directory
cd /mydata/elasticsearch/plugins
# Unzip into the ik directory under plugins
unzip elasticsearch-analysis-ik-7.4.2.zip -d ik
# Delete the downloaded archive
rm -f elasticsearch-analysis-ik-7.4.2.zip
# Fix the directory permissions
chmod -R 777 ik/
# Enter the ES container
docker exec -it elasticsearch /bin/bash
# Go to the ES bin directory
cd /usr/share/elasticsearch/bin
# List installed plugins; ik should appear
elasticsearch-plugin list
# Exit the container
exit
# Restart Elasticsearch
docker restart elasticsearch
Test the IK tokenizer in the Kibana Dev Tools console:
GET my_index/_analyze
{
"analyzer": "ik_max_word",
"text":"蔡徐坤"
}
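For comparison, the IK plugin also ships a coarser-grained analyzer, ik_smart; the same request with it shows the difference in segmentation:
GET my_index/_analyze
{
  "analyzer": "ik_smart",
  "text":"蔡徐坤"
}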
docker run --name nacos -d -p 8848:8848 --privileged=true \
--restart=always \
-e JVM_XMS=512m \
-e JVM_XMX=2048m \
-e MODE=standalone \
-e PREFER_HOST_MODE=hostname \
-v /home/nacos/logs:/home/nacos/logs \
nacos/nacos-server:1.2.1
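Once the container is up, the Nacos console can be checked directly (Nacos 1.x ships with the default credentials nacos/nacos):
# Watch the startup log until the "started successfully" message appears
docker logs -f nacos
# Then open http://your-ip:8848/nacos and log in with nacos/nacos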
docker run -d --name rabbitmq -p 5671:5671 -p 5672:5672 -p 4369:4369 -p 25672:25672 -p 15671:15671 -p 15672:15672 rabbitmq:management
4369, 25672 (Erlang discovery & clustering ports)
5672, 5671 (AMQP ports)
15672 (web management UI port)
61613, 61614 (STOMP protocol ports)
1883, 8883 (MQTT protocol ports)
Make RabbitMQ start automatically with Docker:
docker update rabbitmq --restart=always
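A quick way to confirm the broker is healthy (rabbitmqctl ships inside the container; the management image uses guest/guest by default):
docker exec rabbitmq rabbitmqctl status
# The web console is at http://your-ip:15672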
Delayed message exchange plugin
(The port list above is documented at https://www.rabbitmq.com/networking.html)
Installation
1. Upload the downloaded rabbitmq_delayed_message_exchange-3.9.0.ez file to the server running RabbitMQ.
2. Switch to the directory containing the plugin and run
docker cp rabbitmq_delayed_message_exchange-3.9.0.ez rabbitmq:/plugins
to copy the plugin into the container's plugins directory.
3. Run
docker exec -it rabbitmq /bin/bash
to enter the container, then cd plugins to enter the plugins directory.
4. Run
ls -l|grep delay
to check whether the plugin was copied successfully.
5. In the container's plugins directory, run
rabbitmq-plugins enable rabbitmq_delayed_message_exchange
to enable the plugin.
6. Run exit to leave the RabbitMQ container, then run
docker restart rabbitmq
to restart it.
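After the restart, the plugin state can be checked from outside the container:
docker exec rabbitmq rabbitmq-plugins list | grep delayed
# rabbitmq_delayed_message_exchange should be marked as enabled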
Download: Releases · rabbitmq/rabbitmq-delayed-message-exchange · GitHub
Version used: rabbitmq_delayed_message_exchange-3.9.0.ez
jvisualvm: press Win+R and run jvisualvm from the console
jmeter: download from the official site
1. jvisualvm monitors memory leaks, traces garbage collection, and profiles runtime memory, CPU, and threads. Thread states shown:
Running: currently executing
Sleeping: in sleep
Waiting: in wait
Parked: idle threads parked in a thread pool
Monitor: blocked threads waiting to acquire a lock
2. Install a plugin to view GC conveniently.
Launch jvisualvm from cmd, then go to Tools -> Plugins. If the plugin center returns a 503 error: open the VisualVM: Plugins Centers page, check your JDK version from cmd, find the matching plugin-center link in the list, copy it, and set it as the plugin center URL.
1. Performance metrics
Response Time (RT): the time from when the user sends a request from the client until the client receives the server's response.
HPS (Hits Per Second): hits on the server per second.
TPS (Transactions Per Second): business transactions the system processes per second.
QPS (Queries Per Second): queries the system processes per second.
For internet workloads where a business action consists of exactly one request, TPS = QPS = HPS. In general, TPS measures an end-to-end business flow, QPS measures interface queries, and HPS measures individual client hits on the server.
Whichever of TPS, QPS, and HPS you use, it is a key measure of system capacity, and higher is better. Rough empirical ranges:
Finance: 1,000 to 50,000 TPS (excluding internet-style promotions)
Insurance: 100 to 100,000 TPS (excluding internet-style promotions)
Manufacturing: 10 to 5,000 TPS
Internet e-commerce: 10,000 to 1,000,000 TPS
Mid-size internet sites: 1,000 to 50,000 TPS
Small internet sites: 500 to 10,000 TPS
Max Response Time: the longest time from a user request to the system's response.
Minimum Response Time: the shortest time from a user request to the system's response.
90% Response Time: the 90th-percentile value when all users' response times are sorted.
From the outside, performance testing focuses on three metrics:
Throughput: requests or tasks the system can process per second.
Response time: the time the service takes to handle one request or task.
Error rate: the fraction of requests in a batch whose results are errors.
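These metrics are linked by Little's law: concurrency ≈ throughput × response time. A worked example with illustrative numbers: at an average response time of 100 ms, sustaining 50 in-flight requests implies a throughput of about 50 / 0.1 s = 500 requests per second.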
1. Installing JMeter: download the archive from Apache JMeter - Download Apache JMeter, unpack it, and run jmeter.bat.
See the official tutorial for detailed usage.
Every database that takes part in a distributed transaction needs an UNDO_LOG table.
CREATE TABLE `undo_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`branch_id` bigint(20) NOT NULL,
`xid` varchar(100) NOT NULL,
`context` varchar(128) NOT NULL,
`rollback_info` longblob NOT NULL,
`log_status` int(11) NOT NULL,
`log_created` datetime NOT NULL,
`log_modified` datetime NOT NULL,
`ext` varchar(100) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
2. Installing Seata 0.7.1 on Linux
cd /mydata
# Create the directory
mkdir seata
# Enter it
cd seata
# Download
wget https://github.com/seata/seata/releases/download/v0.7.1/seata-server-0.7.1.tar.gz
# Unpack
tar -xvf seata-server-0.7.1.tar.gz
# Edit the configuration
vi conf/registry.conf
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "自己的ip:8848"
namespace = "public"
cluster = "default"
}
}
# Switch to the bin directory
cd bin/
# Start seata-server
sh seata-server.sh -p 8091 -h your-ip
Starting Seata requires a JDK; installing one on Linux is not covered here.
Alternative: installing Seata 1.3.0 with Docker
Pull a temporary Seata image (used only to export the default configuration):
docker pull seataio/seata-server:1.3.0
Export the configuration:
docker run --name seata-server -p 8091:8091 -d seataio/seata-server:1.3.0
docker cp seata-server:/seata-server /mydata/seata/
docker stop seata-server
docker rm -f seata-server
Go to the exported configuration and edit it:
cd /mydata/seata
Edit file.conf:
transport {
# tcp udt unix-domain-socket
type = "TCP"
#NIO NATIVE
server = "NIO"
#enable heartbeat
heartbeat = true
#thread factory for netty
thread-factory {
boss-thread-prefix = "NettyBoss"
worker-thread-prefix = "NettyServerNIOWorker"
server-executor-thread-prefix = "NettyServerBizHandler"
share-boss-worker = false
client-selector-thread-prefix = "NettyClientSelector"
client-selector-thread-size = 1
client-worker-thread-prefix = "NettyClientWorkerThread"
# netty boss thread size,will not be used for UDT
boss-thread-size = 1
#auto default pin or 8
worker-thread-size = 8
}
shutdown {
# when destroy server, wait seconds
wait = 3
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
vgroup_mapping.gulimall-order-fescar-service-group = "default"
#only support single node
default.grouplist = "127.0.0.1:8091"
#degrade current not support
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
lock {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
}
## database store
db {
## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
datasource = "dbcp"
## mysql/oracle/h2/oceanbase etc.
db-type = "mysql"
url = "jdbc:mysql://数据库ip:3306/数据库"
user = "root"
password = "root"
min-conn = 1
max-conn = 3
global.table = "global_table"
branch.table = "branch_table"
lock-table = "lock_table"
query-limit = 100
}
}
lock {
## the lock store mode: local、remote
mode = "remote"
local {
## store locks in user's database
}
remote {
## store locks in the seata's server
}
}
recovery {
committing-retry-delay = 30
asyn-committing-retry-delay = 30
rollbacking-retry-delay = 30
timeout-retry-delay = 30
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}
Edit registry.conf:
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
serverAddr = "nacosIP地址:8848"
namespace = "public"
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
serverAddr = "localhost"
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
Start Seata:
docker run -d --restart always --name seata-server -p 8091:8091 -v /mydata/seata:/seata-server -e SEATA_IP=your-ip -e SEATA_PORT=8091 seataio/seata-server:1.3.0
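To verify the server came up and registered (the exact service name in Nacos depends on the registry config above):
# Watch the startup log for errors
docker logs -f seata-server
# Then confirm seata-server appears in the Nacos console's service list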
mkdir -p /mydata/nginx/conf
docker run -p 80:80 --name nginx -d nginx:1.10
# Copy the nginx config directory out of the container into /mydata/nginx/conf on the host
docker container cp nginx:/etc/nginx /mydata/nginx/conf
# The copy created a nested nginx directory; move its contents up into conf
mv /mydata/nginx/conf/nginx/* /mydata/nginx/conf/
# Remove the now-empty /mydata/nginx/conf/nginx directory
rm -rf /mydata/nginx/conf/nginx
# Stop the nginx container
docker stop nginx
# Remove the nginx container
docker rm nginx
docker rm nginx
docker run -p 80:80 --name nginx \
-v /mydata/nginx/html:/usr/share/nginx/html \
-v /mydata/nginx/logs:/var/log/nginx \
-v /mydata/nginx/conf/:/etc/nginx \
-d nginx:1.10
docker update nginx --restart=always
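A quick smoke test of the rebuilt container (this writes a page into the mounted html directory, which starts out empty):
echo "hello nginx" > /mydata/nginx/html/index.html
curl http://localhost
# Expected output: hello nginx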
# Pull
docker pull bladex/sentinel-dashboard:1.6.3
# Run
docker run --name sentinel -d -p 8858:8858 bladex/sentinel-dashboard:1.6.3
# Start on boot
docker update sentinel --restart=always
A minor gotcha:
If the Sentinel real-time monitoring page stays blank after you start the project and hit its endpoints, the usual cause is a clock mismatch between the Sentinel server and the application server. Synchronize the VM's clock with the application server, restart both, and test again.
Automatic time synchronization:
# Install the chrony time-sync service
yum install chrony -y
# Start time synchronization
systemctl start chronyd
# Enable on boot
systemctl enable chronyd
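To verify the clock is actually syncing, query chrony's client tool:
chronyc tracking
# A "Leap status : Normal" line and a small "System time" offset indicate a healthy sync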
docker run -d --name zipkin -p 9411:9411 openzipkin/zipkin
docker update zipkin --restart=always
Installing Gitblit with Docker:
docker run -d --name=gitblit -p 9444:8080 -p 8443:8443 \
-p 9418:9418 -p 29418:29418 -e TZ="Asia/Shanghai" \
-v /mydata/gitblit/data:/opt/gitblit-data \
--restart always jacekkow/gitblit
docker pull minio/minio
mkdir -p /mydata/minio/config
mkdir -p /mydata/minio/data
docker run -p 9000:9000 -p 9090:9090 \
--name minio \
-d --restart=always \
-e "MINIO_ACCESS_KEY=username" \
-e "MINIO_SECRET_KEY=password" \
-v /mydata/minio/data:/data \
-v /mydata/minio/config:/root/.minio \
minio/minio server \
/data --console-address ":9090" --address ":9000"
MINIO_ACCESS_KEY: the login name (access key)
MINIO_SECRET_KEY: the password (secret key)
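A quick liveness probe once the container is running (the health endpoint is built into MinIO):
curl -i http://localhost:9000/minio/health/live
# HTTP 200 means the server is alive; the web console is at http://your-ip:9090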
# Enter the mydata directory
cd /mydata
# Create the neo4j directory under mydata
mkdir neo4j
cd neo4j
# Create the data, logs, conf, and import directories under neo4j
mkdir data logs conf import
# Grant permissions on logs and data; without this the container errors out on startup
chmod 777 logs
chmod 777 data
# Search for the neo4j image
docker search neo4j
# Pull the image
docker pull neo4j
# List local images
docker images
# Start the neo4j container
docker run -d --name neo4j --restart=always \
-p 7474:7474 -p 7687:7687 \
-v /mydata/neo4j/data:/data \
-v /mydata/neo4j/logs:/logs \
-v /mydata/neo4j/conf:/var/lib/neo4j/conf \
-v /mydata/neo4j/import:/var/lib/neo4j/import \
--env NEO4J_AUTH=neo4j/123456 neo4j
# Watch the startup log
docker logs -f neo4j
# Go to the config directory
cd /mydata/neo4j/conf
vim neo4j.conf
# neo4j.conf content:
dbms.tx_log.rotation.retention_policy=100M size
dbms.memory.pagecache.size=512M
dbms.default_listen_address=0.0.0.0
dbms.connector.bolt.listen_address=0.0.0.0:7687
dbms.connector.http.listen_address=0.0.0.0:7474
dbms.directories.logs=/logs
# Restart the container for the config to take effect
docker restart neo4j
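To verify the database is reachable, run a trivial query through cypher-shell, which is bundled in the image (credentials come from NEO4J_AUTH above):
docker exec neo4j cypher-shell -u neo4j -p 123456 "RETURN 1;"
# The browser UI is at http://your-ip:7474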
1. Pull and start MongoDB:
docker run -d --name mongo-yapi mongo
2. Pull the YApi image:
docker pull registry.cn-hangzhou.aliyuncs.com/anoy/yapi
3. Initialize the database:
docker run -it --rm \
--link mongo-yapi:mongo \
--entrypoint npm \
--workdir /api/vendors \
registry.cn-hangzhou.aliyuncs.com/anoy/yapi \
run install-server
Admin account initialized successfully. Username: [email protected], password: ymfe.org
4. Start the YApi service:
docker run -d \
--name yapi \
--link mongo-yapi:mongo \
--workdir /api/vendors \
-p 3000:3000 \
registry.cn-hangzhou.aliyuncs.com/anoy/yapi \
server/app.js
Visit IP:3000 in a browser.
yum install -y docker-compose
yum installs the default version; if that doesn't meet your needs, upgrade.
Upgrade:
Remove the existing binary:
rm -rf /usr/local/bin/docker-compose
Download a specific docker-compose version (edit the version number as needed):
curl -L https://github.com/docker/compose/releases/download/1.27.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
If the download is slow, use a domestic mirror:
curl -L https://get.daocloud.io/docker/compose/releases/download/1.28.5/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
Make it executable:
chmod +x /usr/local/bin/docker-compose
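Confirm the binary works:
docker-compose --version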
docker-compose script
Create the script:
vim docker-compose.yml
Content:
version: '3.6'
services:
  web:
    image: 'gitlab/gitlab-ee:latest'
    restart: always
    hostname: 'gitlab.example.com'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://gitlab.example.com:8929'
        gitlab_rails['gitlab_shell_ssh_port'] = 2224
    container_name: gitlab
    ports:
      - '8929:8929'
      - '2224:22'
    volumes:
      - './config:/etc/gitlab'
      - './logs:/var/log/gitlab'
      - './data:/var/opt/gitlab'
    shm_size: '256m'
Run in the same directory:
docker-compose up -d
Tear everything down with docker-compose (run in the same directory):
docker-compose down
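On recent GitLab versions the initial root password is generated at first boot; with the ./config volume above it should be readable from the host (the exact path depends on the GitLab version, so treat this as a hint):
cat ./config/initial_root_password
# Or from inside the container:
docker exec gitlab cat /etc/gitlab/initial_root_password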
Install elasticsearch:
docker run --name elasticsearch -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms64m -Xmx512m" \
-v /mydata/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /mydata/elasticsearch/data:/usr/share/elasticsearch/data \
-v /mydata/elasticsearch/plugins:/usr/share/elasticsearch/plugins \
-d elasticsearch:7.4.2
Install kibana:
docker run --name kibana \
-e ELASTICSEARCH_HOSTS=http://192.168.163.131:9200 \
-p 5601:5601 \
-d kibana:7.4.2
Copy Kibana's config file out of the container:
docker container cp kibana:/usr/share/kibana/config/kibana.yml /mydata/elk/kibana/config
Stop both containers; a compose script will manage them instead:
docker stop elasticsearch
docker stop kibana
docker rm -f elasticsearch
docker rm -f kibana
Write the script under /mydata/elk/:
vim elk-dc.yaml
Script content:
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.17.0               # image
    container_name: elk_elasticsearch         # container name
    #restart: always                          # start on boot and keep restarting on failure
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      cluster.name: elasticsearch             # cluster name
      discovery.type: single-node             # single-node mode
      ES_JAVA_OPTS: "-Xms256m -Xmx256m"       # JVM heap size
      ELASTIC_PASSWORD: changeme
    volumes:
      - /mydata/elk/elasticsearch/data:/usr/share/elasticsearch/data         # data files
      - /mydata/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins   # plugin files
    networks:
      - elk
  logstash:
    image: logstash:7.17.0
    container_name: elk_logstash
    #restart: always
    ports:
      - 4560:4560
    volumes:
      - /mydata/elk/logstash/pipeline:/usr/share/logstash/pipeline
      - /mydata/elk/logstash/pipeline/logstash-sample.conf:/usr/share/logstash/pipeline/logstash.conf   # logstash config
    depends_on:
      - elasticsearch          # start after elasticsearch
    links:
      - elasticsearch:es       # elasticsearch is reachable under the hostname es
    networks:
      - elk
  kibana:
    image: kibana:7.17.0
    container_name: elk_kibana
    #restart: always
    ports:
      - 5601:5601
    environment:
      - ELASTICSEARCH_HOSTS=http://192.168.1.181:9200   # address of elasticsearch
    volumes:
      - /mydata/elk/kibana/config:/usr/share/kibana/config
    depends_on:
      - elasticsearch          # start after elasticsearch
    networks:
      - elk
networks:
  elk:
    driver: bridge
Start the deployment:
docker-compose -f elk-dc.yaml up -d
Tear it all down:
docker-compose -f elk-dc.yaml down
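The compose file mounts a pipeline config (logstash-sample.conf) that is not shown above. A minimal sketch, assuming applications ship JSON log lines over TCP to the exposed port 4560 (the index name is illustrative):
input {
  tcp {
    port => 4560
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => ["es:9200"]
    index => "app-logs-%{+YYYY.MM.dd}"
  }
}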
Pull the Jenkins image:
docker pull jenkins/jenkins
Write docker-compose.yml:
version: "3.1"
services:
jenkins:
image: jenkins/jenkins
container_name: jenkins
ports:
- 8080:8080
- 50000:50000
volumes:
- ./data/:/var/jenkins_home/
Run the install command:
docker-compose up -d
The first start fails because the data volume directory lacks write permission; grant it:
chmod -R a+w data/
After restarting the Jenkins container, Jenkins has to download a lot of content, and the default update site is slow, so point it at a domestic mirror:
# Edit the hudson.model.UpdateCenter.xml file in the data volume; it contains:
<id>default</id>
<url>https://updates.jenkins.io/update-center.json</url>
# Replace the URL with http://mirror.esuni.jp/jenkins/updates/update-center.json:
<id>default</id>
<url>http://mirror.esuni.jp/jenkins/updates/update-center.json</url>
# Tsinghua University's plugin mirror also works: https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
Restart the Jenkins container again and open Jenkins in a browser (it needs a moment to come up).
Read the initial admin password, then log in and install plugins:
docker exec -it jenkins cat /var/jenkins_home/secrets/initialAdminPassword
Select the plugins to install.
When the downloads finish, complete the setup and enter the dashboard (some plugins may fail to download).
Jenkins needs to pull code from Git, build it locally, and even publish custom images to a Docker registry, so there is a fair amount to configure.
Prepare a project in a GitLab repository and configure it in Jenkins to implement a basic DevOps flow for that project.
Build a Maven project and push it to GitLab (Gitee or GitHub also work).
In Jenkins, click New Item in the left navigation.
Choose a freestyle project.
Jenkins stores the source pulled from Git on the disk of the machine running the Jenkins service.
Configure the job's source repository URL.
Click Build Now.
View the build log by clicking the build entry from step 3 above.
The log shows the source has been pulled to the Jenkins host; the third line of the log tells you where the checked-out source lives.
Inspect the source under /var/jenkins_home/workspace/test in the Jenkins container.
Once the code is on the Jenkins host it must be built there, which requires Maven, which in turn requires Java; so next, install a JDK and Maven in Jenkins and register them with the Jenkins service.
Prepare JDK and Maven archives and map them into the Jenkins container through the data volume.
Maven settings (settings.xml) snippets:
<mirror>
  <id>alimaven</id>
  <name>aliyun maven</name>
  <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
  <mirrorOf>central</mirrorOf>
</mirror>
<profile>
  <id>jdk-1.8</id>
  <activation>
    <activeByDefault>true</activeByDefault>
    <jdk>1.8</jdk>
  </activation>
  <properties>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
    <maven.compiler.compilerVersion>1.8</maven.compiler.compilerVersion>
  </properties>
</profile>
Example Jenkinsfile:
pipeline {
    agent any
    environment {
        sonarLogin = '2bab7bf7d5af25e2c2ca2f178af2c3c55c64d5d8'
        harborUser = 'admin'
        harborPassword = 'Harbor12345'
        harborHost = '192.168.11.12:8888'
        harborRepo = 'repository'
    }
    stages {
        stage('Pull code from Git') {
            steps {
                checkout([$class: 'GitSCM', branches: [[name: '$tag']], extensions: [], userRemoteConfigs: [[url: 'http://49.233.115.171:8929/root/lsx.git']]])
            }
        }
        stage('Build with Maven') {
            steps {
                sh '/var/jenkins_home/maven/bin/mvn clean package -DskipTests'
            }
        }
        stage('Scan with SonarQube') {
            steps {
                sh '/var/jenkins_home/sonar-scanner/bin/sonar-scanner -Dsonar.sources=./ -Dsonar.projectname=${JOB_NAME} -Dsonar.projectKey=${JOB_NAME} -Dsonar.java.binaries=target/ -Dsonar.login=${sonarLogin}'
            }
        }
        stage('Build custom image') {
            steps {
                sh '''cd docker
                mv ../target/*.jar ./
                docker build -t ${JOB_NAME}:$tag .
                '''
            }
        }
        stage('Push custom image') {
            steps {
                sh '''docker login -u ${harborUser} -p ${harborPassword} ${harborHost}
                docker tag ${JOB_NAME}:$tag ${harborHost}/${harborRepo}/${JOB_NAME}:$tag
                docker push ${harborHost}/${harborRepo}/${JOB_NAME}:$tag'''
            }
        }
        stage('Notify target server') {
            steps {
                sshPublisher(publishers: [sshPublisherDesc(configName: 'centos-docker', transfers: [sshTransfer(cleanRemote: false, excludes: '', execCommand: "/usr/bin/deploy.sh $harborHost $harborRepo $JOB_NAME $tag $port", execTimeout: 120000, flatten: false, makeEmptyDirs: false, noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: '', remoteDirectorySDF: false, removePrefix: '', sourceFiles: '')], usePromotionTimestamp: false, useWorkspaceInPromotion: false, verbose: false)])
            }
        }
    }
    post {
        success {
            dingtalk (
                robot: 'Jenkins-DingDing',
                type: 'MARKDOWN',
                title: "success: ${JOB_NAME}",
                text: ["- Build succeeded: ${JOB_NAME}\n- Version: ${tag}\n- Duration: ${currentBuild.durationString}\n- Job: #${JOB_NAME}"]
            )
        }
        failure {
            dingtalk (
                robot: 'Jenkins-DingDing',
                type: 'MARKDOWN',
                title: "fail: ${JOB_NAME}",
                text: ["- Build failed: ${JOB_NAME}\n- Version: ${tag}\n- Duration: ${currentBuild.durationString}\n- Job: #${JOB_NAME}"]
            )
        }
    }
}
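The pipeline's last stage invokes /usr/bin/deploy.sh on the target server, which is not shown above. A minimal sketch of what such a script could look like, assuming it pulls the freshly pushed image and replaces the running container (names and flags here are illustrative, not the author's actual script):
#!/usr/bin/env bash
# deploy.sh <harborHost> <harborRepo> <project> <tag> <port>
harbor_host=$1
harbor_repo=$2
project=$3
tag=$4
port=$5

image=$harbor_host/$harbor_repo/$project:$tag

# Stop and remove any previous container for this project
docker stop $project 2>/dev/null
docker rm $project 2>/dev/null

# Pull the new image and start it
docker pull $image
docker run -d --name $project -p $port:$port $image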
On Linux, the normal way to start and stop Nginx is ({nginx} is the Nginx install path):
{nginx}/sbin/nginx           # start
{nginx}/sbin/nginx -s stop   # stop
That is inconvenient, so here is a quicker way to start and stop Nginx.
Create an nginx startup script under /etc/init.d:
vim /etc/init.d/nginx
Press i to enter insert mode, paste the following code, and save.
Replace /usr/local/nginx/ with your own Nginx install path.
#!/bin/sh
#
# nginx - this script starts and stops the nginx daemon
#
# chkconfig: - 85 15
# description: Nginx is an HTTP(S) server, HTTP(S) reverse \
# proxy and IMAP/POP3 proxy server
# processname: nginx
# config: /etc/nginx/nginx.conf
# config: /etc/sysconfig/nginx
# pidfile: /var/run/nginx.pid
# Source function library.
. /etc/rc.d/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "$NETWORKING" = "no" ] && exit 0
nginx="/usr/local/nginx/sbin/nginx"
prog=$(basename $nginx)
NGINX_CONF_FILE="/usr/local/nginx/conf/nginx.conf"
[ -f /etc/sysconfig/nginx ] && . /etc/sysconfig/nginx
lockfile=/var/lock/subsys/nginx
start() {
    [ -x $nginx ] || exit 5
    [ -f $NGINX_CONF_FILE ] || exit 6
    echo -n $"Starting $prog: "
    daemon $nginx -c $NGINX_CONF_FILE
    retval=$?
    echo
    [ $retval -eq 0 ] && touch $lockfile
    return $retval
}

stop() {
    echo -n $"Stopping $prog: "
    killproc $prog -QUIT
    retval=$?
    echo
    [ $retval -eq 0 ] && rm -f $lockfile
    return $retval
}

restart() {
    configtest || return $?
    stop
    sleep 1
    start
}

reload() {
    configtest || return $?
    echo -n $"Reloading $prog: "
    killproc $nginx -HUP
    RETVAL=$?
    echo
}

force_reload() {
    restart
}

configtest() {
    $nginx -t -c $NGINX_CONF_FILE
}

rh_status() {
    status $prog
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        rh_status_q && exit 0
        $1
        ;;
    stop)
        rh_status_q || exit 0
        $1
        ;;
    restart|configtest)
        $1
        ;;
    reload)
        rh_status_q || exit 7
        $1
        ;;
    force-reload)
        force_reload
        ;;
    status)
        rh_status
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
        exit 2
esac
Set the script's permissions:
chmod 755 nginx
Register the script with chkconfig:
chkconfig --add nginx
Make nginx start automatically at runlevels 3 and 5:
chkconfig --level 35 nginx on
Create a symlink:
cd /usr/bin
ln -s /etc/init.d/nginx
All of the commands listed in the usage string can now be run:
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
Examples:
nginx start
nginx stop
nginx restart