使用docker-compose搭建fastdfs分布式文件服务器集群笔记

目录

前提条件

集群规划

开始安装

拉取docker镜像包

准备tracker配置工作

创建tracker工作目录

创建tracker.conf配置文件

开放tracker所需端口

准备storage配置工作

创建storage工作目录

创建storage.conf配置文件

创建nginx.conf配置文件

创建mod_fastdfs.conf配置文件

创建storage.sh启动脚本

创建client.conf配置文件

开放storage所需端口

开始安装

创建docker-compose.yml编排文件

启动编排好的服务

查看启动日志

查看启动的服务

查看fastdfs集群运行情况

参数说明

统计storage文件数量

参考文章


前提条件

两台主机 node01、node02 均需安装 docker、docker-compose 环境,然后按照本文章给出的配置,别忘了将配置的IP修改成自己对应服务器上的IP,一顿复制粘贴后,即可完成傻瓜式安装,有手就行,快来爽快的撸一把~

集群规划

node01 IP: 192.168.163.130

node02 IP: 192.168.163.132

tracker服务器: node01,node02

storage服务器:node01,node02

端口规划:22122(tracker服务的端口)、23002(storage服务的端口)、9101(nginx服务的端口)

安装根目录:/data/fastdfs

开始安装

拉取docker镜像包

docker pull morunchang/fastdfs

准备tracker配置工作

创建tracker工作目录

mkdir -p /data/fastdfs/tracker/data /data/fastdfs/tracker/conf

创建tracker.conf配置文件

cat <<'EOF' > /data/fastdfs/tracker/conf/tracker.conf
disabled=false
bind_addr=
port=22122
connect_timeout=30
network_timeout=30
base_path=/data/fast_data
max_connections=256
accept_threads=1
work_threads=4
store_lookup=2
store_group=group1
store_server=0
store_path=0
download_server=0
reserved_storage_space = 10%
log_level=info
run_by_group=
run_by_user=
allow_hosts=*
sync_log_buff_interval = 10
check_active_interval = 120
thread_stack_size = 64KB
storage_ip_changed_auto_adjust = true
storage_sync_file_max_delay = 86400
storage_sync_file_max_time = 300
use_trunk_file = false
slot_min_size = 256
slot_max_size = 16MB
trunk_file_size = 64MB
trunk_create_file_advance = false
trunk_create_file_time_base = 02:00
trunk_create_file_interval = 86400
trunk_create_file_space_threshold = 20G
trunk_init_check_occupying = false
trunk_init_reload_from_binlog = false
trunk_compress_binlog_min_interval = 0
use_storage_id = false
storage_ids_filename = storage_ids.conf
id_type_in_filename = ip
store_slave_file_use_link = false
rotate_error_log = false
error_log_rotate_time=00:00
rotate_error_log_size = 0
log_file_keep_days = 0
use_connection_pool = false
connection_pool_max_idle_time = 3600
http.server_port=8080
http.check_alive_interval=30
http.check_alive_type=tcp
http.check_alive_uri=/status.html
EOF

开放tracker所需端口

firewall-cmd --zone=public --add-port=22122/tcp --permanent
 
firewall-cmd --reload
 
firewall-cmd --list-all

准备storage配置工作

创建storage工作目录

mkdir -p /data/fastdfs/storage/data /data/fastdfs/storage/conf

创建storage.conf配置文件

cat <<'EOF' > /data/fastdfs/storage/conf/storage.conf
disabled=false
group_name=group1
bind_addr=
client_bind=true
port=23002
connect_timeout=30
network_timeout=30
heart_beat_interval=30
stat_report_interval=60
base_path=/data/fast_data
max_connections=256
buff_size = 256KB
accept_threads=1
work_threads=4
disk_rw_separated = true
disk_reader_threads = 1
disk_writer_threads = 1
sync_wait_msec=50
sync_interval=0
sync_start_time=00:00
sync_end_time=23:59
write_mark_file_freq=500
store_path_count=1
store_path0=/data/fast_data
subdir_count_per_path=256
#tracker集群
tracker_server=192.168.163.130:22122
tracker_server=192.168.163.132:22122
log_level=debug
run_by_group=
run_by_user=
allow_hosts=*
file_distribute_path_mode=0
file_distribute_rotate_count=100
fsync_after_written_bytes=0
sync_log_buff_interval=10
sync_binlog_buff_interval=10
sync_stat_file_interval=300
thread_stack_size=512KB
upload_priority=10
if_alias_prefix=
check_file_duplicate=0
file_signature_method=hash
key_namespace=FastDFS
keep_alive=0
use_access_log = true
rotate_access_log = false
access_log_rotate_time=00:00
rotate_error_log = false
error_log_rotate_time=00:00
rotate_access_log_size = 0
rotate_error_log_size = 0
log_file_keep_days = 0
file_sync_skip_invalid_record=false
use_connection_pool = false
connection_pool_max_idle_time = 3600
http.domain_name=
http.server_port=9101
EOF

创建nginx.conf配置文件

cat <<'EOF' > /data/fastdfs/storage/conf/nginx.conf
user  root;
worker_processes  1;
error_log  /data/fast_data/logs/nginx-error.log;
 
events {
    worker_connections  1024;
}
 
http {
    include       mime.types;
    default_type  application/octet-stream;
 
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
 
    access_log  /data/fast_data/logs/nginx-access.log  main;
    sendfile        on;
    keepalive_timeout  65;
 
    server {
        listen       9101;
        server_name  localhost;
 
        location / {
            root   html;
            index  index.html index.htm;
        }
 
        location ~ /group1/M00 {
                    root /data/fast_data/data;
                    ngx_fastdfs_module;
        }
 
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}
EOF

创建mod_fastdfs.conf配置文件

cat <<'EOF' > /data/fastdfs/storage/conf/mod_fastdfs.conf
connect_timeout=30
network_timeout=30
base_path=/data/fast_data
load_fdfs_parameters_from_tracker=true
storage_sync_file_max_delay = 86400
use_storage_id = false
storage_ids_filename = storage_ids.conf
#tracker集群
tracker_server=192.168.163.130:22122
tracker_server=192.168.163.132:22122
storage_server_port=23002
group_name=group1
url_have_group_name = true
store_path_count=1
store_path0=/data/fast_data
log_level=info
log_filename=
response_mode=proxy
if_alias_prefix=
flv_support = true
flv_extension = flv
group_count = 0
 
#HTTP default content type
http.default_content_type = application/octet-stream
 
#MIME types mapping filename
#MIME types file format: MIME_type extensions
#such as: image/jpeg jpeg jpg jpe
#you can use apache’s MIME file: mime.types
http.mime_types_filename=/etc/nginx/conf/mime.types
EOF

创建storage.sh启动脚本

cat <<'EOF' > /data/fastdfs/storage/conf/storage.sh
#!/bin/sh
/data/fastdfs/storage/fdfs_storaged /etc/fdfs/storage.conf
/etc/nginx/sbin/nginx
tail -f /data/fast_data/logs/storaged.log
EOF

创建client.conf配置文件

cat <<'EOF' > /data/fastdfs/storage/conf/client.conf
# connect timeout in seconds
# default value is 30s
connect_timeout=30
 
# network timeout in seconds
# default value is 30s
network_timeout=30
 
# the base path to store log files
base_path=/data/fast_data
 
# tracker_server can occur more than once, and tracker_server format is
#  "host:port", host can be hostname or ip address
#tracker集群
tracker_server=192.168.163.130:22122
tracker_server=192.168.163.132:22122
 
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
 
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
 
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
 
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
 
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
 
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
 
#HTTP settings
http.tracker_server_port=80
 
#use "#include" directive to include HTTP other settings
#include http.conf
EOF

开放storage所需端口

firewall-cmd --zone=public --add-port=23002/tcp --permanent
 
firewall-cmd --zone=public --add-port=9101/tcp --permanent
 
firewall-cmd --reload
 
firewall-cmd --list-all

开始安装

创建docker-compose.yml编排文件

cat <<'EOF' > /data/fastdfs/docker-compose.yml
version: '3.7'
services:
  fastdfs-tracker:
    image: morunchang/fastdfs
    container_name: fastdfs-tracker
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/fastdfs/tracker/data:/data/fast_data
      - /data/fastdfs/tracker/conf/tracker.conf:/etc/fdfs/tracker.conf
    environment:
      - TZ=Asia/Shanghai
    network_mode: "host"
    command: "sh tracker.sh"
 
  fastdfs-storage:
    image: morunchang/fastdfs
    container_name: fastdfs-storage
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/fastdfs/storage/data:/data/fast_data
      - /data/fastdfs/storage/conf/storage.sh:/storage.sh
      - /data/fastdfs/storage/conf/storage.conf:/etc/fdfs/storage.conf
      - /data/fastdfs/storage/conf/nginx.conf:/etc/nginx/conf/nginx.conf
      - /data/fastdfs/storage/conf/mod_fastdfs.conf:/etc/fdfs/mod_fastdfs.conf
      - /data/fastdfs/storage/conf/client.conf:/data/fastdfs/conf/client.conf
    environment:
      - TZ=Asia/Shanghai
    network_mode: "host"
    command: "sh storage.sh"
EOF

启动编排好的服务

cd /data/fastdfs/ && docker-compose up -d

查看启动日志

docker-compose logs -f

查看启动的服务

docker-compose ps

查看fastdfs集群运行情况

docker exec -it fastdfs-storage fdfs_monitor /data/fastdfs/conf/client.conf

参数说明

tracker_server_count:2 --表示2个Tracker Server
tracker server is 192.168.163.130:22122 --表示Leader Tracker
group count: 1 --表示有1个group
group name = group1 --组名称是group1
storage server count = 2 --组内有2个storage
active server count = 2 --活动的storage有2个
storage server port = 23002 --storage的端口
storage HTTP port = 9101 --storage的文件访问端口
store path count = 1 --storage只挂了一个存储目录
total_upload_count = 11 --总共上传了多少个文件
total_upload_bytes = 691405 --总共上传了多少字节
success_upload_bytes = 691405 --成功上传了多少字节
total_download_count = 2 --总共下载了多少文件(使用java客户端)

统计storage文件数量

MethodA:
cd /data/fastdfs/storage/data/data
ls -lR|grep "^-"|wc -l
MethodB:
cat binlog.000 | wc -l

你可能感兴趣的:(docker-compose,docker,docker-compose,fastdfs,分布式存储,nginx)