tidb单机部署实例

1. 集群部署相关命令

参考《TiDB 软件和硬件环境建议配置》,以下操作均在 Ubuntu LTS 系统上执行。

a.准备ubuntu环境1: 磁盘分区,swap关闭

参考:官方 磁盘格式化

# Example: 4 TB NVMe SSD (appears as /dev/nvme0n1).
parted /dev/nvme0n1
(parted) print
(parted) mklabel gpt 
(parted) mkpart primary 0KB 4TB 
(parted) print

# Format the newly created PARTITION, not the whole disk:
# running mkfs on /dev/nvme0n1 would wipe the GPT label created above.
mkfs.ext4 /dev/nvme0n1p1
# Look up the partition's filesystem UUID for /etc/fstab
blkid

# Create the mount point, register the mount, and activate it.
# nodelalloc is required by TiDB; noatime is recommended.
mkdir -p /DATA4T
vi /etc/fstab
UUID="040c7a9c-1dd3-494c-b7b9-298ec2165b83"   /DATA4T   ext4    defaults,nodelalloc,noatime    0    2
mount -a

# TiDB recommends vm.swappiness=0; apply the sysctl change without a reboot.
echo "vm.swappiness = 0" >> /etc/sysctl.conf
swapoff -a && swapon -a
sysctl -p
# reboot

b.系统设置2: 工具安装、其他设置,免登陆

# Disable the firewall so cluster components can reach each other.
# NOTE(review): in production, prefer opening only the required ports.
ufw disable
# ntp: clock sync required by TiDB; ifupdown/network-scripts: legacy
# static-IP tooling used below; mysql client to connect to TiDB later.
apt-get install ntp curl net-tools ifupdown network-scripts openssh-server mysql-client-core-8.0

# Show the network interface names
ifconfig 
# Configure a static IP (legacy ifupdown style)
# NOTE(review): no dns-nameservers entry is set here — confirm DNS is
# provided elsewhere (e.g. by the gateway) or name resolution will fail.
gedit  /etc/network/interfaces
# interfaces(5) file used by ifup(8) and ifdown(8)
auto lo
iface lo inet loopback
 
auto enp4s0
iface enp4s0 inet static
address 192.168.1.11
gateway 192.168.1.1
netmask 255.255.255.0

# SSH settings: raise MaxSessions so TiUP can open enough parallel
# sessions during deployment.
vi /etc/ssh/sshd_config
++
MaxSessions 20

# On Ubuntu the OpenSSH unit is named "ssh" ("sshd" is only an alias).
service ssh restart
# Add the deployment user; on Ubuntu, adduser already prompts for the
# password interactively, so no separate passwd call is needed.
adduser tidb
# Grant passwordless sudo. Use visudo (which syntax-checks the file)
# instead of editing /etc/sudoers with a plain editor — a syntax error
# there can lock you out of sudo entirely.
visudo
++
tidb ALL=(ALL)  NOPASSWD:ALL

c.部署配置:

参考:官方 使用 TiUP 部署(推荐)

# Install TiUP (the TiDB cluster manager).
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
# Use an absolute path so this works from any directory
# (the original "source .bashrc" only works when run from $HOME).
source ~/.bashrc
# Generate a topology template to edit.
tiup cluster template > topology.yaml

修改topology.yaml,单机配置:

# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/DATA4T/tidb-deploy"
  # # TiDB Cluster data storage directory
  data_dir: "/DATA4T/tidb-data"
  arch: "amd64"

# Host-level monitoring agents (deployed on every machine).
monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115

# Role-wide defaults applied to every instance of each component.
server_configs:
  tidb:
    # Statements slower than 300 ms go to the slow-query log.
    log.slow-threshold: 300
  tikv:
    # Tuning for several TiKV instances sharing one machine: dedicated
    # storage read pool, unified coprocessor pool.
    readpool.storage.use-unified-pool: false
    readpool.coprocessor.use-unified-pool: true
  pd:
    # Placement rules must be enabled for TiFlash replicas to work.
    replication.enable-placement-rules: true
    # Schedule replicas by the per-store "host" label set on each TiKV
    # instance below. NOTE(review): all three stores carry the same
    # label value, so there is no real replica isolation — acceptable
    # only for a single-machine test cluster.
    replication.location-labels:
      - host
  tiflash:
    logger.level: "info"

# Placement Driver: cluster metadata and scheduling.
pd_servers:
  - host: 192.168.1.11

# Three TiDB (stateless SQL layer) instances on the same host,
# separated by port/status_port and distinct deploy/log directories.
tidb_servers:
  - host: 192.168.1.11
    port: 4000
    status_port: 10080
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4000"
    log_dir: "/DATA4T/tidb-deploy/tidb-4000/log"
  - host: 192.168.1.11
    port: 4001
    status_port: 10081
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4001"
    log_dir: "/DATA4T/tidb-deploy/tidb-4001/log"
  - host: 192.168.1.11
    port: 4002
    status_port: 10082
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4002"
    log_dir: "/DATA4T/tidb-deploy/tidb-4002/log"

# Three TiKV (storage) instances, each with its own data directory.
tikv_servers:
  - host: 192.168.1.11
    port: 20160
    status_port: 20180
    deploy_dir: "/DATA4T/data1/tidb-deploy/tikv-20160"
    data_dir: "/DATA4T/data1/tidb-data/tikv-20160"
    log_dir: "/DATA4T/data1/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { host: "192.168.1.11" }
  - host: 192.168.1.11
    port: 20161
    status_port: 20181
    deploy_dir: "/DATA4T/data2/tidb-deploy/tikv-20161"
    data_dir: "/DATA4T/data2/tidb-data/tikv-20161"
    log_dir: "/DATA4T/data2/tidb-deploy/tikv-20161/log"
    config:
      server.labels: { host: "192.168.1.11" }
  - host: 192.168.1.11
    # ssh_port: 22
    port: 20162
    status_port: 20182
    deploy_dir: "/DATA4T/data3/tidb-deploy/tikv-20162"
    data_dir: "/DATA4T/data3/tidb-data/tikv-20162"
    log_dir: "/DATA4T/data3/tidb-deploy/tikv-20162/log"
    # numa_node: "0"
    config:
      server.labels: { host: "192.168.1.11" }

# Three TiFlash (columnar replica) instances; every port must be unique
# because they all share one host.
tiflash_servers:
  - host: 192.168.1.11
    tcp_port: 9000
    http_port: 8123
    flash_service_port: 3930
    flash_proxy_port: 20170
    flash_proxy_status_port: 20292
    metrics_port: 8234
    deploy_dir: /DATA4T/data1/tidb-deploy/tiflash-9000
    data_dir: /DATA4T/data1/tidb-data/tiflash-9000
    log_dir: /DATA4T/data1/tidb-deploy/tiflash-9000/log
  - host: 192.168.1.11
    tcp_port: 9001
    http_port: 8124
    flash_service_port: 3931
    flash_proxy_port: 20171
    flash_proxy_status_port: 20293
    metrics_port: 8235
    deploy_dir: /DATA4T/data2/tidb-deploy/tiflash-9001
    data_dir: /DATA4T/data2/tidb-data/tiflash-9001
    log_dir: /DATA4T/data2/tidb-deploy/tiflash-9001/log
  - host: 192.168.1.11
    # ssh_port: 22
    tcp_port: 9002
    http_port: 8125
    flash_service_port: 3932
    flash_proxy_port: 20172
    flash_proxy_status_port: 20294
    metrics_port: 8236
    deploy_dir: /DATA4T/data3/tidb-deploy/tiflash-9002
    data_dir: /DATA4T/data3/tidb-data/tiflash-9002
    log_dir: /DATA4T/data3/tidb-deploy/tiflash-9002/log

# Monitoring stack: Prometheus, Grafana, Alertmanager — one each.
monitoring_servers:
  - host: 192.168.1.11
  
grafana_servers:
  - host: 192.168.1.11

alertmanager_servers:
  - host: 192.168.1.11

应用配置,并尝试启动集群,验证

tiup cluster deploy tidb-test v6.1.0 ./topology.yaml --user tidb -p
tiup cluster start tidb-test --init
Started cluster `tidb-test` successfully
The root password of TiDB database has been changed.
The new password is: 'V+12S&Bh5*JY91_3EZ'.
Copy and record it to somewhere safe, it is only displayed once, and will not be stored.
The generated password can NOT be get and shown again.

tiup cluster display tidb-test
mysql -u root -h 192.168.1.11 -P 4000 -p

验证mysql连接、运维后台[http://192.168.1.11:2379/dashboard/](PD 节点的 2379 端口),用户root、密码如上。

2. 数据迁移

a. 使用dumpling导出mysql

# Install the Dumpling export tool via TiUP.
tiup install dumpling
# Verify connectivity to the upstream MySQL first.
# NOTE(review): a password on the command line is visible in `ps`
# output and shell history — prefer an interactive -p prompt.
mysql -h192.168.1.10 -P3307 -uroot -p123456

导出mysql

# Export with Dumpling: -F 8G splits output files at ~8 GiB each,
# -T selects one table (db.table), -t 20 uses 20 export threads,
# --compress gzip writes *.sql.gz files into -o.
# NOTE(review): passwords passed via -p leak through ps/history.
tiup dumpling -u root -P 3307 -h 192.168.1.10 -p 123456 -o ./export/table1 -F 8G -T jobdb.tb1 -t 20 --compress gzip
tiup dumpling -u root -P 3307 -h 192.168.1.10 -p 123456 -o ./export/table1 -F 8G -T jobdb.tb2 -t 20 --compress gzip


tiup dumpling -u root -P 3307 -h 192.168.1.10 -p dA1mJ3jA1eC6hA6gU0fV5eA0bM0sI5dR -o ./export/table1 -F 8G -T table1.page -t 20 --compress gzip
tiup dumpling -u root -P 3307 -h 192.168.1.10 -p dA1mJ3jA1eC6hA6gU0fV5eA0bM0sI5dR -o ./export/table1 -F 8G -T table1.page_analysis -t 20 --compress gzip

b. 建立下游表结构

从大数据量分库分表 MySQL 合并迁移数据到 TiDB

# On the source MySQL instance: dump the DDL of each table to migrate.
# Use the real table name here — "table" is a reserved word, so the
# original statement `show create table table;` fails with a syntax error.
show create table tb1;
# On the TiDB instance (port 4000):
# run the CREATE TABLE statements produced above to recreate the
# databases and tables before importing the data.

c. 使用tidb-lightning导入数据

将导出的 *.sql.gz 文件批量解压到磁盘上的数据目录(即 tidb-lightning.toml 中配置的数据源目录)。

# Run TiDB Lightning with the prepared config file to import the
# exported SQL files into the cluster.
tiup tidb-lightning -config tidb-lightning.toml
# Wait for the import to finish

数据库查看结果,已经分区过的大表查询结果:

mysql> select count(*) from page;
+-----------+
| count(*)  |
+-----------+
| 104408407 |
+-----------+
1 row in set (4.51 sec)

很快,完美。

d. 创建新用户

-- Inspect existing accounts. Qualify the table as mysql.user so the
-- queries work regardless of the current default database (the
-- original unqualified `from user` fails unless `USE mysql` was run).
select user,host from mysql.user;
select user,host,plugin,authentication_string from mysql.user;
select user,host,plugin from mysql.user;

-- 13: Tencent Cloud instance.
-- NOTE(review): these passwords are recorded in plain text here —
-- rotate them if this document is ever shared.
drop user 'search'@'%';
create user search@'%' identified by 'qq8d#p^azkzqvkyWHQNTd8M6HKb8##V9';
grant all privileges on *.* to search@'%';
-- FLUSH PRIVILEGES is only required after editing the grant tables
-- directly; kept here as a harmless belt-and-braces step.
flush privileges;

drop user 'server'@'%';
create user server@'%' identified by 'h@VvZIDwGERTxM^pnw1PrdEj!VgIcZ#V';
grant all privileges on complaint_paper.* to server@'%';
flush privileges;

drop user 'test'@'%';
create user test@'%' identified by '123456t';
grant select,insert on *.* to test@'%';
flush privileges;


-- 11: the TiDB cluster itself (192.168.1.11) => application database.
-- Switch root to mysql_native_password so legacy MySQL client
-- libraries can authenticate against TiDB.
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'Fn0AxdMrqcwLPrwLa9bR1cJ9IF#x0Gzh';

-- Pattern for each account below: recreate the user, blank the stored
-- credential directly in the grant table, force host/plugin via UPDATE,
-- then set the final password with ALTER USER.
-- NOTE(review): hand-editing mysql.user is fragile and order-dependent;
-- the single ALTER USER ... IDENTIFIED WITH mysql_native_password BY ...
-- statement should be sufficient on its own — verify before reusing.
drop user 'search'@'%';
create user search@'%' identified by 'qq8d#p^azfzqvkyWHQNTd8M6HKb8##V9';
update mysql.user set authentication_string = '' where user = 'search';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'search';
ALTER USER 'search'@'%' IDENTIFIED WITH mysql_native_password BY 'qq8d#p^azfzqvkyWHQNTd8M6HKb8##V9';
grant all privileges on *.* to search@'%';
flush privileges;

drop user 'tidb_job'@'%';
create user tidb_job@'%' identified by 'Gy1sDiq8sgzlJeT3WEZ&RZ4BNowy#tY1';
update mysql.user set authentication_string = '' where user = 'tidb_job';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'tidb_job';
ALTER USER 'tidb_job'@'%' IDENTIFIED WITH mysql_native_password BY 'Gy1sDiq8sgzlJeT3WEZ&RZ4BNowy#tY1';
-- The job account only needs its two application schemas.
grant all privileges on table1.* to tidb_job@'%';
grant all privileges on platform.* to tidb_job@'%';
flush privileges;

drop user 'test'@'%';
create user test@'%' identified by '123456t';
update mysql.user set authentication_string = '' where user = 'test';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'test';
ALTER USER 'test'@'%' IDENTIFIED WITH mysql_native_password BY '123456t';
-- Limited account for testing: read plus insert only.
grant select,insert on *.* to test@'%';
flush privileges;

你可能感兴趣的:(tidb运维数据库后端)