MyCAT 1.6.7.5 Installation (Latest Version)

Architecture diagram

(Architecture diagram image)

Planning

[root@db01 ~]# mysql -S /data/3307/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 7     |
+---------------+-------+
[root@db01 ~]# mysql -S /data/3308/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 8     |
+---------------+-------+
[root@db01 ~]# mysql -S /data/3309/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 9     |
+---------------+-------+
[root@db01 ~]# mysql -S /data/3310/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 10    |
+---------------+-------+

[root@db02 ~]# mysql -S /data/3307/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 17    |
+---------------+-------+
[root@db02 ~]# mysql -S /data/3308/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 18    |
+---------------+-------+
[root@db02 ~]# mysql -S /data/3309/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 19    |
+---------------+-------+
[root@db02 ~]# mysql -S /data/3310/mysql.sock -e "show variables like 'server_id'"
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 20    |
+---------------+-------+

Node master/slave planning
Arrows point toward the master:
    10.0.0.51:3307  <----->  10.0.0.52:3307
    10.0.0.51:3309  ------>  10.0.0.51:3307
    10.0.0.52:3309  ------>  10.0.0.52:3307

    10.0.0.52:3308  <----->  10.0.0.51:3308
    10.0.0.52:3310  ------>  10.0.0.52:3308
    10.0.0.51:3310  ------>  10.0.0.51:3308
Shard planning
shard1:
    Master:10.0.0.51:3307
    slave1:10.0.0.51:3309
    Standby Master:10.0.0.52:3307
    slave2:10.0.0.52:3309
	
shard2:
    Master:10.0.0.52:3308
    slave1:10.0.0.52:3310
    Standby Master:10.0.0.51:3308
    slave2:10.0.0.51:3310

Configure the replication environment

# shard1
##  10.0.0.51:3307 <-----> 10.0.0.52:3307

# db02
mysql -S /data/3307/mysql.sock -e "grant replication slave on *.* to repl@'10.0.0.%' identified by '123';"
mysql -S /data/3307/mysql.sock -e "grant all on *.* to root@'10.0.0.%' identified by '123' with grant option;"

# db01
mysql -S /data/3307/mysql.sock -e "change master to master_host='10.0.0.52', master_port=3307, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3307/mysql.sock -e "start slave;"
mysql -S /data/3307/mysql.sock -e "show slave status\G"

# db02
mysql -S /data/3307/mysql.sock -e "change master to master_host='10.0.0.51', master_port=3307, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3307/mysql.sock -e "start slave;"
mysql -S /data/3307/mysql.sock -e "show slave status\G"

## 10.0.0.51:3309 ------> 10.0.0.51:3307

# db01
mysql -S /data/3309/mysql.sock -e "change master to master_host='10.0.0.51', master_port=3307, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3309/mysql.sock -e "start slave;"
mysql -S /data/3309/mysql.sock -e "show slave status\G"

## 10.0.0.52:3309 ------> 10.0.0.52:3307
# db02

mysql -S /data/3309/mysql.sock -e "change master to master_host='10.0.0.52', master_port=3307, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3309/mysql.sock -e "start slave;"
mysql -S /data/3309/mysql.sock -e "show slave status\G"

====================================================================

# shard2
## 10.0.0.52:3308 <-----> 10.0.0.51:3308

# db01
mysql -S /data/3308/mysql.sock -e "grant replication slave on *.* to repl@'10.0.0.%' identified by '123';"
mysql -S /data/3308/mysql.sock -e "grant all on *.* to root@'10.0.0.%' identified by '123' with grant option;"

# db02
mysql -S /data/3308/mysql.sock -e "change master to master_host='10.0.0.51', master_port=3308, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3308/mysql.sock -e "start slave;"
mysql -S /data/3308/mysql.sock -e "show slave status\G"

# db01
mysql -S /data/3308/mysql.sock -e "change master to master_host='10.0.0.52', master_port=3308, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3308/mysql.sock -e "start slave;"
mysql -S /data/3308/mysql.sock -e "show slave status\G"

## 10.0.0.52:3310 -----> 10.0.0.52:3308
# db02
mysql -S /data/3310/mysql.sock -e "change master to master_host='10.0.0.52', master_port=3308, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3310/mysql.sock -e "start slave;"
mysql -S /data/3310/mysql.sock -e "show slave status\G"

## 10.0.0.51:3310 -----> 10.0.0.51:3308
# db01
mysql -S /data/3310/mysql.sock -e "change master to master_host='10.0.0.51', master_port=3308, master_auto_position=1, master_user='repl', master_password='123';"
mysql -S /data/3310/mysql.sock -e "start slave;"
mysql -S /data/3310/mysql.sock -e "show slave status\G"

# Check replication status: 16 "Yes" values in total across db01 and db02 (8 per node) means success
mysql -S /data/3307/mysql.sock -e "show slave status\G" | grep Yes
mysql -S /data/3308/mysql.sock -e "show slave status\G" | grep Yes
mysql -S /data/3309/mysql.sock -e "show slave status\G" | grep Yes
mysql -S /data/3310/mysql.sock -e "show slave status\G" | grep Yes
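The four checks can also be rolled into one loop per node; this is just a convenience sketch, assuming the same /data/<port>/mysql.sock layout used above:

# run on db01 and db02; prints the replication state of all four instances
for port in 3307 3308 3309 3310; do
    echo "== instance $port =="
    mysql -S /data/$port/mysql.sock -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"
done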

Installation

yum install -y java
# download the MyCAT 1.6.7.5 release from http://dl.mycat.io/ and extract it to /data/mycat
export PATH=/data/mycat/bin:$PATH
mycat start
mysql -uroot -p123456 -h 127.0.0.1 -P8066

Configuration files overview

schema.xml
Main configuration file: node information, read-write splitting, high availability, references to the sharding rules, etc.

rule.xml
Defines the sharding rules: each rule's algorithm, function, and how to use it.

server.xml
MyCAT service settings: users, network, privileges, policies, resources, etc.

xx.txt files
Files that define sharding parameters (referenced by rules in rule.xml).

log4j2.xml
MyCAT logging configuration.

wrapper.log : startup log
mycat.log   : working log

Read-write splitting

grant all on *.* to root@'10.0.0.%' identified by '123';   # create the user MyCAT uses to access the backend MySQL nodes
vim schema.xml
"1.0"?>  
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">  
"http://io.mycat/">
#逻辑库,访问TESTDB会到达dn1节点
"TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1"> 
</schema>  
#数据节点,dn1节点指向的localhost1的world库
    "dn1" dataHost="localhost1" database= "world" />
#定义localhost1
    "localhost1" maxCon="1000" minCon="10" balance="1"  writeType="0" dbType="mysql"  dbDriver="native" switchType="1"> 
        select user()</heartbeat>
#读和写的分工,host名称随意
    "db1" url="10.0.0.51:3307" user="root" password="123"> 
            "db2" url="10.0.0.51:3309" user="root" password="123" /> 
    </writeHost> 
    </dataHost>  
</mycat:schema>
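Edits to schema.xml only take effect after MyCAT restarts or reloads its configuration; either of the following works (the reload command uses the 9066 management port described later in this article):

mycat restart
# or, without a restart:
mysql -uroot -p123456 -h 10.0.0.51 -P9066 -e "reload @@config_all;"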

Testing

# connect to the MyCAT service
[root@db01 ~]# mysql -uroot -p123456 -h 10.0.0.51 -P8066
# test a read
mysql> select @@server_id;
# test a write (wrapping it in a transaction routes it to the writeHost)
mysql> begin; select @@server_id; commit;

Read-write splitting with high availability

"1.0"?>  
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">  
"http://io.mycat/">
"TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="sh1"> 
</schema>  
    "sh1" dataHost="oldguo1" database= "world" />  
    "oldguo1" maxCon="1000" minCon="10" balance="1"  writeType="0" dbType="mysql"  dbDriver="native" switchType="1"> 
        select user()</heartbeat>  
#第一个write负责写操作,其他三个负责读,写节点宕机后,后面的read也不提供服务,第二个write接管写操作,下面的read提供读操作
    "db1" url="10.0.0.51:3307" user="root" password="123"> 
            "db2" url="10.0.0.51:3309" user="root" password="123" /> 
    </writeHost> 
    "db3" url="10.0.0.52:3307" user="root" password="123"> 
            "db4" url="10.0.0.52:3309" user="root" password="123" /> 
    </writeHost>        
    </dataHost>  
</mycat:schema>
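To watch the failover described in the comment, a rough sketch is to shut the first write node down and see where writes land afterwards; it assumes the instance can be stopped through its socket with mysqladmin and that replication between the two masters is healthy:

# on db01: stop the first writeHost (10.0.0.51:3307)
mysqladmin -S /data/3307/mysql.sock shutdown
# writes through MyCAT should now reach the standby master (server_id 17, i.e. 10.0.0.52:3307)
mysql -uroot -p123456 -h 10.0.0.51 -P8066 -e "begin; select @@server_id; commit;"
# the switch is recorded in conf/dnindex.properties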

Parameter reference

balance attribute
Load-balancing mode for read operations; three values are used here:
1. balance="0": read-write splitting is disabled; all reads go to the currently available writeHost.
2. balance="1": all readHosts and the standby writeHost take part in load balancing of select statements. In a dual-master, dual-slave setup
  (M1->S1, M2->S2, with M1 and M2 as mutual masters), M2, S1 and S2 all serve selects under normal conditions.
3. balance="2": all reads are distributed randomly across the writeHosts and readHosts.

writeType attribute
Load-balancing mode for write operations; two values:
1. writeType="0": all writes go to the first configured writeHost. If it fails, writes switch to the surviving writeHost;
after a restart the switched-to host remains the write target, and the switch is recorded in conf/dnindex.properties (see the illustrative snippet below).
2. writeType="1": writes are sent randomly to any configured writeHost; not recommended.
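For reference, dnindex.properties is a plain properties file; its content is roughly of the following shape (illustrative values, mapping each dataHost name to the index of its currently active writeHost):

# conf/dnindex.properties (illustrative)
oldguo1=0
oldguo2=0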

switchType attribute
-1 : no automatic switchover
 1 : default, automatic switchover
 2 : switchover decided by the MySQL replication status; the heartbeat statement is show slave status

Connection settings
maxCon="1000" : maximum number of concurrent connections
minCon="10"   : number of connections MyCAT automatically opens to each backend node after startup

tempReadHostAvailable="1"
With one writeHost and one readHost, enabling this parameter keeps the readHost available for reads even when the writeHost is down; with two writeHosts and two readHosts it is not needed.
<heartbeat>select user()</heartbeat>   <!-- heartbeat statement -->

Vertical sharding

Different tables are routed to different backends.

"1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
"http://io.mycat/">
"TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="sh1">
        "user" dataNode="sh1"/>
        
"order_t" dataNode="sh2"/> </schema> "sh1" dataHost="oldguo1" database= "taobao" /> "sh2" dataHost="oldguo2" database= "taobao" /> "oldguo1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native" switchType="1"> select user()</heartbeat> "db1" url="10.0.0.51:3307" user="root" password="123"> "db2" url="10.0.0.51:3309" user="root" password="123" /> </writeHost> "db3" url="10.0.0.52:3307" user="root" password="123"> "db4" url="10.0.0.52:3309" user="root" password="123" /> </writeHost> </dataHost> "oldguo2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native" switchType="1"> select user()</heartbeat> "db1" url="10.0.0.51:3308" user="root" password="123"> "db2" url="10.0.0.51:3310" user="root" password="123" /> </writeHost> "db3" url="10.0.0.52:3308" user="root" password="123"> "db4" url="10.0.0.52:3310" user="root" password="123" /> </writeHost> </dataHost> </mycat:schema>

Range sharding

Rows of a table are split across shards according to value ranges of the sharding column.

"t3" dataNode="sh1,sh2" rule="auto-sharding-long" /> vim rule.xml "auto-sharding-long"> id</columns> #按照id列进行拆分 rang-long</algorithm> #函数的名称 </rule> <function name="rang-long" class="io.mycat.route.function.AutoPartitionByLong"> "mapFile">autopartition-long.txt</property> #定义的具体内容的文件名称 </function> vim autopartition-long.txt 0-10=0 #0-10数据行访问第一个分片 10-20=1 #10-20数据行访问第二个分片

Modulo sharding

The sharding key (a column) is divided by the number of data nodes; the remainder decides which node each row is written to.

"t4" dataNode="sh1,sh2" rule="mod-long" /> vim rule.xml <function name="mod-long" class="io.mycat.route.function.PartitionByMod"> <!-- how many data nodes --> "count">2</property> </function>

Enumeration sharding

Different values of the sharding column are routed to their corresponding backends.

"t5" dataNode="sh1,sh2" rule="sharding-by-intfile" /> vim rule.xml "sharding-by-intfile"> name</columns> hash-int</algorithm> </rule> </tableRule> <function name="hash-int" class="org.opencloudb.route.function.PartitionByFileMap"> "mapFile">partition-hash-int.txt</property> "type">1</property> </function> vim partition-hash-int.txt bj=0 sh=1 DEFAULT_NODE=1 #其他的值写入第二个分片

Global tables

Many queries need to join this table, so a full copy is kept on every node to avoid cross-shard joins.

"t_area" primaryKey="id" type="global" dataNode="sh1,sh2" /> #primaryKey关联条件

ER sharding

Global tables cost storage, which motivates ER sharding: the child table is sharded the same way as its parent table, so joins never cross shards.

"a" dataNode="sh1,sh2" rule="mod-long"> "b" joinKey="aid" parentKey="id" /> </table> #a表的id列关联b表的aid列 vim rule.xml "mod-long_oldguo"> #更改名称,避免重复 id</columns> mod-long_oldguo</algorithm> </rule> </tableRule> <function name="mod-long_oldguo" class="io.mycat.route.function.PartitionByMod"> <!-- how many data nodes --> "count">2</property> </function>

MyCAT management port

mysql -uroot -p123456 -h10.0.0.51 -P9066

Show help
show @@help;

Show MyCAT server status
show @@server;

Show shard (data node) information
show @@datanode;

Show data sources
show @@datasource;

Reload configuration
reload @@config       : reloads schema.xml
reload @@config_all   : reloads all configuration files

Modifying a logical schema

vim schema.xml
before: <schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="sh1">
after:  <schema name="yb" checkSQLschema="false" sqlMaxLimit="100" dataNode="sh1">

vim server.xml
<user name="root" defaultAccount="true">                      <!-- default admin account -->
        <property name="password">123456</property>           <!-- default admin password -->
        <property name="schemas">oldboy,yb</property>         <!-- logical schemas this user may access -->
        <property name="defaultSchema">oldboy</property>
        <!-- before raising a "No MyCAT Database selected" error, this schema is tried as the current schema;
             if it is not set, the schema is null and the error is raised -->

        <!-- table-level DML privileges -->
        <!--
        <privileges check="false">
                <schema name="TESTDB" dml="0110">
                        <table name="tb01" dml="0000"></table>
                        <table name="tb02" dml="1111"></table>
                </schema>
        </privileges>
        -->
</user>
<user name="user">
        <property name="password">user</property>
        <property name="schemas">oldboy,yb</property>          <!-- logical schemas this user may access -->
        <property name="readOnly">true</property>
        <property name="defaultSchema">oldboy</property>
</user>
