Netty + MongoDB Cluster + Kafka Cluster: Handling High Concurrency and Storing Massive Amounts of Data

1. Environment Requirements

Prepare a virtual machine with Docker installed.

2. A Brief Introduction to Netty

Netty is a high-performance, asynchronous, event-driven NIO framework. It simplifies and streamlines the development of network applications.

3. A Brief Introduction to MongoDB

MongoDB is a highly available, distributed, schema-flexible document database designed for large-volume data storage. Documents are stored in a JSON-like format, which makes it possible to index individual fields and thereby provide some of the capabilities of a relational database.

4. A Brief Introduction to Kafka

Kafka is a high-throughput distributed publish/subscribe messaging system capable of handling all of the activity-stream data produced by users on a website. Here it serves as the message middleware.

5. Writing the Netty Project Code

5.1 Writing the Configuration File

# Port that the Netty server listens on
server.port=38080

# Kafka cluster bootstrap addresses
kafka.service=172.16.59.180:9092,172.16.59.180:9093,172.16.59.180:9094

# Kafka topic to publish to
kafka.topic=MY-GEO-SERVER

5.2 Writing the Configuration Class

package cn.edu.glut.geoserver.config;

import cn.hutool.core.convert.Convert;
import cn.hutool.setting.Setting;

/**
 * @author Grong
 * @Date 2022-09-19 21:33
 */
public class MyConfig {

    public static Setting setting;

    static {
        // Load the configuration file; my.setting is read from the classpath
        setting = new Setting("my.setting");
    }

    /**
     * Get the server port.
     * @return the port Netty listens on
     */
    public static int getPort(){
        return Convert.toInt(setting.get("server.port"));
    }

    /**
     * Get the Kafka cluster bootstrap addresses.
     *
     * @return the comma-separated broker address list
     */
    public static String getKafkaServiceAddress(){
        return setting.get("kafka.service");
    }

    /**
     * Get the Kafka topic.
     * @return the topic name
     */
    public static String getKafkaTopic(){
        return setting.get("kafka.topic");
    }

}

5.3 Writing the Kafka Service

package cn.edu.glut.geoserver.service;

import cn.edu.glut.geoserver.config.MyConfig;
import cn.edu.glut.geoserver.netty.handler.Request;
import cn.hutool.core.map.MapUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.json.JSONUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

/**
 * Service that publishes messages to Kafka.
 * @author Grong
 * @Date 2022-09-20 15:46
 */
@Slf4j
public class KafkaService {

    // Kafka message producer
    private KafkaProducer<String, String> producer;

    public KafkaService() {
        // Initialize the Kafka message producer

        // Producer configuration
        Properties properties = new Properties();
        // Kafka cluster bootstrap addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, MyConfig.getKafkaServiceAddress());
        // Serializer for message keys
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Serializer for message values
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        this.producer = new KafkaProducer<>(properties);
    }

    /**
     * Send a message to Kafka.
     *
     * @param request the parsed HTTP request
     * @return true once the message has been handed to the producer
     */
    public Boolean sendMessage(Request request){
        // Extract the request parameters
        Double longitude = request.getDoubleParam("longitude", 0d);
        Double latitude = request.getDoubleParam("latitude", 0d);
        Double speed = request.getDoubleParam("speed", 0d);
        String routeId = request.getParam("routeId");
        Long userId = request.getLongParam("userId", 0L);
        // Wrap the parameters in a JSON payload
        String msgStr = JSONUtil.toJsonStr(MapUtil.builder()
                .put("longitude", longitude)
                .put("latitude", latitude)
                .put("speed", speed)
                .put("routeId", routeId)
                .put("userId", userId)
                .build());
        // Target topic
        String topic = MyConfig.getKafkaTopic();

        System.out.println(StrUtil.format("Sending message to topic [{}]: {}", topic, msgStr));
//        log.info("Sending message to topic [{}]: {}", topic, msgStr);

        this.producer.send(new ProducerRecord<>(topic, msgStr), (metadata, exception) -> {
            // Handle send failures reported by the callback
            if (ObjectUtil.isNotEmpty(exception)) {
                System.out.println(StrUtil.format("Failed to send message: msg = {}", msgStr));
                log.error("Failed to send message: msg = {}", msgStr, exception);
            }
        });

        return true;
    }
}
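
The Request helper class imported above (cn.edu.glut.geoserver.netty.handler.Request) is not listed in the original article. A minimal sketch, assuming it does nothing more than wrap the query-string parameters of the aggregated HTTP request, might look like this:

package cn.edu.glut.geoserver.netty.handler;

import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.QueryStringDecoder;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Wraps a FullHttpRequest and exposes typed access to its query parameters.
 * (Sketch only; the real class in the project may differ.)
 */
public class Request {

    private final Map<String, String> params = new HashMap<>();

    public static Request build(ChannelHandlerContext ctx, FullHttpRequest fullHttpRequest) {
        // ctx is accepted to match the call site; it is not needed for simple query parsing
        Request request = new Request();
        QueryStringDecoder decoder = new QueryStringDecoder(fullHttpRequest.uri());
        for (Map.Entry<String, List<String>> entry : decoder.parameters().entrySet()) {
            if (!entry.getValue().isEmpty()) {
                request.params.put(entry.getKey(), entry.getValue().get(0));
            }
        }
        return request;
    }

    public String getParam(String name) {
        return this.params.get(name);
    }

    public Double getDoubleParam(String name, Double defaultValue) {
        String value = this.params.get(name);
        return value == null ? defaultValue : Double.valueOf(value);
    }

    public Long getLongParam(String name, Long defaultValue) {
        String value = this.params.get(name);
        return value == null ? defaultValue : Long.valueOf(value);
    }
}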

5.4 Writing the Netty Server

package cn.edu.glut.geoserver.netty.server;

import cn.edu.glut.geoserver.config.MyConfig;
import cn.edu.glut.geoserver.netty.handler.ServerHandler;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

/**
 * Netty HTTP server.
 *
 * @author Grong
 * @Date 2022-09-19 21:39
 */
public class MyGeoHttpServer {

    private static EventLoopGroup bossGroup;
    private static EventLoopGroup workerGroup;

    static {
        bossGroup = new NioEventLoopGroup(1);
        workerGroup = new NioEventLoopGroup();
    }

    public static void run(int port){
        new MyGeoHttpServer().run0(port);
    }

    /**
     * Start the Netty server.
     */
    public void run0(int port){
        ServerBootstrap serverBootstrap = new ServerBootstrap();

        // Configure the server
        serverBootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception {
                        ChannelPipeline pipeline = ch.pipeline();
                        // Add the HTTP codec handlers
                        pipeline.addLast(new HttpRequestDecoder());   // decodes incoming HTTP requests
                        pipeline.addLast(new HttpResponseEncoder());  // encodes outgoing HTTP responses
                        pipeline.addLast(new HttpObjectAggregator(1024 * 128)); // aggregates the request line, headers and body into one FullHttpRequest
                        // Add the custom business handler
                        pipeline.addLast(new ServerHandler());
                    }
                });

        try {
            // Bind and start listening on the port
            ChannelFuture future = serverBootstrap.bind(port).sync();

            System.out.println("Netty server started on port " + port);

            future.channel().closeFuture().sync();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }

    }

    public static void main(String[] args) {
        int port = MyConfig.getPort();
        MyGeoHttpServer.run(port);
    }
}

5.5 Writing the Custom Handler

package cn.edu.glut.geoserver.netty.handler;

import cn.edu.glut.geoserver.service.KafkaService;
import cn.hutool.core.map.MapUtil;
import cn.hutool.json.JSONUtil;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.*;
import io.netty.util.CharsetUtil;

/**
 * Custom HTTP handler for the web service.
 *
 * @author Grong
 * @Date 2022-09-19 21:36
 */
public class ServerHandler extends SimpleChannelInboundHandler<FullHttpRequest> {

    private KafkaService kafkaService = new KafkaService();

    /**
     * Receives a message from the client and writes back a response.
     *
     * @param ctx             channel handler context
     * @param fullHttpRequest the aggregated HTTP request
     * @throws Exception
     */
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest fullHttpRequest) throws Exception {
        // Parse the request parameters
        Request request = Request.build(ctx, fullHttpRequest);
        // Publish the message to Kafka
        Boolean flag = this.kafkaService.sendMessage(request);
        if (flag) {
            // Build the success JSON response
            String result = JSONUtil.toJsonStr(MapUtil.builder().put("status", "ok").build());
            response(ctx, result);
        } else {
            // Build the error JSON response
            String result = JSONUtil.toJsonStr(MapUtil.builder().put("status", "error").build());
            response(ctx, result);
        }
    }


    /**
     * Writes a response back to the client.
     *
     * @param ctx    channel handler context
     * @param result the JSON response body
     */
    private void response(ChannelHandlerContext ctx, String result){

        // Build the HTTP response
        FullHttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        // Response body
        httpResponse.content().writeBytes(Unpooled.copiedBuffer(result, CharsetUtil.UTF_8));
        // Set the content type and character encoding of the response
        httpResponse.headers().set(HttpHeaderNames.CONTENT_TYPE, "application/json;charset=utf-8");
        ctx.writeAndFlush(httpResponse)
                .addListener(ChannelFutureListener.CLOSE); // HTTP here is treated as short-lived, so close the channel once the response is written
    }
}
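
With the server and handler in place, you can sanity-check the chain end to end by sending a test request to the Netty port (38080 in my.setting). The sketch below uses the JDK 11 HttpClient; the /report path and the parameter values are made-up test data, only the parameter names matter because they are what KafkaService reads:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GeoReportClientDemo {

    public static void main(String[] args) throws Exception {
        // Parameter names match what KafkaService extracts from the request
        String url = "http://127.0.0.1:38080/report"
                + "?longitude=110.29&latitude=25.27&speed=12.5"
                + "&routeId=route-0001&userId=1001";

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());

        // Expect {"status":"ok"} if the message was handed to the Kafka producer
        System.out.println(response.body());
    }
}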

6. Deploying the MongoDB Sharded Cluster

6.1 Pulling the MongoDB Image

docker pull mongo:4.0.3

6.2 Creating Three Config Server Containers

$ docker run -d --name configsvr01  -p 17000:27019 -v mongoconfigsvr-data-01:/data/configdb mongo:4.0.3 --configsvr --replSet "rs_configsvr"  --bind_ip_all

$ docker run -d --name configsvr02  -p 17001:27019 -v mongoconfigsvr-data-02:/data/configdb mongo:4.0.3 --configsvr --replSet "rs_configsvr"  --bind_ip_all

$ docker run -d --name configsvr03  -p 17002:27019 -v mongoconfigsvr-data-03:/data/configdb mongo:4.0.3 --configsvr --replSet "rs_configsvr"  --bind_ip_all

6.3 Initializing the Config Server Replica Set

# Enter the container
docker exec -it configsvr01 /bin/bash

# Connect to MongoDB
mongo 172.16.59.180:17000

# Initialize the replica set
rs.initiate()

# Add the other two config server nodes
rs.add("172.16.59.180:17001")
rs.add("172.16.59.180:17002")

6.4 Creating Two Shards, Each with Three Nodes

# shard 1
docker run -d --name shardsvr01  -p 37000:27018 -v mongoshardsvr-data-01:/data/db mongo:4.0.3 --replSet "rs_shardsvr1" --bind_ip_all --shardsvr
docker run -d  --name shardsvr02  -p 37001:27018 -v mongoshardsvr-data-02:/data/db mongo:4.0.3 --replSet "rs_shardsvr1" --bind_ip_all --shardsvr
docker run -d  --name shardsvr03  -p 37002:27018 -v mongoshardsvr-data-03:/data/db mongo:4.0.3 --replSet "rs_shardsvr1" --bind_ip_all --shardsvr

# shard 2
docker run -d  --name shardsvr04  -p 37003:27018 -v mongoshardsvr-data-04:/data/db mongo:4.0.3 --replSet "rs_shardsvr2" --bind_ip_all --shardsvr
docker run -d  --name shardsvr05  -p 37004:27018 -v mongoshardsvr-data-05:/data/db mongo:4.0.3 --replSet "rs_shardsvr2" --bind_ip_all --shardsvr
docker run -d  --name shardsvr06  -p 37005:27018 -v mongoshardsvr-data-06:/data/db mongo:4.0.3 --replSet "rs_shardsvr2" --bind_ip_all --shardsvr

6.5 Initializing the Shard Replica Sets

# Enter the container
docker exec -it shardsvr01 /bin/bash

# Connect to the first shard replica set
mongo 172.16.59.180:37000

# Initialize the replica set
rs.initiate()

# Add a secondary node to shard 1
rs.add("172.16.59.180:37001")

# Add an arbiter node
rs.addArb("172.16.59.180:37002")


# Exit, then enter the second shard
docker exec -it shardsvr04 /bin/bash

# Connect to the second shard replica set
mongo 172.16.59.180:37003

# Initialize the replica set
rs.initiate()

# Add a secondary node to shard 2
rs.add("172.16.59.180:37004")

# Add an arbiter node
rs.addArb("172.16.59.180:37005")

6.6 Creating the mongos Container and Pointing It at the Config Servers

docker run -d --name mongos -p 6666:27017 --entrypoint "mongos" mongo:4.0.3 --configdb rs_configsvr/172.16.59.180:17000,172.16.59.180:17001,172.16.59.180:17002 --bind_ip_all

6.7 Adding the Shards

# Enter the mongos container
docker exec -it mongos bash

# Connect to mongos
mongo 172.16.59.180:6666

# Add the shards
sh.addShard("rs_shardsvr1/172.16.59.180:37000,172.16.59.180:37001,172.16.59.180:37002")
sh.addShard("rs_shardsvr2/172.16.59.180:37003,172.16.59.180:37004,172.16.59.180:37005")


6.8 Enabling Sharding

# Enable sharding for the database
sh.enableSharding("geoserver")

# Shard the collection by the hashed value of userId
sh.shardCollection("geoserver.tb_route_point", {"userId": "hashed" })

7. Deploying the Kafka Cluster

7.1 Pulling the Kafka Image

docker pull bitnami/kafka:2.8.0

7.2 Deploying the Cluster

docker run -d --network=host --privileged=true \
--name kafka-node1 -p 9092:9092 \
-e KAFKA_BROKER_ID=1 \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=172.16.59.180:2181 \
-e KAFKA_ZOOKEEPER_PROTOCOL=PLAINTEXT \
-e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092 \
-e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://172.16.59.180:9092 \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-v kafka-node1-data:/bitnami/kafka/data \
-v kafka-node1-config:/bitnami/kafka/config \
bitnami/kafka:2.8.0

docker run -d --network=host --privileged=true \
--name kafka-node2 -p 9093:9092 \
-e KAFKA_BROKER_ID=2 \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=172.16.59.180:2181 \
-e KAFKA_ZOOKEEPER_PROTOCOL=PLAINTEXT \
-e KAFKA_CFG_LISTENERS=PLAINTEXT://:9093 \
-e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://172.16.59.180:9093 \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-v kafka-node2-data:/bitnami/kafka/data \
-v kafka-node2-config:/bitnami/kafka/config \
bitnami/kafka:2.8.0

docker run -d --network=host --privileged=true \
--name kafka-node3 -p 9094:9092 \
-e KAFKA_BROKER_ID=3 \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=172.16.59.180:2181 \
-e KAFKA_ZOOKEEPER_PROTOCOL=PLAINTEXT \
-e KAFKA_CFG_LISTENERS=PLAINTEXT://:9094 \
-e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://172.16.59.180:9094 \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-v kafka-node3-data:/bitnami/kafka/data \
-v kafka-node3-config:/bitnami/kafka/config \
bitnami/kafka:2.8.0
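
The three brokers above register with a ZooKeeper instance that is assumed to be already running at 172.16.59.180:2181; deploying ZooKeeper is not covered here. The MY-GEO-SERVER topic also has to exist before the producer and consumer run. With auto topic creation enabled the first send will create it with default settings, but it can be created explicitly, for example with the Kafka AdminClient as sketched below; the partition count and replication factor are illustrative choices, not values from the original setup:

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateGeoTopic {

    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        // Same broker list as in my.setting
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                "172.16.59.180:9092,172.16.59.180:9093,172.16.59.180:9094");

        try (AdminClient adminClient = AdminClient.create(properties)) {
            // 3 partitions and replication factor 3 are assumptions,
            // chosen so the topic is spread across all three brokers
            NewTopic topic = new NewTopic("MY-GEO-SERVER", 3, (short) 3);
            adminClient.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}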

8. Writing the Spring Boot Project Code

Create a consumer class that receives the messages from the Kafka cluster and writes them to MongoDB:

import cn.hutool.core.util.StrUtil;
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.bson.types.ObjectId;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.stream.Collectors;

@Component
public class GeoServerConsumer {

    @Autowired
    private MongoTemplate mongoTemplate;

    // Batch listener on the topic the Netty server publishes to
    @KafkaListener(topics = {"MY-GEO-SERVER"})
    public void onMessage(List<ConsumerRecord<String, String>> records) {
        System.out.println(StrUtil.format("Received {} messages: {}", records.size(), records));
        // Map each Kafka record onto a RoutePoint document
        List<RoutePoint> routePointList = records.stream().map(record -> {
            JSONObject jsonObject = JSONUtil.parseObj(record.value());

            RoutePoint routePoint = new RoutePoint();
            routePoint.setId(ObjectId.get());
            routePoint.setLongitude(jsonObject.getDouble("longitude"));
            routePoint.setLatitude(jsonObject.getDouble("latitude"));
            routePoint.setSpeed(jsonObject.getDouble("speed"));
            routePoint.setRouteId(jsonObject.getStr("routeId"));
            routePoint.setUserId(jsonObject.getLong("userId"));
            routePoint.setCreated(System.currentTimeMillis());
            return routePoint;
        }).collect(Collectors.toList());

        // Write the whole batch into the sharded collection
        this.mongoTemplate.insert(routePointList, RoutePoint.class);

    }
}
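
The RoutePoint entity used by the consumer is not shown in the original article. A minimal sketch, assuming Spring Data MongoDB maps it onto the sharded geoserver.tb_route_point collection created in section 6.8 and that Lombok generates the getters and setters called above, could look like this:

import lombok.Data;
import org.bson.types.ObjectId;
import org.springframework.data.mongodb.core.mapping.Document;

/**
 * One GPS point of a route, stored in the sharded tb_route_point collection.
 * (Sketch; field names follow the JSON keys produced by KafkaService.)
 */
@Data
@Document("tb_route_point")
public class RoutePoint {

    private ObjectId id;

    private Double longitude;
    private Double latitude;
    private Double speed;

    private String routeId;
    // userId is the hashed shard key chosen in section 6.8
    private Long userId;

    // creation timestamp in milliseconds
    private Long created;
}

Since onMessage receives a List of records, the listener has to run in batch mode (for example by setting spring.kafka.listener.type=batch), and the application's MongoDB URI should point at the mongos router (port 6666 above) rather than at an individual shard.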

That completes a small demo that handles high concurrency and stores massive amounts of data. You can load-test it with JMeter.
 
