kafka + websocket 给前端展示

依赖:


<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.12</artifactId>
    <version>2.2.0</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.2.0</version>
</dependency>
<dependency>
    <groupId>javax</groupId>
    <artifactId>javaee-api</artifactId>
    <version>8.0</version>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-websocket</artifactId>
</dependency>

kafka代码:

Producer

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.log4j.Logger;

import java.util.Properties;

public class Producer {

    static Logger log = Logger.getLogger(Producer.class);

    // Single shared producer instance. KafkaProducer is documented as
    // thread-safe, so one static instance can be reused application-wide.
    // Parameterized as <String, String> to match the StringSerializer
    // configured below (the original used the raw type).
    private static KafkaProducer<String, String> producer = null;

    /*
     * Initialize the producer once, when the class is first loaded.
     */
    static {
        Properties configs = initConfig();
        producer = new KafkaProducer<>(configs);
    }

    /*
     * Build the producer configuration.
     *
     * @return properties pointing at MQDict.MQ_ADDRESS_COLLECTION with
     *         String key/value serializers
     */
    private static Properties initConfig() {
        Properties props = new Properties();
        props.put("bootstrap.servers", MQDict.MQ_ADDRESS_COLLECTION);
        // "all": wait for the full in-sync replica set to acknowledge each
        // record — strongest durability setting.
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        return props;
    }

}

SocketConsumer 和websocket结合 保持长监听

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Arrays;
import java.util.Properties;

public class SocketConsumer extends Thread {

    /**
     * Long-running poll loop: subscribes to the topic from
     * {@code MQDict.CONSUMER_TOPIC} and forwards every record value to the
     * WebSocket client registered under the name "utsDemo".
     */
    @Override
    public void run() {
        Properties props = new Properties();
        props.put("bootstrap.servers", MQDict.MQ_ADDRESS_COLLECTION);
        props.put("group.id", MQDict.CONSUMER_GROUP_ID);
        props.put("enable.auto.commit", MQDict.CONSUMER_ENABLE_AUTO_COMMIT);
        props.put("auto.commit.interval.ms", MQDict.CONSUMER_AUTO_COMMIT_INTERVAL_MS);
        props.put("session.timeout.ms", MQDict.CONSUMER_SESSION_TIMEOUT_MS);
        props.put("max.poll.records", MQDict.CONSUMER_MAX_POLL_RECORDS);
        // Start from the earliest offset when the group has no committed offset.
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        // Parameterized types instead of the raw Consumer/ConsumerRecords
        // the original used (removes unchecked warnings).
        Consumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(MQDict.CONSUMER_TOPIC));
        // NOTE(review): infinite loop with no shutdown path — consumer.close()
        // is never called. Consider calling consumer.wakeup() from another
        // thread and catching WakeupException here to exit cleanly.
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(MQDict.CONSUMER_POLL_TIME_OUT);
            // Debug output left in place (behavior-affecting stdout).
            System.out.println("&&&&&&&&&&&&&&&&&&&&&&" + records.count() + "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^");
            for (ConsumerRecord<String, String> c1 : records) {
                System.out.println(c1.value() + "%%%%%%" + "12345678");
                WebSocketServer.sendMessage("utsDemo", c1.value());
            }
        }
    }

}

ConsumerLinster 监听kafka消费者

import org.springframework.stereotype.Component;

@Component
public class ConsumerLinster {

    /**
     * Spring instantiates this component at startup; the constructor
     * immediately launches the Kafka-polling thread so consumption begins
     * as soon as the application context is built.
     */
    public ConsumerLinster() {
        new SocketConsumer().start();
    }

}

websocket

WebSocketServer

import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

@Component
@ServerEndpoint("/ws/{socketname}")
@Slf4j
public class WebSocketServer {

private Session session;

private static AtomicInteger onlineNum = new AtomicInteger();

private static Map map = new ConcurrentHashMap();

/**

  • 建立连接

  • @param session session

  • @param socketname 用户ID
    */

    @OnOpen
    public void onOpen(Session session, @PathParam(“socketname”) String socketname) {
    int num = onlineNum.incrementAndGet();
    this.session = session;
    map.put(socketname, this.session);
    log.info(“新建立客户端连接,sessionId为:{},当前连接数为:{}”, session.getId(), num);
    }

/**

  • 关闭连接

  • @param session session
    */

    @OnClose
    public void onClose(Session session) {
    map.values().remove(session);
    int num = onlineNum.decrementAndGet();
    log.info(“sessionId为{}的客户端连接关闭,当前剩余连接数为:{}”, session.getId(), num);
    }

/**

  • 客户端发送消息

  • @param session session

  • @param message 消息
    */

    @OnMessage
    public void onMessage(Session session, String message) {
    log.info(“sessionId : {} , onMessage : {}”, session.getId(), message);
    }

/**

  • 发生错误
    */

    @OnError
    public void onError(Session session, Throwable error) {
    log.error(“WebSocket发生错误:{},Session ID: {}”, error, session.getId());
    }

/**

  • 服务器端推送消息

*/
public static void sendMessage(String socketname,String jsonString){
Session nowsession = map.get(socketname);

    if(nowsession!=null){
        try {
            nowsession.getBasicRemote().sendText(jsonString);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

}

WebSocketConfig

@Configuration
public class WebSocketConfig extends ServerEndpointConfig.Configurator {

    /**
     * Registers the exporter bean that detects classes annotated with
     * {@code @ServerEndpoint} and publishes them as WebSocket endpoints.
     */
    @Bean
    public ServerEndpointExporter serverEndpointExporter() {
        ServerEndpointExporter exporter = new ServerEndpointExporter();
        return exporter;
    }

}

遇到session一直为null问题

RequestListener

@Component
@Slf4j
public class RequestListener implements ServletRequestListener {
@Override
public void requestInitialized(ServletRequestEvent sre) {
//将所有request请求都携带上httpSession
HttpSession session = ((HttpServletRequest) sre.getServletRequest()).getSession();
log.info(“将所有request请求都携带上httpSession {}”,session.getId());
}
public RequestListener() {}

@Override
public void requestDestroyed(ServletRequestEvent arg0)  {}

}

kafka配置参数

public class MQDict {

    public static final String MQ_ADDRESS_COLLECTION = "192.168.3.253:9092";			// Kafka broker address
    public static final String CONSUMER_TOPIC = "utsDemo";						// topic the consumer subscribes to
    public static final String PRODUCER_TOPIC = "utsDemo";						// topic the producer writes to
    public static final String CONSUMER_GROUP_ID = "1";								// consumer group id (can be configured separately)
    public static final String CONSUMER_ENABLE_AUTO_COMMIT = "true";				// auto-commit offsets (consumer)
    public static final String CONSUMER_AUTO_COMMIT_INTERVAL_MS = "1000";			// auto-commit interval, ms
    public static final String CONSUMER_SESSION_TIMEOUT_MS = "30000";				// session timeout, ms
    public static final int CONSUMER_MAX_POLL_RECORDS = 100;							// max records fetched per poll
    public static final Duration CONSUMER_POLL_TIME_OUT = Duration.ofMillis(3000);	// poll timeout

    // Constants holder — not meant to be instantiated.
    private MQDict() {}

}

参考文章:

https://www.cnblogs.com/coder163/p/8605645.html

有的参考文章实在是忘记了出处,感谢分享相关内容的人。

你可能感兴趣的:(kafka,websocket,session)