Kafka + Flink: wordCount and writing the data to MySQL


    step1: Set up the Flink environment

    step2: Set up the Kafka environment (configure the system variable $KAFKA_HOME)

    step3: Set up the ZooKeeper environment

    step4: Start ZooKeeper: in ZooKeeper's bin directory run: zkServer.sh start

    step5: Start Kafka: in Kafka's bin directory run: kafka-server-start.sh $KAFKA_HOME/config/server.properties

    step6: Create the topic, a producer and a consumer, and check the topic status (see the describe command after the list below)

    1. Create the topic (via ZooKeeper):
        kafka-topics.sh --create --zookeeper hadoop001:2181 --replication-factor 1 --partitions 1 --topic hello_topic
    2. List all topics:
        kafka-topics.sh --list --zookeeper hadoop001:2181
    3. Produce messages (via the broker):
        kafka-console-producer.sh --broker-list hadoop001:9092 --topic hello_topic
    4. Consume messages (via ZooKeeper):
        kafka-console-consumer.sh --zookeeper hadoop001:2181 --topic hello_topic --from-beginning
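
    Step 6 also calls for checking the topic status; the original does not show that command, but with the same ZooKeeper-based setup it is typically:

        kafka-topics.sh --describe --zookeeper hadoop001:2181 --topic hello_topic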

    step7: Build the wordCount demo code

    1. pom dependencies (kafka + flink + mysql)

    
    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.11</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_2.11</artifactId>
            <version>1.4.2</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-scala_2.11</artifactId>
            <version>1.4.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka-0.9_2.10</artifactId>
            <version>1.0.0</version>
        </dependency>
    </dependencies>

    2. Implementation code

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000);
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "192.168.1.200:9092"); // kafka broker address
        properties.setProperty("zookeeper.connect", "192.168.1.200:2181"); // zookeeper address
        properties.setProperty("group.id", "testFlink");                   // consumer group id
        FlinkKafkaConsumer09<String> myConsumer =
                new FlinkKafkaConsumer09<>("testFlink", new SimpleStringSchema(), properties);
        DataStream<String> stream = env.addSource(myConsumer);
        DataStream<Tuple2<String, Integer>> counts = stream.flatMap(new LineSplitter()).keyBy(0).sum(1);
        counts.print();
        env.execute("WordCount from Kafka data");
    }

    public static final class LineSplitter implements FlatMapFunction<String, Tuple2<String, Integer>> {
        private static final long serialVersionUID = 1L;

        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
            String[] tokens = value.toLowerCase().split("\\W+");
            for (String token : tokens) {
                if (token.length() > 0) {
                    out.collect(new Tuple2<>(token, 1));
                }
            }
        }
    }
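
    As a quick check, typing "hello world hello" into the console producer from step 6 should make the job print running counts such as (hello,1), (world,1), (hello,2), because keyBy(0).sum(1) emits an updated count for a word each time it appears.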

    step8:flink+mysql

    public static void main(String[] args) throws Exception {
        Properties pro = new Properties();
        pro.put("bootstrap.servers", Config.getString("kafka.hosts"));
        pro.put("zookeeper.connect", Config.getString("kafka.zookper"));
        pro.put("group.id", Config.getString("kafka.group"));
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().disableSysoutLogging(); // suppress sysout log output
        env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10));
        env.enableCheckpointing(5000);
        DataStream<String> sourceStream = env.addSource(
                new FlinkKafkaConsumer09<String>(Config.getString("kafka.topic"),
                        new SimpleStringSchema(), pro));
        DataStream<Tuple3<Integer, String, String>> sourceStreamTra = sourceStream
                .filter(new FilterFunction<String>() {
                    @Override
                    public boolean filter(String value) throws Exception {
                        return StringUtils.isNotBlank(value);
                    }
                })
                .map(new MapFunction<String, Tuple3<Integer, String, String>>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple3<Integer, String, String> map(String value) throws Exception {
                        String[] args = value.split(":");
                        return new Tuple3<>(Integer.valueOf(args[0]), args[1], args[2]);
                    }
                });
        sourceStreamTra.addSink(new MysqlSink());
        env.execute("data to mysql start");
    }
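
    Each incoming Kafka message is expected in the form deptno:dname:location (for example 10:ACCOUNTING:NEW YORK), matching the columns used in step 9. The job also reads its settings through a Config helper that the original article does not show; below is a minimal sketch, assuming it simply loads a properties file from the classpath (the file name config.properties and the implementation are assumptions; only the keys kafka.hosts, kafka.zookper, kafka.group, kafka.topic and the mysql.* keys come from the article):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public final class Config {
        private static final Properties PROPS = new Properties();

        static {
            // "config.properties" is a hypothetical file name holding keys such as
            // kafka.hosts, kafka.zookper, kafka.group, kafka.topic, mysql.user,
            // mysql.password, mysql.driver and mysql.url
            try (InputStream in = Config.class.getClassLoader()
                    .getResourceAsStream("config.properties")) {
                if (in != null) {
                    PROPS.load(in);
                }
            } catch (IOException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private Config() {}

        public static String getString(String key) {
            return PROPS.getProperty(key);
        }
    }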

    step9: Code that inserts into the MySQL DB

    // Fields and invoke() of the MysqlSink used in step 8 (the class declaration
    // is omitted in the original article).
    private static final long serialVersionUID = 1L;
    private Connection connection;
    private PreparedStatement preparedStatement;
    String username = Config.getString("mysql.user");
    String password = Config.getString("mysql.password");
    String drivername = Config.getString("mysql.driver");
    String dburl = Config.getString("mysql.url");

    @Override
    public void invoke(Tuple3<Integer, String, String> value) throws Exception {
        Class.forName(drivername);
        connection = DriverManager.getConnection(dburl, username, password);
        String sql = "replace into flinkData(deptno,dname,location) values(?,?,?)";
        preparedStatement = connection.prepareStatement(sql);
        preparedStatement.setInt(1, value.f0);
        preparedStatement.setString(2, value.f1);
        preparedStatement.setString(3, value.f2);
        int insert = preparedStatement.executeUpdate();
        if (insert > 0) {
            System.out.println("value = [" + value + "]");
        }
        if (preparedStatement != null) {
            preparedStatement.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
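
    The invoke() above opens and closes a JDBC connection for every record. A common refinement (not part of the original article, shown here only as a sketch) is to make MysqlSink extend Flink's RichSinkFunction and manage the connection in open()/close(), so it is created once per parallel sink instance:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    import org.apache.flink.api.java.tuple.Tuple3;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

    public class MysqlSink extends RichSinkFunction<Tuple3<Integer, String, String>> {
        private static final long serialVersionUID = 1L;

        private transient Connection connection;
        private transient PreparedStatement preparedStatement;

        @Override
        public void open(Configuration parameters) throws Exception {
            // create the connection and statement once when the sink starts
            Class.forName(Config.getString("mysql.driver"));
            connection = DriverManager.getConnection(
                    Config.getString("mysql.url"),
                    Config.getString("mysql.user"),
                    Config.getString("mysql.password"));
            preparedStatement = connection.prepareStatement(
                    "replace into flinkData(deptno,dname,location) values(?,?,?)");
        }

        @Override
        public void invoke(Tuple3<Integer, String, String> value) throws Exception {
            // reuse the prepared statement for every record
            preparedStatement.setInt(1, value.f0);
            preparedStatement.setString(2, value.f1);
            preparedStatement.setString(3, value.f2);
            preparedStatement.executeUpdate();
        }

        @Override
        public void close() throws Exception {
            if (preparedStatement != null) {
                preparedStatement.close();
            }
            if (connection != null) {
                connection.close();
            }
        }
    }

    Either way, "replace into" only acts as an upsert if the flinkData table has a primary or unique key (presumably on deptno); without one it simply inserts a new row each time.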

Original article: https://blog.csdn.net/long19900613/article/details/80725073
