For how to use JdbcInsertBolt and JdbcLookupBolt, see the official documentation directly:
Official introduction
The example code here is a word count that uses JdbcClient to execute SQL directly.
maven pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.sid.bigdata</groupId>
  <artifactId>storm</artifactId>
  <version>0.0.1</version>
  <packaging>jar</packaging>
  <name>storm</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <storm.version>1.1.1</storm.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>${storm.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-jdbc</artifactId>
      <version>${storm.version}</version>
    </dependency>
    <dependency>
      <groupId>mysql</groupId>
      <artifactId>mysql-connector-java</artifactId>
      <version>5.1.31</version>
    </dependency>
  </dependencies>
</project>
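The bolt below assumes a wordcount table already exists in the sid database. The original post does not show the DDL; a minimal schema inferred from the bolt's insert statement and its declared fields (word, word_count) could look like the following, where the varchar length is an assumption:

create table wordcount (
  word varchar(64) not null primary key,
  word_count int
);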
spout
package integration.jdbc;
import java.util.Map;
import java.util.Random;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;
/**
* @author liyijie
* @date June 13, 2018, 8:32:24 PM
* @email [email protected]
* @remark
* @version
*/
public class WordCountSpout extends BaseRichSpout{
private SpoutOutputCollector collector;
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
this.collector = collector;
}
public static final String[] words = new String[]{"aaa","bbb","ccc","aa","bb","a"};
/**
* Emit one randomly chosen word per call
* */
public void nextTuple() {
Random random = new Random();
String word = words[random.nextInt(words.length)]; // pick a random word from the array
// emit it downstream
this.collector.emit(new Values(word));
System.out.println("emit: " + word);
Utils.sleep(1000L);
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("word"));
}
}
bolt
package integration.jdbc;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.jdbc.common.Column;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.jdbc.common.HikariCPConnectionProvider;
import org.apache.storm.jdbc.common.JdbcClient;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import com.google.common.collect.Maps;
/**
* @author liyijie
* @date June 13, 2018, 8:58:54 PM
* @email [email protected]
* @remark
* @version
*
* Word-count aggregation bolt
*/
public class CountBolt extends BaseRichBolt{
private OutputCollector collector;
private JdbcClient jdbcClient;
private ConnectionProvider connectionProvider;
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
this.collector = collector;
Map<String, Object> hikariConfigMap = Maps.newHashMap();
hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/sid");
hikariConfigMap.put("dataSource.user","root");
hikariConfigMap.put("dataSource.password","your-password"); // replace with your MySQL password
connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
// initialize the database connection pool
connectionProvider.prepare();
// the second argument is the query timeout in seconds
jdbcClient = new JdbcClient(connectionProvider, 30);
}
Map<String, Integer> map = new HashMap<String, Integer>();
/**
* Business logic:
* 1. get each word
* 2. keep a running count per word
* 3. write the count to MySQL
* */
public void execute(Tuple input) {
String word = input.getStringByField("word");
Integer count = map.get(word);
if(count==null){
count=0;
}
count++;
map.put(word, count);
// check whether this word already exists in the table
List<Column> list = new ArrayList<Column>();
// build a query parameter: column name, value, SQL type
list.add(new Column("word", word, Types.VARCHAR));
List<List<Column>> select = jdbcClient.select("select word from wordcount where word = ?",list);
// number of rows returned
long n = select.size();
if(n>=1){
// update (string concatenation is demo-only; see the parameterized sketch after this class)
jdbcClient.executeSql("update wordcount set word_count = "+map.get(word)+" where word = '"+word+"'");
}else{
// insert
jdbcClient.executeSql("insert into wordcount values( '"+word+"',"+map.get(word)+")");
}
//collector.emit(new Values(word,map.get(word)));
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
// a downstream JdbcInsertBolt would write this output straight to MySQL, so the field names here must match the MySQL table's column names
declarer.declare(new Fields("word","word_count"));
}
@Override
public void cleanup() {
connectionProvider.cleanup();
}
}
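A note on the two executeSql calls above: they splice values directly into the SQL string, which is fine for a demo but invites SQL injection. Below is a minimal parameterized sketch. It assumes storm-jdbc 1.1.1's JdbcClient.executeInsertQuery(String, List<List<Column>>), which executes a prepared statement with one List<Column> of parameters per row; verify that method against your storm-jdbc version before relying on it.

package integration.jdbc;

import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.storm.jdbc.common.Column;
import org.apache.storm.jdbc.common.JdbcClient;

public class ParameterizedWriteSketch {
    // Sketch only: update the running count for an existing word.
    // Parameters bind to the ? placeholders in order.
    static void updateCount(JdbcClient client, String word, int count) {
        List<Column> params = new ArrayList<Column>();
        params.add(new Column("word_count", count, Types.INTEGER));
        params.add(new Column("word", word, Types.VARCHAR));
        client.executeInsertQuery("update wordcount set word_count = ? where word = ?",
                Collections.singletonList(params));
    }

    // Sketch only: insert a word seen for the first time.
    static void insertCount(JdbcClient client, String word, int count) {
        List<Column> params = new ArrayList<Column>();
        params.add(new Column("word", word, Types.VARCHAR));
        params.add(new Column("word_count", count, Types.INTEGER));
        client.executeInsertQuery("insert into wordcount values(?, ?)",
                Collections.singletonList(params));
    }
}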
topology
package integration.jdbc;
import java.sql.Types;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.topology.TopologyBuilder;
/**
* @author liyijie
* @date June 13, 2018, 1:01:08 AM
* @email [email protected]
* @remark
* @version
*/
public class LocalWordCountStormJdbcTopology {
public static void main(String[] args) {
// local mode: runs in-process, no need to stand up a Storm cluster
LocalCluster cluster = new LocalCluster();
// TopologyBuilder wires spouts and bolts into a Topology
// every Storm job is submitted as a Topology
// the Topology specifies the execution order of its spouts and bolts
TopologyBuilder tb = new TopologyBuilder();
tb.setSpout("DataSourceSpout", new WordCountSpout());
// CountBolt receives tuples from DataSourceSpout via shuffle grouping
tb.setBolt("CountBolt", new CountBolt()).shuffleGrouping("DataSourceSpout");
/**
Map hikariConfigMap = Maps.newHashMap();
hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/sid");
hikariConfigMap.put("dataSource.user","root");
hikariConfigMap.put("dataSource.password","Liyijie331");
ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
JdbcClient jdbcClient = new JdbcClient(connectionProvider, 30);
*/
/** Write to MySQL
// MySQL table name
String tableName = "wordcount";
JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
.withTableName(tableName)
.withQueryTimeoutSecs(30);
tb.setBolt("JdbcInsertBolt", userPersistanceBolt).shuffleGrouping("CountBolt");
*/
// the first argument is the topology name, the third is the Topology itself
cluster.submitTopology("LocalWordCountStormJdbcTopology", new Config(), tb.createTopology());
}
}
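For comparison with the official bolts mentioned at the top, here is a minimal JdbcLookupBolt sketch that reads word_count back out of MySQL. It reuses the connectionProvider from the commented-out block above, and assumes the storm-jdbc 1.1.1 signatures SimpleJdbcLookupMapper(Fields, List<Column>) and JdbcLookupBolt(ConnectionProvider, String, JdbcLookupMapper) from the official documentation; treat it as a sketch, not tested code.

// Additional imports assumed: org.apache.storm.jdbc.bolt.JdbcLookupBolt,
// org.apache.storm.jdbc.mapper.JdbcLookupMapper,
// org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper,
// org.apache.storm.tuple.Fields, org.apache.storm.jdbc.common.Column, java.sql.Types
Fields outputFields = new Fields("word", "word_count");
// the "word" field of the incoming tuple binds to the ? placeholder
List<Column> queryParamColumns = new ArrayList<Column>();
queryParamColumns.add(new Column("word", Types.VARCHAR));
JdbcLookupMapper lookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
JdbcLookupBolt wordLookupBolt = new JdbcLookupBolt(connectionProvider,
        "select word_count from wordcount where word = ?", lookupMapper);
tb.setBolt("JdbcLookupBolt", wordLookupBolt).shuffleGrouping("DataSourceSpout");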
Result
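When the topology runs, the console prints a line such as emit: aaa once per second, and the wordcount table in MySQL ends up with one row per word whose word_count keeps increasing.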