flink当中对于实时处理有很多算子。我们来看看常用的算子主要有哪些：dataStream当中的算子主要分为三大类。
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
object FlinkUnion {
  /** Demonstrates the union operator: merges two streams of the same element type. */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    // first source stream
    val left: DataStream[String] = env.fromElements("hello world", "test scala")
    // second source stream
    val right: DataStream[String] = env.fromElements("second test", "spark flink")
    // union combines both streams into a single stream
    val merged: DataStream[String] = left.union(right)
    // identity map — records pass through untouched
    val passThrough: DataStream[String] = merged.map(record => record)
    // sink: print to stdout with a single output task
    passThrough.print().setParallelism(1)
    // launch the job
    env.execute()
  }
}
import org.apache.flink.streaming.api.scala.{ConnectedStreams, DataStream, StreamExecutionEnvironment}
object FlinkConnect {
  /** Demonstrates connect: pairs two streams of *different* element types. */
  def main(args: Array[String]): Unit = {
    // execution entry point
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    // a String-typed stream
    val words: DataStream[String] = env.fromElements("hello world", "abc test")
    // an Int-typed stream
    val numbers: DataStream[Int] = env.fromElements(1, 2, 3, 4, 5)
    // connect keeps both element types side by side
    val connected: ConnectedStreams[String, Int] = words.connect(numbers)
    // map takes one function per side; the result type widens to Any here
    val mapped: DataStream[Any] = connected.map(
      s => s + "abc",
      n => n * 2
    )
    mapped.print().setParallelism(1)
    env.execute("connect stream")
  }
}
import java.{lang, util}
import org.apache.flink.streaming.api.collector.selector.OutputSelector
import org.apache.flink.streaming.api.scala.{DataStream, SplitStream, StreamExecutionEnvironment}
object FlinkSplit {
  /** Demonstrates the split/select operators: tag records, then pick one tag. */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    // source stream
    val source: DataStream[String] = env.fromElements("hello world", "test spark", "spark flink")
    // tag every record with either "hello" or "other"
    val tagged: SplitStream[String] = source.split(new OutputSelector[String] {
      override def select(value: String): lang.Iterable[String] = {
        val tags = new util.ArrayList[String]()
        // records containing "hello" go to the "hello" side, the rest to "other"
        val tag = if (value.contains("hello")) "hello" else "other"
        tags.add(tag)
        tags
      }
    })
    // keep only the records tagged "hello"
    val helloOnly: DataStream[String] = tagged.select("hello")
    // print every string that contains "hello"
    helloOnly.print().setParallelism(1)
    env.execute()
  }
}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
object FlinkPartition {
  /** Word count over filtered records, redistributing data with rebalance. */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    val source: DataStream[String] = env.fromElements("hello world", "test spark", "abc hello", "hello flink")
    val counts: DataStream[(String, Int)] = source
      .filter(_.contains("hello"))
      // alternatives: .shuffle (random redistribution to downstream partitions)
      // or .rescale (local round-robin)
      .rebalance // round-robin repartitioning; involves a shuffle
      .flatMap(_.split(" "))
      .map((_, 1))
      .keyBy(0)
      .sum(1)
    counts.print().setParallelism(1)
    env.execute()
  }
}
import org.apache.flink.api.common.functions.Partitioner
/** Routes records containing "hello" to partition 0 and everything else to partition 1. */
class MyPartitioner extends Partitioner[String] {
  override def partition(word: String, num: Int): Int = {
    // log how many downstream partitions are available
    println("分区个数为" + num)
    if (word.contains("hello")) 0 else 1
  }
}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
object FlinkCustomerPartition {
  /** Demonstrates partitionCustom with a user-defined partitioner. */
  def main(args: Array[String]): Unit = {
    val environment: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // set the parallelism; if unset, the number of CPU cores is used by default
    environment.setParallelism(2)
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    // source stream
    val sourceStream: DataStream[String] = environment.fromElements("hello world", "spark flink", "hello world", "hive hadoop")
    // repartition with the custom partitioner; the key selector is the record itself
    val rePartition: DataStream[String] = sourceStream.partitionCustom(new MyPartitioner, x => x + "")
    // FIX: the result of map(...) was previously discarded and print() was
    // attached to the un-mapped stream, so the inspection map's output never
    // reached the sink. Bind the mapped stream and attach the sink to it.
    val inspected: DataStream[String] = rePartition.map(x => {
      // show which thread (i.e. which partition's subtask) processes each record
      println("数据的key为" + x + "线程为" + Thread.currentThread().getId)
      x
    })
    inspected.print()
    environment.execute()
  }
}
<dependency>
    <groupId>org.apache.bahir</groupId>
    <artifactId>flink-connector-redis_2.11</artifactId>
    <version>1.0</version>
</dependency>
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.redis.RedisSink
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig
import org.apache.flink.streaming.connectors.redis.common.mapper.{RedisCommand, RedisCommandDescription, RedisMapper}
object Stream2Redis {
  /** Writes (key, value) pairs parsed from text lines into Redis via a RedisSink. */
  def main(args: Array[String]): Unit = {
    // execution entry point
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // implicit TypeInformation instances for the Scala API
    import org.apache.flink.api.scala._
    // source data
    val lines: DataStream[String] = env.fromElements("hello world", "key value")
    // split each line once; first token is the key, second the value
    val pairs: DataStream[(String, String)] = lines.map { line =>
      val tokens = line.split(" ")
      (tokens(0), tokens(1))
    }
    // Jedis connection-pool settings
    val poolBuilder = new FlinkJedisPoolConfig.Builder
    poolBuilder.setHost("node03")
    poolBuilder.setPort(6379)
    poolBuilder.setTimeout(5000)
    poolBuilder.setMaxTotal(50)
    poolBuilder.setMaxIdle(10)
    poolBuilder.setMinIdle(5)
    val poolConfig: FlinkJedisPoolConfig = poolBuilder.build()
    // Redis sink wired to our key/value mapper
    val redisSink = new RedisSink[(String, String)](poolConfig, new MyRedisMapper)
    pairs.addSink(redisSink)
    // run the job
    env.execute("redisSink")
  }
}
/** Maps (key, value) tuples onto the Redis SET command for the RedisSink. */
class MyRedisMapper extends RedisMapper[(String, String)] {
  // every record is written with SET
  override def getCommandDescription: RedisCommandDescription =
    new RedisCommandDescription(RedisCommand.SET)

  // first tuple element is the Redis key
  override def getKeyFromData(data: (String, String)): String = data._1

  // second tuple element is the Redis value
  override def getValueFromData(data: (String, String)): String = data._2
}