//Register a TableSource
val kafkaTableSource = new KafkaCsvTableSource(
"foo",
properties,
new CsvRowDeserializationSchema(typeInfo),
typeInfo)
tableEnv.registerTableSource("KafkaCsvTable", kafkaTableSource)
val kafkaCsvTable = tableEnv.scan("KafkaCsvTable")
Once we have a Table, the Table API can be used to filter the data (the properties and typeInfo used above are defined in the full FlinkKafkaDemo.scala listing below):
val filterResult = kafkaCsvTable.where('imsi like "460%").select("imsi,lac,cell")
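For reference (not part of the original program), the same filter could also be expressed as a SQL query over the registered table; depending on the Flink version the method is tableEnv.sql(...) or tableEnv.sqlQuery(...):
val sqlResult = tableEnv.sql("SELECT imsi, lac, cell FROM KafkaCsvTable WHERE imsi LIKE '460%'")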
Dynamically adding fields to the DataStream
Convert the Table into a DataStream:
val dsRow: DataStream[Row] = tableEnv.toAppendStream(filterResult)
Add the new fields:
val newDsRows = dsRow.map(row => {
// Create a new Row with two extra fields appended to the original imsi/lac/cell
val ret = new Row(row.getArity() + 2)
for (i <- 0 until row.getArity()) {
ret.setField(i, row.getField(i))
}
// Field 1 is lac and field 2 is cell: flag the record when they match
val isSpecifiedLocation = ret.getField(1).equals(ret.getField(2))
ret.setField(row.getArity(), isSpecifiedLocation)
ret.setField(row.getArity() + 1, System.currentTimeMillis())
ret
})
Then register the newly generated DataStream as a Table and apply the final filter. Note that registering it requires the stream to carry a proper RowTypeInfo; see the FLINK-6500 fix in the test section below.
tableEnv.registerDataStream("newTable", newDsRows)
val newKafkaCsvTable = tableEnv.scan("newTable")
val newResult = newKafkaCsvTable.filter('isSpecifiedLocation === true).select("imsi,lac,cell,isSpecifiedLocation,timestamp")
Writing data from Flink to Kafka
This post uses Flink's Kafka09JsonTableSink class to write the result directly to Kafka as JSON (the same properties object is reused for the producer):
val sink = new Kafka09JsonTableSink("bar", properties, new FlinkFixedPartitioner[Row])
newResult.writeToSink(sink)
Test case
Run:
./bin/flink run -c com.woople.streaming.scala.examples.kafka.FlinkKafkaDemo /opt/flink-tutorials-1.0-bundle.jar
The first run failed with the following exception:
org.apache.flink.table.api.TableException: An input of GenericTypeInfo cannot be converted to Table. Please specify the type of the input with a RowTypeInfo.
A search turned up FLINK-6500; following the approach described there, the problem was solved by adding this line of code:
implicit val tpe: TypeInformation[Row] = new RowTypeInfo(types, names)
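As a rough end-to-end illustration (the sample CSV values, the JSON field order and the timestamp below are invented, not taken from an actual run): producing lines such as
4600001234,4131,4131
4600005678,4131,20951
to the foo topic should yield a JSON record on the bar topic only for the first line, whose lac equals cell, roughly of the form
{"imsi":"4600001234","lac":"4131","cell":"4131","isSpecifiedLocation":true,"timestamp":1500000000000}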
The complete source files follow.
KafkaCsvTableSource.java
package com.woople.flink.streaming.connectors.kafka;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.streaming.util.serialization.DeserializationSchema;
import org.apache.flink.table.sources.StreamTableSource;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
import java.util.Properties;
public class KafkaCsvTableSource implements StreamTableSource<Row> {
/** The Kafka topic to consume. */
private final String topic;
/** Properties for the Kafka consumer. */
private final Properties properties;
/** Deserialization schema to use for Kafka records. */
private final DeserializationSchema<Row> deserializationSchema;
/** Type information describing the result type. */
private final TypeInformation<Row> typeInfo;
/**
* Creates a generic Kafka {@link StreamTableSource}.
*
* @param topic Kafka topic to consume.
* @param properties Properties for the Kafka consumer.
* @param deserializationSchema Deserialization schema to use for Kafka records.
* @param typeInfo Type information describing the result type.
*/
public KafkaCsvTableSource(
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema,
TypeInformation<Row> typeInfo) {
this.topic = Preconditions.checkNotNull(topic, "Topic");
this.properties = Preconditions.checkNotNull(properties, "Properties");
this.deserializationSchema = Preconditions.checkNotNull(deserializationSchema, "Deserialization schema");
this.typeInfo = Preconditions.checkNotNull(typeInfo, "Type information");
}
/**
* NOTE: This method is for internal use only for defining a TableSource.
* Do not use it in Table API programs.
*/
@Override
public DataStream<Row> getDataStream(StreamExecutionEnvironment env) {
// Version-specific Kafka consumer
FlinkKafkaConsumerBase<Row> kafkaConsumer = getKafkaConsumer(topic, properties, deserializationSchema);
return env.addSource(kafkaConsumer);
}
@Override
public TypeInformation<Row> getReturnType() {
return typeInfo;
}
/**
* Returns the version-specific Kafka consumer.
*
* @param topic Kafka topic to consume.
* @param properties Properties for the Kafka consumer.
* @param deserializationSchema Deserialization schema to use for Kafka records.
* @return The version-specific Kafka consumer
*/
private FlinkKafkaConsumerBase<Row> getKafkaConsumer(String topic, Properties properties, DeserializationSchema<Row> deserializationSchema) {
return new FlinkKafkaConsumer010<>(topic, deserializationSchema, properties);
}
/**
* Returns the deserialization schema.
*
* @return The deserialization schema
*/
protected DeserializationSchema<Row> getDeserializationSchema() {
return deserializationSchema;
}
@Override
public String explainSource() {
return "";
}
}
CsvRowDeserializationSchema.java
package com.woople.flink.streaming.connectors.kafka;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.util.serialization.DeserializationSchema;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
public class CsvRowDeserializationSchema implements DeserializationSchema<Row> {
/** Type information describing the result type. */
private final TypeInformation<Row> typeInfo;
/** Field names to parse. Indices match fieldTypes indices. */
private final String[] fieldNames;
/** Types to parse fields as. Indices match fieldNames indices. */
private final TypeInformation<?>[] fieldTypes;
/** Flag indicating whether to fail on a missing field. */
private boolean failOnMissingField;
/**
* Creates a CSV deserialization schema for the given fields and types.
*
* @param typeInfo Type information describing the result type. The field names and types
* are used to build the produced rows.
*/
public CsvRowDeserializationSchema(TypeInformation<Row> typeInfo) {
Preconditions.checkNotNull(typeInfo, "Type information");
this.typeInfo = typeInfo;
this.fieldNames = ((RowTypeInfo) typeInfo).getFieldNames();
this.fieldTypes = ((RowTypeInfo) typeInfo).getFieldTypes();
}
@Override
public Row deserialize(byte[] message) throws IOException {
try {
// Split the raw message on commas; every value is stored as a String,
// matching the all-STRING row type used in the demo
String messages = new String(message, StandardCharsets.UTF_8);
String[] messagesArray = messages.split(",");
Row row = new Row(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) {
row.setField(i, messagesArray[i]);
}
return row;
} catch (Throwable t) {
throw new IOException("Failed to deserialize JSON object.", t);
}
}
@Override
public boolean isEndOfStream(Row nextElement) {
return false;
}
@Override
public TypeInformation<Row> getProducedType() {
return typeInfo;
}
/**
* Configures the failure behaviour if a field is missing.
*
* <p>Note: this flag is stored but not yet consulted by {@link #deserialize(byte[])}; a record
* with too few fields currently fails with an IOException regardless of this setting.
*
* @param failOnMissingField Flag indicating whether to fail or not on a missing field.
*/
public void setFailOnMissingField(boolean failOnMissingField) {
this.failOnMissingField = failOnMissingField;
}
}
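As a quick sanity check of the deserializer (illustrative only; the sample values are invented, not from the original post), it can be exercised directly with the same row type used in the demo below:
val typeInfo = Types.ROW_NAMED(Array("imsi", "lac", "cell"), Types.STRING, Types.STRING, Types.STRING)
val schema = new CsvRowDeserializationSchema(typeInfo)
val row = schema.deserialize("4600001234,4131,4131".getBytes)
// row.getArity == 3; field 1 (lac) equals field 2 (cell), so this record would be flagged downstream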
FlinkKafkaDemo.scala
package com.woople.streaming.scala.examples.kafka
import java.util.Properties
import com.woople.flink.streaming.connectors.kafka.{CsvRowDeserializationSchema, KafkaCsvTableSource}
import org.apache.flink.api.common.typeinfo.{TypeInformation, Types}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.Kafka09JsonTableSink
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.types.Row
object FlinkKafkaDemo {
def main(args: Array[String]) {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tableEnv = TableEnvironment.getTableEnvironment(env)
val typeInfo = Types.ROW_NAMED(Array("imsi","lac","cell"), Types.STRING, Types.STRING, Types.STRING)
val properties = new Properties()
properties.setProperty("bootstrap.servers", "10.1.236.66:6667")
properties.setProperty("group.id", "test")
//Register a TableSource
val kafkaTableSource = new KafkaCsvTableSource(
"foo",
properties,
new CsvRowDeserializationSchema(typeInfo),
typeInfo)
tableEnv.registerTableSource("KafkaCsvTable", kafkaTableSource)
val kafkaCsvTable = tableEnv.scan("KafkaCsvTable")
val filterResult = kafkaCsvTable.where('imsi like "460%").select("imsi,lac,cell")
val dsRow: DataStream[Row] = tableEnv.toAppendStream(filterResult)
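// The nested block below provides an implicit RowTypeInfo for the map on Rows, so the resulting
// stream is not typed as GenericTypeInfo (see the FLINK-6500 note above)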
{
val types = Array[TypeInformation[_]](
Types.STRING,
Types.STRING,
Types.STRING,
Types.BOOLEAN,
Types.LONG)
val names = Array("imsi","lac","cell","isSpecifiedLocation","timestamp")
implicit val tpe: TypeInformation[Row] = new RowTypeInfo(types, names)
val newDsRows = dsRow.map(row => {
val ret = new Row(row.getArity() + 2)
for (i <- 0 until row.getArity()) {
ret.setField(i, row.getField(i))
}
val isSpecifiedLocation = ret.getField(1).equals(ret.getField(2))
ret.setField(row.getArity(), isSpecifiedLocation)
ret.setField(row.getArity()+1, System.currentTimeMillis())
ret
})
tableEnv.registerDataStream("newTable", newDsRows)
val newKafkaCsvTable = tableEnv.scan("newTable")
val newResult = newKafkaCsvTable.filter('isSpecifiedLocation === true).select("imsi,lac,cell,isSpecifiedLocation,timestamp")
val sink = new Kafka09JsonTableSink("bar", properties, new FlinkFixedPartitioner[Row])
newResult.writeToSink(sink)
env.execute("Flink kafka demo")
}
}
}