一、Flink DateSet定制API详解(JAVA版) -003
Reduce
以element为粒度,对element进行合并操作。最后只能形成一个结果。
执行程序:
package code.book.batch.dataset.advance.api;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
/**
 * Demonstrates DataSet#reduce: pairwise folds all elements of the DataSet
 * into a single result value.
 *
 * NOTE: the published listing had all generic type parameters stripped
 * (extraction artifact); they are restored here so the code compiles —
 * reduce(Integer, Integer) requires DataSet&lt;Integer&gt; / ReduceFunction&lt;Integer&gt;.
 */
public class ReduceFunction001java {
    public static void main(String[] args) throws Exception {
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> text = env.fromElements(1, 2, 3, 4, 5, 6, 7);
        // Sum of all elements: 1+2+...+7 = 28
        DataSet<Integer> text2 = text.reduce(new ReduceFunction<Integer>() {
            @Override
            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {
                return intermediateResult + next;
            }
        });
        text2.print();
        // Product of all elements: 7! = 5040
        DataSet<Integer> text3 = text.reduce(new ReduceFunction<Integer>() {
            @Override
            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {
                return intermediateResult * next;
            }
        });
        text3.print();
        // Mixed fold: add when the running value is even, multiply when odd.
        DataSet<Integer> text4 = text.reduce(new ReduceFunction<Integer>() {
            @Override
            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {
                if (intermediateResult % 2 == 0) {
                    return intermediateResult + next;
                } else {
                    return intermediateResult * next;
                }
            }
        });
        text4.print();
        // Same sum as text2, but logs each intermediate step to show the
        // left-fold call sequence; collect() triggers execution.
        DataSet<Integer> text5 = text.reduce(new ReduceFunction<Integer>() {
            @Override
            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {
                System.out.println("intermediateResult=" + intermediateResult + " ,next=" + next);
                return intermediateResult + next;
            }
        });
        text5.collect();
    }
}
执行结果:
text2.print()
28
text3.print()
5040
text4.print()
157
text5.collect()
intermediateResult=1 ,next=2
intermediateResult=3 ,next=3
intermediateResult=6 ,next=4
intermediateResult=10 ,next=5
intermediateResult=15 ,next=6
intermediateResult=21 ,next=7
reduceGroup
对每一组的元素分别进行合并操作。与reduce类似,不过它能为每一组产生一个结果。
如果没有分组,就当作一个分组,此时和reduce一样,只会产生一个结果。
执行程序:
package code.book.batch.dataset.advance.api;
import org.apache.flink.api.common.functions.GroupReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
import java.util.Iterator;
public class GroupReduceFunction001java {
public static void main(String[] args) throws Exception {
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet text = env.fromElements(1, 2, 3, 4, 5, 6, 7);
DataSet text2 = text.reduceGroup(new GroupReduceFunction() {
@Override
public void reduce(Iterable iterable,
Collector collector) throws Exception {
int sum = 0;
Iterator itor = iterable.iterator();
while (itor.hasNext()) {
sum += itor.next();
}
collector.collect(sum);
}
});
text2.print();
DataSet> text3 = text.reduceGroup(
new GroupReduceFunction>() {
@Override
public void reduce(Iterable iterable,
Collector> collector)throws Exception {
int sum0 = 0;
int sum1 = 0;
Iterator itor = iterable.iterator();
while (itor.hasNext()) {
int v = itor.next();
if (v % 2 == 0) {
sum0 += v;
} else {
sum1 += v;
}
}
collector.collect(new Tuple2(sum0, sum1));
}
});
text3.print();
DataSet> data = env.fromElements(
new Tuple2("zhangsan", 1000), new Tuple2("lisi", 1001),
new Tuple2("zhangsan", 3000), new Tuple2("lisi", 1002));
DataSet> data2 = data.groupBy(0).reduceGroup(
new GroupReduceFunction, Tuple2>() {
@Override
public void reduce(Iterable> iterable,
Collector> collector) throws Exception {
int salary = 0;
String name = "";
Iterator> itor = iterable.iterator();
while (itor.hasNext()) {
Tuple2 t = itor.next();
name = t.f0;
salary += t.f1;
}
collector.collect(new Tuple2(name, salary));
}
});
data2.print();
}
}
执行结果:
text2.print()
28
text3.print()
(12,16)
data2.print()
(lisi,2003)
(zhangsan,4000)