package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* Flink程序的重启策略
*
* 1.固定重启次数,每一次延迟指定的时间
* 2.重启的次数不会超过指定的次数,如果超过,程序退出。
*/
public class RestartStrategyDemo1 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//设置重启策略,严格的说不是JobManager或者TaskManager重启,而是TaskManager中的subTask重启
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)));//每次异常5秒后重启,最多对3次异常进行处理,第4次异常会退出
//虽然设置了重启策略,但是异常之前的数据(中间结果)由于没有进行checkpointing,异常重启后会丢失异常之前的数据
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
wordAndOne.keyBy(t -> t.f0).sum(1).print();
env.execute();
//每次重启,中间结果会丢失
}
}
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* Flink程序的重启策略
*
* 如果程序开启的Checkpointing,默认的重启策略是无限重启!
*/
public class RestartStrategyDemo2 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//开启checkpoint,5秒钟进行一次checkpoint,即5秒钟会将状态保存到statebackend中(默认是保存到jobManager的内存中)
env.enableCheckpointing(5000);
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
wordAndOne.keyBy(t -> t.f0).sum(1).print();
env.execute();
//每次重启,中间结果不会丢失
}
}
指定时间段内可以重启的次数,每个时间段内重启次数限定,到了下一个时间段,重启次数更新。
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* Flink程序的重启策略
*
* 错误率重启策略(在一段时间内可也重启指定的次数,如果超过时间范围,重新计数)
*/
public class RestartStrategyDemo3 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//30秒内最多可也重启3次,每次重启延迟2秒
env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.seconds(30), Time.seconds(2)));
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
wordAndOne.keyBy(t -> t.f0).sum(1).print();
env.execute();
}
}
Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
不需要关心Key,只需要关心Value
对于valueState,flink自身的状态功能是在调了keyBy算子后的keyedStream再调sum或reduce算子实现的
这里不调sum或者value算子,而是自定义api实现功能
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* 将ValueState的底层实现
*
* Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
*
* ValueState是KeyedState中的一种
*
*/
public class ValueStateDemo1 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//设置重启策略
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
KeyedStream, String> keyedStream = wordAndOne.keyBy(t -> t.f0);
SingleOutputStreamOperator> res = keyedStream.map(new MapFunction, Tuple2>() {
private Integer counter = 0;
@Override
public Tuple2 map(Tuple2 tp) throws Exception {
Integer current = tp.f1;
counter += current;
tp.f1 = counter;
return tp;
}
});
res.print();
env.execute();
//in:
//spark
//spark
//flink
//flink
//flink
//flink
//hive
//out:
//1>(spark,1)
//1>(spark,2)
//4>(flink,1)
//4>(flink,2)
//4>(flink,3)
//4>(flink,4)
//1>(hive,3)
//对于同一个分区不同组的数据,这个自定义api没有办法分辨
//重启后垃圾处理器回收counter变量,数据清零
//为了区分同分区内的不同组,尝试用map结构来代替Integer
}
}
为了区分同分区内的不同组,尝试用map结构来代替Integer
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import java.util.HashMap;
import java.util.Map;
/**
* 将ValueState的底层实现
*
* Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
*
* ValueState是KeyedState中的一种
*
* 1.KeyedState底层是一个Map结构
* 2.如果想要容错,必须开启checkpointing,并且按照Flink的状态编程API进行编程(将中间结果保存都Flink特殊的变量中)
*
*/
public class ValueStateDemo2 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//开启checkpoint
env.enableCheckpointing(5000);
//设置重启策略
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
KeyedStream, String> keyedStream = wordAndOne.keyBy(t -> t.f0);
SingleOutputStreamOperator> res = keyedStream.map(new MapFunction, Tuple2>() {
private Map counter = new HashMap<>();
@Override
public Tuple2 map(Tuple2 tp) throws Exception {
String word = tp.f0;
Integer current = tp.f1;
Integer historyCount = counter.get(word);
if (historyCount == null) {
historyCount = 0;
}
int sum = historyCount + current;
//更新数据
counter.put(word, sum);
//输出数据
tp.f1 = sum;
return tp;
}
});
res.print();
env.execute();
}
}
可以正常区分同分区不同组的数据了,但是不论是否开启checkpointing重启后数据都会丢失
普通的map数据结构,flink不认识,没有办法存数据。还是需要使用flink的api
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import java.util.HashMap;
import java.util.Map;
/**
* 将ValueState的底层实现
*
* Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
*
* ValueState是KeyedState中的一种
*
* 1.KeyedState底层是一个Map结构
* 2.如果想要容错,必须开启checkpointing,并且按照Flink的状态编程API进行编程(将中间结果保存都Flink特殊的变量中)
*
* 使用Flink的ValueState编程API实现WordCount的功能
*
*/
public class ValueStateDemo3 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//开启checkpoint
env.enableCheckpointing(5000);
//设置重启策略
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> wordAndOne = lines.flatMap(new FlatMapFunction>() {
@Override
public void flatMap(String line, Collector> out) throws Exception {
if (line.startsWith("error")) {
throw new RuntimeException("有错误数据出现,抛出异常!");
}
String[] words = line.split(" ");
for (String word : words) {
out.collect(Tuple2.of(word, 1));
}
}
});
KeyedStream, String> keyedStream = wordAndOne.keyBy(t -> t.f0);
SingleOutputStreamOperator> res = keyedStream.map(new RichMapFunction, Tuple2>() {
private ValueState valueState;//valueState不用管key,与key相关的操作都在内部实现了
//前边代码块使用一个map来存数据,这里却只有一个integer,感觉上没有和key绑定在一起,但是实际上由于使用了keyedState,存取数据都会和对应的key绑定
//在open方法中初始化状态或恢复状态
@Override
public void open(Configuration parameters) throws Exception {
//定义状态描述器(描述状态的类型、名称)
ValueStateDescriptor stateDescriptor = new ValueStateDescriptor<>("wc-state", Integer.class);//如果这里包含泛型,那就需要使用typeInformation.of(new type)
//初始化或恢复状态(在状态存储的地方读状态)
valueState = getRuntimeContext().getState(stateDescriptor);
}
@Override
public Tuple2 map(Tuple2 input) throws Exception {
//String word = input.f0; //这个key没有用处,update内部能直接获取key
Integer current = input.f1;
//看似没有根据key来取,实际上内部会获取当前的key,根据当前的key取出对应的value
Integer history = valueState.value();
if (history == null) {
history = 0;
}
current += history;
//更新状态
valueState.update(current);
//输出数据
input.f1 = current;
return input;
}
});
res.print();
env.execute();
}
}
即使使用了flink的状态编程API,不设置checkpointing也不能保存状态,重启后数据清零。所以一定要开启checkpointing,来存储中间状态。
不需要关心Key,只需要关心Map
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* 将ValueState的底层实现
*
* Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
*
* ValueState是KeyedState中的一种
*
* 1.KeyedState底层是一个Map结构
* 2.如果想要容错,必须开启checkpointing,并且按照Flink的状态编程API进行编程(将中间结果保存都Flink特殊的变量中)
*
* ValueState : Map
* MapState : Map>
* ListState : Map>
*
*/
public class MapStateDemo {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//开启checkpoint
env.enableCheckpointing(5000);
//辽宁省,沈阳市,3000
//辽宁省,大连市,4000
//辽宁省,鞍山市,4000
//河北省,廊坊市,2000
//河北省,邢台市,3000
//河北省,石家庄市,2000
DataStreamSource lines = env.socketTextStream("localhost", 8888, "\n", 5);
//对数据进行整理
SingleOutputStreamOperator> tpStream = lines.map(new MapFunction>() {
@Override
public Tuple3 map(String line) throws Exception {
String[] fields = line.split(",");
String province = fields[0];
String city = fields[1];
int money = Integer.parseInt(fields[2]);
return Tuple3.of(province, city, money);
}
});
//按照省份进行keyBy,将同一个省份的数据分到同一个分区中,并且按照城市累加金额
//按照(省份,城市)keyBy的话,(省份,城市)可能导致同省份进入到不同的分区里
KeyedStream, String> keyedStream = tpStream.keyBy(t -> t.f0);
SingleOutputStreamOperator> res = keyedStream.map(new CityMoneyFunction());
res.print();
env.execute();
}
private static class CityMoneyFunction extends RichMapFunction, Tuple3> {
private MapState mapState;
@Override
public void open(Configuration parameters) throws Exception {
//定义MapStateDescriptor
MapStateDescriptor stateDescriptor = new MapStateDescriptor<>("city-money-state", String.class, Integer.class);
//初始化或恢复状态
mapState = getRuntimeContext().getMapState(stateDescriptor);
}
@Override
public Tuple3 map(Tuple3 input) throws Exception {
String city = input.f1;
Integer money = input.f2;
Integer history = mapState.get(city);//根据小key取小value
if (history == null) {
history = 0;
}
money += history;
//更新状态
mapState.put(city, money);
//输出数据
input.f2 = money;
return input;
}
}
}
不需要关心Key,只需要关心List
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.ArrayList;
import java.util.List;
/**
* 将ValueState的底层实现
*
* Flink的State分为两种:KeyedState(KeyBy之后对应的State),和OperatorState(没有keyBy的State)
*
* ValueState是KeyedState中的一种
*
* 1.KeyedState底层是一个Map结构
* 2.如果想要容错,必须开启checkpointing,并且按照Flink的状态编程API进行编程(将中间结果保存都Flink特殊的变量中)
*
* ValueState : Map
* MapState : Map>
* ListState : Map>
*
*/
public class ListStateDemo {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//开启checkpoint
env.enableCheckpointing(5000);
//将同一个用户,最近的10个行为保存起来
//u001,view
//u001,pay
//u002,view
//u002,view
DataStreamSource lines = env.socketTextStream("localhost", 8888, "\n", 5);
SingleOutputStreamOperator> tpStream = lines.map(new MapFunction>() {
@Override
public Tuple2 map(String value) throws Exception {
if (value.startsWith("error")) {
throw new RuntimeException("数据出问题了!");
}
String[] fields = value.split(",");
String uid = fields[0];
String event = fields[1];
return Tuple2.of(uid, event);
}
});
KeyedStream, String> keyedStream = tpStream.keyBy(t -> t.f0);
//将用一个用户的行为数据按照先后顺序保存起来
SingleOutputStreamOperator>> res = keyedStream.map(new UserEventFunction());
res.print();
env.execute();
}
private static class UserEventFunction extends RichMapFunction, Tuple2>> {
private ListState listState;
@Override
public void open(Configuration parameters) throws Exception {
//定义状态描述器
ListStateDescriptor stateDescriptor = new ListStateDescriptor<>("event-state", String.class);
//初始化或恢复状态
listState = getRuntimeContext().getListState(stateDescriptor);
}
@Override
public Tuple2> map(Tuple2 input) throws Exception {
String event = input.f1;
listState.add(event);
ArrayList events = (ArrayList) listState.get();
if (events.size() > 10) {
events.remove(0);
}
return Tuple2.of(input.f0, events);
// events是内部的引用,不需要更新
}
}
}
输入如下数据:(字段含义,用户ID,活动ID),活动可以重复览次
user01,activity01,view
user01,activity01,join
user01,activity02,view
user02,activity02,view
user02,activity02,view
user03,activity02,view
user02,activity02,join
user03,activity01,view
实时统计出各个活动,各种事件的次数和人数(次数出现就累计,人数要按照用户ID去重)
activity01,view,2,2
activity01,join,1,1
activity02,view,4,3
activity02,join,1,1
package cn._51doit.flink.day06;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;
import java.util.HashSet;
/**
* 使用Flink的状态,统计各个活动,各种事件的次数和人数
*
*/
public class ActivityCount {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(10000);
DataStreamSource lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator> tpStream = lines.map(new MapFunction>() {
@Override
public Tuple3 map(String value) throws Exception {
String[] fields = value.split(",");
String uid = fields[0];
String aid = fields[1];
String event = fields[2];
return Tuple3.of(uid, aid, event);
}
});
//按照活动ID和事件联合起来KeyBy
KeyedStream, Tuple2> keyedStream = tpStream.keyBy(new KeySelector, Tuple2>() {
@Override
public Tuple2 getKey(Tuple3 value) throws Exception {
return Tuple2.of(value.f1, value.f2);
}
});
SingleOutputStreamOperator> res = keyedStream.process(new ActivityCountFunction());
//process的作用和map相似,map是输入一条输出一条,process输入一条可以输出多条。像输出就collect一下
res.print();
env.execute();
}
private static class ActivityCountFunction extends KeyedProcessFunction, Tuple3, Tuple4> {
//泛型是 Key、输入和输出。Key是活动id和事件的联合,In是tp3,Out是时间联合+事件数+人数
private ValueState countState;
private ValueState> uidState;
@Override
public void open(Configuration parameters) throws Exception {
//初始化状态
//记录次数的状态
ValueStateDescriptor countStateDescriptor = new ValueStateDescriptor<>("count-state", Integer.class);
countState = getRuntimeContext().getState(countStateDescriptor);
//记录uid的状态(人数) 用HashSet去重,不可重复性,后边的用户ID会覆盖前边的用户id
ValueStateDescriptor> uidStateDescriptor = new ValueStateDescriptor<>("uid-state", TypeInformation.of(new TypeHint>() {}));
uidState = getRuntimeContext().getState(uidStateDescriptor);
}
@Override
public void processElement(Tuple3 input, Context ctx, Collector> out) throws Exception {
String uid = input.f0;
//统计次数
Integer history = countState.value();
if (history == null) {
history = 0;
}
int count = history + 1;
//更新状态
countState.update(count);
//统计人数
HashSet set = uidState.value();
if (set == null) {
set = new HashSet<>();
}
set.add(uid);
uidState.update(set);
//输出数据
out.collect(Tuple4.of(ctx.getCurrentKey().f0, ctx.getCurrentKey().f1, count, set.size()));
}
}
}