public class MyHDFSSupport {
    private static final Log logger = LogFactory.getLog(MyHDFSSupport.class);

    /** Per-thread writer state; each thread lazily gets its own {@link MyInformation}. */
    private static ThreadLocal<MyInformation> threadState = new ThreadLocal<MyInformation>() {
        @Override
        public MyInformation initialValue() {
            return new MyInformation();
        }
    };

    /**
     * Appends one flow event for the given project to this thread's HDFS stream,
     * rolling the underlying file on hour boundaries and on the size cap.
     *
     * @param project project name used to build the HDFS path
     * @param event   event payload to append
     * @throws URISyntaxException declared for callers; kept for interface compatibility
     */
    public static void sendFlowEvent(String project, String event) throws URISyntaxException {
        MyInformation info = threadState.get();
        // All stream work happens under the state's monitor: the idle-timeout
        // checker thread synchronizes on the same object before closing.
        synchronized (info) {
            info.checkHourEqual();    // 1. close the stream if the hour rolled over
            info.ensureOpen(project); // 2. open a stream if none is active
            info.write(event);        // 3. append the event
            info.closeForFileSize();  // 4. close once the file exceeds the size cap
        }
    }
}
=============
public class MyInformation {
    private static final Log logger = LogFactory.getLog(MyInformation.class);

    private static Configuration conf = null;
    static {
        conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    }

    // SimpleDateFormat is not thread-safe; one instance per thread.
    private static final ThreadLocal<SimpleDateFormat> HOURFORMAT = new ThreadLocal<SimpleDateFormat>() {
        protected SimpleDateFormat initialValue() {
            return new SimpleDateFormat("/yyyy/MM/dd/HH/");
        }
    };

    private String hdfsMasterIP;
    private String hdfsBackIP;
    private FSDataOutputStream hdfsStream;
    private String streamCreateHour; // hour segment ("/yyyy/MM/dd/HH/") the open stream was created in
    private long hdfsFileSize;       // bytes written to the current stream
    private long lastWriteTime;      // millis of the last successful write

    public MyInformation() {
        hdfsMasterIP = MonitorRpcHdfsConfig.getInstance().getHdfsIp();
        hdfsBackIP = MonitorRpcHdfsConfig.getInstance().getHdfsIp1();
        hdfsStream = null;
        streamCreateHour = null;
        hdfsFileSize = 0;
        lastWriteTime = 0;
        // Register with the idle-timeout monitor queue on creation.
        MonitorQueue.addObject(this);
    }

    /** Closes the stream when the wall-clock hour no longer matches the stream's creation hour. */
    public void checkHourEqual() {
        if (null == this.hdfsStream || null == this.streamCreateHour)
            return; // nothing open — nothing to roll
        boolean hourEqual = HOURFORMAT.get().format(new Date()).equals(this.streamCreateHour);
        if (!hourEqual) { // different hour: close so the next write opens a new hourly file
            this.close(false);
        }
    }

    /**
     * Opens an HDFS stream for the project if none is open, trying the master
     * namenode first and falling back to the backup; on a successful fallback
     * the two IPs are swapped so the working one is tried first next time.
     */
    public void ensureOpen(String projectName) {
        if (null != this.hdfsStream)
            return; // already open
        MonitorRpcHdfsConfig config = MonitorRpcHdfsConfig.getInstance();
        final String dateStr = HOURFORMAT.get().format(new Date());
        String path = "/BDM/" + projectName + dateStr + UUID.randomUUID().toString();
        FileSystem hdfsFileSystem = null;
        FSDataOutputStream fsStream = null;
        try {
            URI masterUri = new URI(config.getHdfsScheme() + this.hdfsMasterIP + ":" + config.getHdfsPort());
            hdfsFileSystem = FileSystem.get(masterUri, conf);
            fsStream = hdfsFileSystem.create(new Path(path), true);
        } catch (Exception e) {
            logger.error("failed to obtain file system via master ip", e);
            try {
                URI backupUri = new URI(config.getHdfsScheme() + this.hdfsBackIP + ":" + config.getHdfsPort());
                hdfsFileSystem = FileSystem.get(backupUri, conf);
                fsStream = hdfsFileSystem.create(new Path(path), true);
                this.exchangeIP();
            } catch (Exception e1) {
                // Pass the throwable so the stack trace is preserved
                // (the original logged only e1.toString()).
                logger.error("failed to obtain file system via slave ip", e1);
            }
        }
        // Record the new stream state only when a stream was actually created.
        if (null != fsStream) {
            this.hdfsStream = fsStream;
            this.streamCreateHour = dateStr;
            this.hdfsFileSize = 0;
            this.lastWriteTime = System.currentTimeMillis();
        }
    }

    /**
     * Appends the event to the open stream; on I/O failure the stream is closed
     * so the next call reopens it.
     */
    public void write(String event) {
        if (null == this.hdfsStream) // no stream — ensureOpen failed or roll pending
            return;
        try {
            // Encode as UTF-8 explicitly: the original writeBytes(String) discards
            // the high byte of every char, corrupting any non-ASCII event text,
            // and event.length() counted chars rather than bytes for the size cap.
            byte[] payload = event.getBytes(java.nio.charset.StandardCharsets.UTF_8);
            this.hdfsStream.write(payload);
            this.hdfsFileSize += payload.length;
            this.lastWriteTime = System.currentTimeMillis();
        } catch (IOException e) {
            logger.error("write to hdfs stream failed, closing stream", e);
            this.close(true);
        }
    }

    /** Closes the stream once the configured maximum file size is reached. */
    public void closeForFileSize() {
        if (null == this.hdfsStream) {
            return;
        }
        if (this.hdfsFileSize >= MonitorRpcHdfsConfig.getInstance().getHdfsFileMax()) {
            this.close(false);
        }
    }

    /** Closes the stream when it has been idle longer than the configured span. */
    public void closeForWriteIdleTime() {
        if (null == this.hdfsStream) {
            return;
        }
        MonitorRpcHdfsConfig config = MonitorRpcHdfsConfig.getInstance();
        if (System.currentTimeMillis() - this.lastWriteTime >= config.getHdfsFileOperationTimeSpan()) {
            this.close(false);
        }
    }

    /**
     * Closes the current stream (best-effort) and resets all per-stream state.
     *
     * @param exception true when triggered by a write failure (logged for diagnosis)
     */
    private void close(boolean exception) {
        logger.debug("MyInformation.close() is invoked..." + (exception ? " after a write failure" : ""));
        if (null != hdfsStream) {
            try {
                hdfsStream.close();
            } catch (Exception e) {
                logger.error("closing hdfs stream failed", e);
            }
        }
        // Reset everything so the next write opens a fresh file.
        hdfsStream = null;
        streamCreateHour = null;
        hdfsFileSize = 0L;
        lastWriteTime = 0;
    }

    /** Swaps master and backup IPs after a successful fallback. */
    public void exchangeIP() {
        String oldMaster = this.hdfsMasterIP;
        this.hdfsMasterIP = this.hdfsBackIP;
        this.hdfsBackIP = oldMaster;
    }
}
===
public class MonitorQueue {
    // BlockingQueue method cheat-sheet (why offer/poll are used below):
    //   poll   — returns null when empty (never blocks)
    //   remove — throws NoSuchElementException when empty
    //   take   — blocks until an element is available
    //   put    — blocks when full
    //   add    — throws when full
    //   offer  — returns immediately, no exception when full
    // (LinkedBlockingQueue is unbounded here, so offer never rejects.)

    /** Registry queue: newly created per-thread writer states waiting to be picked up. */
    public static BlockingQueue<MyInformation> objectQueue = new LinkedBlockingQueue<MyInformation>();

    public static void addObject(MyInformation obj) {
        objectQueue.offer(obj);
    }

    public static MyInformation getObject() {
        return objectQueue.poll();
    }

    static {
        // Start the single monitor thread once, at class load. Marked daemon and
        // named: the original non-daemon thread ran an endless loop and would
        // keep the JVM alive after all application threads had finished.
        Thread monitor = new Thread(new MonitorRunnable(), "hdfs-stream-idle-monitor");
        monitor.setDaemon(true);
        monitor.start();
    }
}
===
public class MonitorRunnable implements Runnable {
    // Every writer state ever registered.
    // NOTE(review): entries are never removed, so this list grows without bound
    // as threads come and go — consider evicting states whose owning thread has
    // died. Left unchanged here because no removal criterion is visible.
    private ArrayList<MyInformation> informations = new ArrayList<MyInformation>();

    public MonitorRunnable() {
    }

    /**
     * Periodically closes idle HDFS streams: drains newly registered states from
     * MonitorQueue, then asks each known state to close itself if it has been
     * idle too long, then sleeps for the configured period.
     */
    @Override
    public void run() {
        MonitorRpcHdfsConfig config = MonitorRpcHdfsConfig.getInstance();
        while (true) {
            // Pick up any writer states registered since the last pass.
            MyInformation myInfor;
            while ((myInfor = MonitorQueue.getObject()) != null) {
                informations.add(myInfor);
            }
            // Lock each state before closing — the same monitor the writer
            // thread holds in sendFlowEvent, so a write cannot race the close.
            for (MyInformation information : informations) {
                synchronized (information) {
                    information.closeForWriteIdleTime();
                }
            }
            // Sleep between passes to avoid hogging the writer locks.
            try {
                Thread.sleep(config.getHdfsCheckFsstreamPeriod());
            } catch (InterruptedException e) {
                // Restore the interrupt flag and exit — the original swallowed
                // the interruption (printStackTrace) and looped forever.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}