Flink Custom Sink: Writing to HDFS

1. Add the dependencies to pom.xml.


<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-filesystem_${scala.binary.version}</artifactId>
    <version>${flink.version}</version>
</dependency>

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>${hadoop.version}</version>
    <scope>provided</scope>
</dependency>

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>${hadoop.version}</version>
    <scope>provided</scope>
    <exclusions>
        <exclusion>
            <groupId>xml-apis</groupId>
            <artifactId>xml-apis</artifactId>
        </exclusion>
    </exclusions>
</dependency>

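The three placeholders above are assumed to be defined in the POM's <properties> block; a sketch with illustrative versions (not taken from the original):

<properties>
    <scala.binary.version>2.11</scala.binary.version>
    <flink.version>1.9.1</flink.version>
    <hadoop.version>2.7.7</hadoop.version>
</properties>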
2. The HDFS helper class.

import java.io.{BufferedReader, InputStreamReader}
import java.util.{ArrayList => JArrayList, List => JList}

import org.apache.commons.lang3.StringUtils // assumed commons-lang3; commons-lang's StringUtils works the same way
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

object HdfsUtil {
  private val LOGGER: Logger = LoggerFactory.getLogger(HdfsUtil.getClass)

  /**
    * Whether the path is a directory
    */
  def isDir(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.isDirectory(new Path(path))
  }

  /**
    * Whether the path is a directory
    */
  def isDir(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.isDirectory(path)
  }

  /**
    * Whether the path is a file
    */
  def isFile(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.isFile(new Path(path))
  }

  /**
    * Whether the path is a file
    */
  def isFile(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.isFile(path)
  }

  /**
    * Create a file
    */
  def createFile(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.createNewFile(new Path(path))
  }

  /**
    * Create a file
    */
  def createFile(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.createNewFile(path)
  }

  /**
    * Create a directory
    */
  def createDir(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.mkdirs(new Path(path))
  }

  /**
    * Create a directory
    */
  def createDir(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.mkdirs(path)
  }

  /**
    * Whether the path exists
    */
  def exists(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.exists(new Path(path))
  }

  /**
    * Whether the path exists
    */
  def exists(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.exists(path)
  }

  /**
    * Delete a path (recursively)
    */
  def delete(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.delete(new Path(path), true)
  }

  /**
    * Delete a path (recursively)
    */
  def delete(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.delete(path, true)
  }

  /**
    * Append a line to a file
    */
  def append(hdfs: FileSystem, path: String, content: String): Boolean = {
    if(StringUtils.isEmpty(path) || content == null) {
      return false
    }

    if(!exists(hdfs, path)) {
      createFile(hdfs, path)
    }
    // Make sure append is enabled (dfs.support.append)
    hdfs.getConf.setBoolean("dfs.support.append", true)

    try {
      val append = hdfs.append(new Path(path))
      append.write(content.getBytes("UTF-8"))
      append.write(10) // newline (LF)
      append.flush()
      append.close()
      true
    } catch {
      case e: Exception => {
        LOGGER.error(s"append file exception, path{$path}, content{$content}", e)
        false
      }
    }
  }

  /**
    * Read a file
    */
  def read(hdfs: FileSystem, file: String): Array[Byte] = {
    val result = new Array[Byte](0)
    if(StringUtils.isEmpty(file)) {
      return result
    }
    if(!exists(hdfs, file)) {
      // Bail out early when the file does not exist
      return result
    }

    var isr: InputStreamReader = null
    var br: BufferedReader = null
    try {
      val path = new Path(file)
      val inputStream = hdfs.open(path)
      isr = new InputStreamReader(inputStream)
      br = new BufferedReader(isr)

      val content = new StringBuilder
      var line: String = br.readLine()
      while (line != null) {
        content ++= line
        line = br.readLine()
      }

      content.toString().getBytes("UTF-8")
    } catch {
      case e: Exception => {
        LOGGER.error(s"read file exception, file{$file}", e)
        result
      }
    } finally {
      // Close quietly; the readers may be null if open() failed
      if (br != null) {
        try { br.close() } catch { case _: Exception => }
      }
      if (isr != null) {
        try { isr.close() } catch { case _: Exception => }
      }
    }
  }

  /**
    * Upload a local file
    */
  def uploadLocalFile(hdfs: FileSystem, localPath: String, hdfsPath: String): Boolean = {
    if(StringUtils.isEmpty(localPath) || StringUtils.isEmpty(hdfsPath)) {
      return false
    }

    val src = new Path(localPath)
    val dst = new Path(hdfsPath)
    hdfs.copyFromLocalFile(src, dst)
    true
  }

  /**
    * List all entries under a directory
    */
  def list(hdfs: FileSystem, path: String): JList[String] = {
    val list: JList[String] = new JArrayList[String]()
    if(StringUtils.isEmpty(path)) {
      return list
    }

    val stats = hdfs.listStatus(new Path(path))
    for (stat <- stats) {
      // getPath.getName: the file name only, without the path
      // getPath.getParent: the parent directory, not the full path
      // getPath.toString: the full path
      if (stat.isFile || stat.isDirectory || stat.isSymlink) {
        list.add(stat.getPath.toString)
      }
    }

    list
  }
}
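A quick sanity check of the helper from a main method (a sketch; the namenode address and paths here are placeholders, not from the original):

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem

object HdfsUtilDemo {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    conf.set("fs.defaultFS", "hdfs://namenode:9000") // placeholder address
    val fs = FileSystem.get(conf)

    HdfsUtil.createDir(fs, "/tmp/flink-demo")
    HdfsUtil.append(fs, "/tmp/flink-demo/demo.log", "hello hdfs")

    // list returns a java.util.List, so iterate by index
    val entries = HdfsUtil.list(fs, "/tmp/flink-demo")
    for (i <- 0 until entries.size()) println(entries.get(i))

    fs.close()
  }
}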

3. The custom sink.

import java.util.{Date, Properties}

import com.igg.flink.tool.common.Constant
import com.igg.flink.tool.member.bean.MemberLogInfo
import com.igg.flink.tool.utils.{DateUtil, FileUtil, HdfsUtil}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.hadoop.fs.FileSystem

@SerialVersionUID(10000L)
class HdfsSink extends RichSinkFunction[MemberLogInfo] {

  var default_fs: String = null
  var conf: org.apache.hadoop.conf.Configuration = null
  var fs: FileSystem = null
  var rabbitmqProps: Properties = null

  override def open(parameters: Configuration): Unit = {
    // Load the configuration file
    rabbitmqProps = FileUtil.loadProperties(Constant.CONFIG_NAME_RABBITMQ)
    default_fs = rabbitmqProps.getProperty("hdfs.fs.default.fs")

    conf = new org.apache.hadoop.conf.Configuration()
    // Use HDFS; fs.defaultFS points at the namenode
    // Port 9000 serves filesystem metadata operations
    conf.set("fs.defaultFS", default_fs)
    // Optionally disable the HDFS FileSystem cache
    // conf.setBoolean("fs.hdfs.impl.disable.cache", true)
    fs = FileSystem.get(conf)

    // Create the target directory up front
    HdfsUtil.createDir(fs, rabbitmqProps.getProperty("hdfs.save.path.login"))
  }

  override def invoke(value: MemberLogInfo, context: SinkFunction.Context[_]): Unit = {
    // File name: one log file per day
    val fileName = DateUtil.format(new Date(value.getTimestamp), DateUtil.yyyyMMddSpt) + ".log"
    // Output line: iggid and type, tab-separated
    val content = value.getIggid + "\t" + value.getType

    HdfsUtil.append(fs, rabbitmqProps.getProperty("hdfs.save.path.login") + fileName, content)
  }

  override def close(): Unit = {
    if (fs != null) {
      try {
        fs.close()
      } catch {
        case _: Exception => {}
      }
    }
  }
}
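Wiring the sink into a job is then a one-liner. A minimal sketch, assuming a source DataStream[MemberLogInfo] already exists (the source itself is omitted here as a placeholder):

import com.igg.flink.tool.member.bean.MemberLogInfo
import org.apache.flink.streaming.api.scala._

object HdfsSinkJob {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Placeholder: e.g. a RabbitMQ or Kafka source producing MemberLogInfo
    val source: DataStream[MemberLogInfo] = ???

    // Keep parallelism at 1: the sink appends to one file per day,
    // and HDFS allows only a single writer per file
    source.addSink(new HdfsSink).setParallelism(1)

    env.execute("hdfs-sink-job")
  }
}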

4. Summary.

(1) A custom sink only needs to extend SinkFunction or RichSinkFunction. The difference is that RichSinkFunction provides richer lifecycle methods such as open() and close(), as the sketch below illustrates.
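For contrast, a minimal sketch of the plain-SinkFunction variant (illustrative only, not from the original code):

import org.apache.flink.streaming.api.functions.sink.SinkFunction

// A bare SinkFunction has only invoke(): no open()/close(), so there is no
// lifecycle hook for building per-subtask resources such as a FileSystem
class ConsoleSink extends SinkFunction[String] {
  override def invoke(value: String, context: SinkFunction.Context[_]): Unit = {
    println(value)
  }
}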

(2) Writing to HDFS here uses the raw HDFS client API. This only suits a single writer: HDFS permits one writer per file, so the sink must not run with parallelism greater than 1, or multiple subtasks would append to the same daily file.

(3) For multi-threaded (parallel) writes to HDFS, use the built-in StreamingFileSink or BucketingSink instead; see my earlier articles for details. A brief sketch follows below.
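A minimal StreamingFileSink sketch (available since Flink 1.6; the HDFS path here is a placeholder):

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink

val hdfsSink: StreamingFileSink[String] = StreamingFileSink
  .forRowFormat(new Path("hdfs://namenode:9000/logs/login"), new SimpleStringEncoder[String]("UTF-8"))
  .build()

// Each parallel subtask writes its own part files, so parallelism > 1 is safe
// stream.addSink(hdfsSink)

Note that StreamingFileSink relies on checkpointing: in-progress part files are only rolled to the finished state when a checkpoint completes.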
