Counting New Users with SparkSQL

Raw data (one record per line: ip,uid,access_time,url), placed in log1.txt under the data folder of the IDEA project:

192.168.33.6,hunter,2017-09-15 10:30:20,/a
192.168.33.7,hunter,2017-09-15 10:30:26,/b
192.168.33.6,jack,2017-09-15 10:30:27,/a
192.168.33.8,tom,2017-09-15 10:30:28,/b
192.168.33.9,rose,2017-09-15 10:30:30,/b
192.168.33.10,julia,2017-09-15 10:30:40,/c
192.168.33.16,hunter,2017-09-16 10:30:20,/a
192.168.33.18,jerry,2017-09-16 10:30:30,/b
192.168.33.26,jack,2017-09-16 10:30:40,/a
192.168.33.18,polo,2017-09-16 10:30:50,/b
192.168.33.39,nissan,2017-09-16 10:30:53,/b
192.168.33.39,nissan,2017-09-16 10:30:55,/a
192.168.33.39,nissan,2017-09-16 10:30:58,/c
192.168.33.20,ford,2017-09-16 10:30:54,/c
192.168.33.46,hunter,2017-09-17 10:30:21,/a
192.168.43.18,jerry,2017-09-17 10:30:22,/b
192.168.43.26,tom,2017-09-17 10:30:23,/a
192.168.53.18,bmw,2017-09-17 10:30:24,/b
192.168.63.39,benz,2017-09-17 10:30:25,/b
192.168.33.25,haval,2017-09-17 10:30:30,/c
192.168.33.10,julia,2017-09-17 10:30:40,/c

Read the file with SparkSQL and register it as a temporary view named log1.
TODO: Count the daily active users (any user with at least one access record that day, keeping only the earliest record), deduplicated by user.

select
ip,uid,access_time,url,day
from
(
    select
    ip,uid,access_time,url,day,
    row_number() over(partition by uid,day order by access_time) rn
    from
    (select
    ip,uid,access_time,url,
    date_format(access_time,'yyyy-MM-dd') day
    from log1
    ) t
    order by day
) t1
where rn=1

Result
Active users on 2017-09-15: hunter, tom, julia, jack, rose
Active users on 2017-09-16: hunter, nissan, jack, ford, polo, jerry
Active users on 2017-09-17: jerry, julia, haval, tom, hunter, benz, bmw
From this result we can go on to compute retained users and the retention rate.

Retained users: users who logged in on 2017-09-15 and logged in again on 2017-09-16.

Retention rate: the share of retained users among the new (active) users of the first day.
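
A minimal sketch of that retention query for 2017-09-15 → 2017-09-16, in the same style as the other queries in this post (the t0/t1 aliases are my own, not from the original post): join the 2017-09-15 actives against the 2017-09-16 actives and divide the overlap by the first day's count.

select
count(distinct t1.uid) / count(distinct t0.uid) retention_rate
from
(
    select distinct uid
    from log1
    where date_format(access_time,'yyyy-MM-dd') = '2017-09-15'
) t0
left join
(
    select distinct uid
    from log1
    where date_format(access_time,'yyyy-MM-dd') = '2017-09-16'
) t1
on t0.uid=t1.uid

On the sample data this should report hunter and jack as retained, i.e. 2 / 5 = 0.4.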

+------+----------+
|   uid|       day|
+------+----------+
|hunter|2017-09-15|
|  jack|2017-09-15|
| julia|2017-09-15|
|   tom|2017-09-15|
|  rose|2017-09-15|
|nissan|2017-09-16|
|  jack|2017-09-16|
|hunter|2017-09-16|
|  ford|2017-09-16|
|  polo|2017-09-16|
| jerry|2017-09-16|
| jerry|2017-09-17|
| julia|2017-09-17|
| haval|2017-09-17|
|   tom|2017-09-17|
|hunter|2017-09-17|
|  benz|2017-09-17|
|   bmw|2017-09-17|
+------+----------+

Compute, for 2017-09-17: today's new users, yesterday's new users, and the cumulative historical user count (a new user is one who is active that day but never appeared on any earlier day).

select
count(distinct t.uid) cnt
from
(select
    ip,uid,access_time,url,
    date_format(access_time,'yyyy-MM-dd') day
    from log1 where date_format(access_time,'yyyy-MM-dd') = '2017-09-17'
) t
left join -- historical users
(
    select
    ip,uid,access_time,url,
    date_format(access_time,'yyyy-MM-dd') day
    from log1 where date_format(access_time,'yyyy-MM-dd') <= '2017-09-16'
) t1
on t.uid=t1.uid
where t1.uid is null
-- group by t.uid,t.day
union all -- new users yesterday (2017-09-16)
select
count(distinct t.uid) cnt
from
(select
    ip,uid,access_time,url,
    date_format(access_time,'yyyy-MM-dd') day
    from log1 where date_format(access_time,'yyyy-MM-dd') = '2017-09-16'
) t
left join
(-- historical users
    select
    ip,uid,access_time,url,
    date_format(access_time,'yyyy-MM-dd') day
    from log1 where date_format(access_time,'yyyy-MM-dd') <= '2017-09-15'
) t1
on t.uid=t1.uid
where t1.uid is null
-- group by t.uid,t.day
union all -- cumulative historical users
select
    count(distinct uid) cnt
from log1

Result
New users on 2017-09-17: 3
New users on 2017-09-16: 4
Cumulative total: 12

+---+
|cnt|
+---+
|  3|
|  4|
| 12|
+---+

Optimization

count(distinct ...) is usually slow; it can often be replaced with a group by followed by a plain count, i.e.:

select
count(1)
from
(
    select
    uid
    from log1
    group by uid
) t
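
As an illustration (a sketch, not from the original post), the "2017-09-17 new users" branch of the query above can be rewritten along the same lines: deduplicate uid on each side with group by, then count the anti-joined rows with count(1).

select
count(1) cnt
from
(
    select uid
    from log1
    where date_format(access_time,'yyyy-MM-dd') = '2017-09-17'
    group by uid
) t
left join
(
    select uid
    from log1
    where date_format(access_time,'yyyy-MM-dd') <= '2017-09-16'
    group by uid
) t1
on t.uid=t1.uid
where t1.uid is null

This should still return 3 on the sample data, but the join and the final count now operate on already-deduplicated uids.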

IDEA code

package com.ruozedata.bigdata.sqlzc

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object LogApp {

  case class logsrc(ip: String, uid: String, access_time: String, url: String)

  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("SparkSessionApp")
      .getOrCreate()

    spark.sparkContext.setLogLevel("ERROR")
    val log1RDD: RDD[String] = spark.sparkContext.textFile("data/log1.txt")

    import spark.implicits._
    // parse each line (ip,uid,access_time,url) into a logsrc record and register the temp view "log1"
    log1RDD.map(x => {
      val splits: Array[String] = x.split(",")
      val IP = splits(0)
      val UID = splits(1)
      val ACCESSTIME = splits(2)
      val URL = splits(3)
      logsrc(IP, UID, ACCESSTIME, URL)
    }).toDF.createTempView("log1")

    // TODO: daily active users (any user with at least one access record that day, keep only the earliest record), deduplicated by user
    spark.sql(
      """
        |select
        |uid,day
        |from
        |(
        |   select
        |   ip,uid,access_time,url,day,
        |   row_number() over(partition by uid,day order by access_time) rn
        |   from
        |   (select
        |   ip,uid,access_time,url,
        |   date_format(access_time,'yyyy-MM-dd') day
        |   from log1
        |   ) t
        |   order by day
        |) t1
        |where rn=1
        |""".stripMargin).show()

    // TODO: new users (today / yesterday) and cumulative historical users
    spark.sql(
      """
        |select
        |count(distinct t.uid) cnt
        |from
        |(select
        |   ip,uid,access_time,url,
        |   date_format(access_time,'yyyy-MM-dd') day
        |   from log1 where date_format(access_time,'yyyy-MM-dd') = '2017-09-17'
        |) t
        |left join -- historical users
        |(
        |   select
        |   ip,uid,access_time,url,
        |   date_format(access_time,'yyyy-MM-dd') day
        |   from log1 where date_format(access_time,'yyyy-MM-dd') <= '2017-09-16'
        |) t1
        |on t.uid=t1.uid
        |where t1.uid is null
        |-- group by t.uid,t.day
        |union all -- new users yesterday (2017-09-16)
        |select
        |count(distinct t.uid) cnt
        |from
        |(select
        |   ip,uid,access_time,url,
        |   date_format(access_time,'yyyy-MM-dd') day
        |   from log1 where date_format(access_time,'yyyy-MM-dd') = '2017-09-16'
        |) t
        |left join
        |(-- historical users
        |   select
        |   ip,uid,access_time,url,
        |   date_format(access_time,'yyyy-MM-dd') day
        |   from log1 where date_format(access_time,'yyyy-MM-dd') <= '2017-09-15'
        |) t1
        |on t.uid=t1.uid
        |where t1.uid is null
        |-- group by t.uid,t.day
        |union all -- cumulative historical users
        |select
        |   count(distinct uid) cnt
        |from log1
        |""".stripMargin).show()

    spark.stop()
  }
}
