spark04 (Clickstream Log Analysis Case Study)

Table of Contents

  • Create a Maven project and import dependencies
  • Page views (PV)
  • Unique visitors (UV)
  • Top N referrers

Create a Maven project and import dependencies

<properties>
        <scala.version>2.11.8</scala.version>
        <spark.version>2.2.0</spark.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.5</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
        </dependency>

    </dependencies>
    <build>
        <sourceDirectory>src/main/scala</sourceDirectory>
        <testSourceDirectory>src/test/scala</testSourceDirectory>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                    <!--    <verbose>true</verbose>-->
                </configuration>
            </plugin>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.0</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                        <configuration>
                            <args>
                                <arg>-dependencyfile</arg>
                                <arg>${project.build.directory}/.scala_dependencies</arg>
                            </args>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.1.1</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass></mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

Page views (PV)

package cn.itcast

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object PV {
  def main(args: Array[String]): Unit = {
    //todo: create the SparkConf and set the appName
    //todo: setMaster("local[2]") runs Spark locally; the number means 2 threads are used
    val sparkConf: SparkConf = new SparkConf().setAppName("PV").setMaster("local[2]")
    //todo: create the SparkContext
    val sc: SparkContext = new SparkContext(sparkConf)
    //todo: read the data
    val file: RDD[String] = sc.textFile("d:\\data\\access.log")
    //todo: take each line as input and emit ("pv", 1)
    val pvAndOne: RDD[(String, Int)] = file.map(x => ("pv", 1))
    //todo: aggregate and print the result
    val totalPV: RDD[(String, Int)] = pvAndOne.reduceByKey(_ + _)
    totalPV.foreach(println)
    sc.stop()
  }
}
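
Since every log line is counted as one page view, the same total can also be obtained without the map/reduceByKey shuffle by counting lines directly. The following is a minimal alternative sketch (not part of the original example), reusing the same input path:

package cn.itcast

import org.apache.spark.{SparkConf, SparkContext}

// Alternative sketch: each log line is one page view, so count() gives
// the total PV directly, without emitting ("pv", 1) pairs and shuffling.
object PVCount {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("PVCount").setMaster("local[2]"))
    val totalPV: Long = sc.textFile("d:\\data\\access.log").count()
    println(("pv", totalPV))
    sc.stop()
  }
}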

Unique visitors (UV)

package cn.itcast

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object UV {
  def main(args: Array[String]): Unit = {
    //todo: build the SparkConf and SparkContext
    val sparkConf: SparkConf = new SparkConf().setAppName("UV").setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf)
    //todo: read the data
    val file: RDD[String] = sc.textFile("d:\\data\\access.log")
    //todo: split each line and take the IP address (the first field)
    val ips: RDD[String] = file.map(_.split(" ")).map(x => x(0))
    //todo: deduplicate the IP addresses, then emit ("UV", 1)
    val uvAndOne: RDD[(String, Int)] = ips.distinct().map(x => ("UV", 1))
    //todo: aggregate and print the result
    val totalUV: RDD[(String, Int)] = uvAndOne.reduceByKey(_ + _)
    totalUV.foreach(println)
    //todo: save the result to disk
    totalUV.saveAsTextFile("d:\\data\\out")
    sc.stop()
  }
}
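
The pom above also pulls in mysql-connector-java, so instead of writing a text file the UV figure could be inserted into MySQL with plain JDBC inside foreachPartition. The sketch below is only an illustration: the database name logstats, the table uv_stat(name VARCHAR(20), total INT), and the root/root credentials are assumptions to adapt to your environment.

package cn.itcast

import java.sql.DriverManager
import org.apache.spark.{SparkConf, SparkContext}

// Hypothetical sketch: write the UV result into MySQL via JDBC.
// Assumes database "logstats" with table uv_stat(name VARCHAR(20), total INT).
object UVToMysql {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("UVToMysql").setMaster("local[2]"))
    val totalUV = sc.textFile("d:\\data\\access.log")
      .map(_.split(" ")(0))   // the IP address is the first field
      .distinct()
      .map(ip => ("UV", 1))
      .reduceByKey(_ + _)

    // Open one connection per partition rather than one per record.
    totalUV.foreachPartition { iter =>
      val conn = DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/logstats", "root", "root")
      val stmt = conn.prepareStatement("INSERT INTO uv_stat (name, total) VALUES (?, ?)")
      iter.foreach { case (name, total) =>
        stmt.setString(1, name)
        stmt.setInt(2, total)
        stmt.executeUpdate()
      }
      stmt.close()
      conn.close()
    }
    sc.stop()
  }
}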


Top N referrers

Compute the top N referrer URLs (the refer field of each request).

package cn.itcast

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Compute the top N referrers.
  */
object TopN {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("TopN").setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf)
    sc.setLogLevel("WARN")
    //read the data
    val file: RDD[String] = sc.textFile("d:\\data\\access.log")
    //take each line as input and emit (referrer URL, 1); field index 10 holds the referrer
    val refUrlAndOne: RDD[(String, Int)] = file.map(_.split(" ")).filter(_.length > 10).map(x => (x(10), 1))
    //aggregate, then sort by count in descending order
    val result: RDD[(String, Int)] = refUrlAndOne.reduceByKey(_ + _).sortBy(_._2, false)
    //take the top N with take(); here the top 5
    val finalResult: Array[(String, Int)] = result.take(5)
    println(finalResult.toBuffer)
    sc.stop()
  }
}
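
When only the first few entries are needed, the full sortBy can be skipped: RDD.top(n) with an Ordering on the count pulls the N largest pairs straight to the driver. Below is a minimal variant sketch under the same assumption about the log layout (the referrer sits at field index 10):

package cn.itcast

import org.apache.spark.{SparkConf, SparkContext}

// Variant sketch: take the top 5 referrers with RDD.top instead of a full sort.
object TopNWithTop {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("TopNWithTop").setMaster("local[2]"))
    sc.setLogLevel("WARN")
    val top5: Array[(String, Int)] = sc.textFile("d:\\data\\access.log")
      .map(_.split(" "))
      .filter(_.length > 10)                       // keep only lines that contain a referrer field
      .map(fields => (fields(10), 1))
      .reduceByKey(_ + _)
      .top(5)(Ordering.by[(String, Int), Int](_._2)) // order by count, largest first
    println(top5.toBuffer)
    sc.stop()
  }
}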
