1. MapReduce Data Filtering

STEP 1: Process a single record to work out the parsing rules

Core problem:

From records such as 8.35.201.144 - - [30/May/2013:17:38:20 +0800] "GET /uc_server/avatar.php?uid=29331&size=middle HTTP/1.1" 301 -, extract the ip, the access time, and the requested url.

Code (a plain Java project; it uses some of the JARs under common and mapreduce in /opt/hadoop-3.1.0/share/hadoop):

import java.io.FileNotFoundException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class SingalData {
    public static void main(String[] args) throws ParseException,FileNotFoundException {
        String data = "8.35.201.144 - - [30/May/2013:17:38:20 +0800] \"GET /uc_server/"
                + "avatar.php?uid=29331&size=middle HTTP/1.1\" 301 -";
        System.out.println("待处理的原数据为:"+data);
        
        // Parse the ip (note: it keeps a trailing space)
        String ip = data.substring(0, data.indexOf("- -"));
        System.out.println("Parsed ip: " + ip);
        
        // Parse and reformat the time
        String tmpTime = data.substring(data.indexOf("[") + 1);
        tmpTime = tmpTime.substring(0, tmpTime.indexOf(" +0800"));
        // The log uses English month names; this demo only handles "May",
        // swapping it for "05" so a numeric-month pattern can parse it
        SimpleDateFormat dateFormat1 = new SimpleDateFormat(
                "dd/MM/yyyy:HH:mm:ss");
        Date date = dateFormat1.parse(tmpTime.replace("May", "05"));
        SimpleDateFormat dateFormat2 = new SimpleDateFormat(
                "yyyy-MM-dd|HH:mm:ss");
        String time = dateFormat2.format(date);
        System.out.println("time的解析结果:"+time);
        
        // Parse the url from the quoted request line, e.g. "GET /path HTTP/1.1"
        String tmpUrl = data.substring(data.indexOf("\"") + 1);
        if (tmpUrl.contains("HTTP")) {
            tmpUrl = tmpUrl.substring(0, tmpUrl.indexOf(" HTTP"));
            tmpUrl = tmpUrl.split(" ")[1];
        } else {
            tmpUrl = tmpUrl.substring(0, tmpUrl.indexOf("\""));
        }
        String url = tmpUrl;
        System.out.println("url的解析结果:"+url);
    }
}

Output:

Raw input: 8.35.201.144 - - [30/May/2013:17:38:20 +0800] "GET /uc_server/avatar.php?uid=29331&size=middle HTTP/1.1" 301 -
Parsed ip: 8.35.201.144 
Parsed time: 2013-05-30|17:38:20
Parsed url: /uc_server/avatar.php?uid=29331&size=middle
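
Note that this substring-based parsing is brittle: the replace("May", "05") trick only works for logs from May. A more robust sketch, purely illustrative (the class name RegexParse and the pattern are mine, not part of the original code), parses the month name directly with Locale.ENGLISH:

import java.text.SimpleDateFormat;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexParse {
    // Groups: 1 = ip, 2 = bracketed timestamp, 3 = quoted request line
    private static final Pattern LOG = Pattern.compile(
            "^(\\S+) \\S+ \\S+ \\[([^\\]]+)\\] \"([^\"]*)\"");

    public static void main(String[] args) throws Exception {
        String data = "8.35.201.144 - - [30/May/2013:17:38:20 +0800] \"GET /uc_server/"
                + "avatar.php?uid=29331&size=middle HTTP/1.1\" 301 -";
        Matcher m = LOG.matcher(data);
        if (m.find()) {
            String ip = m.group(1);
            // Drop the " +0800" zone suffix, as the original code does
            String rawTime = m.group(2).split(" ")[0];
            // MMM with Locale.ENGLISH parses any English month name, not just May
            SimpleDateFormat in = new SimpleDateFormat("dd/MMM/yyyy:HH:mm:ss", Locale.ENGLISH);
            SimpleDateFormat out = new SimpleDateFormat("yyyy-MM-dd|HH:mm:ss");
            String time = out.format(in.parse(rawTime));
            // The request line is "METHOD url VERSION"; take the middle token
            String url = m.group(3).split(" ")[1];
            System.out.println(ip + " " + time + " " + url);
        }
    }
}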

STEP 2: Write MyMapper

Analysis:

① The Map input types are <LongWritable, Text>: the byte offset of the line and the line itself
② Convert the incoming Text to a String for processing
③ The Map output types are <Text, NullWritable>
④ Convert the processed result, "ip + time + url", to Text and write it to the context

Source:

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;


public class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        /*
         * key: the byte offset of this line in the input split
         * value: one line of the log
         * context: the Map context, linking the HDFS input (above) to the Reducer (below)
         */
        String data = value.toString();
        
        // Filter out requests for static resources
        if (data.contains("/uc_server") || data.contains("/data") ||
                data.contains("/static") || data.contains("/template") ||
                data.contains("/source") || data.contains("/favicon.ico") ||
                data.contains("/images"))
            return;
        
        // Extract the ip (it keeps a trailing space, as in STEP 1)
        String ip = data.substring(0, data.indexOf("- -"));
        
        // Extract and reformat the time; replace("May", "05") again
        // assumes every record is from May
        String tmpTime = data.substring(data.indexOf("[") + 1);
        tmpTime = tmpTime.substring(0, tmpTime.indexOf(" +0800"));
        SimpleDateFormat dateFormat1 = new SimpleDateFormat("dd/MM/yyyy:HH:mm:ss");
        Date date = null;
        try {
            date = dateFormat1.parse(tmpTime.replace("May", "05"));
        } catch (ParseException e) {
            e.printStackTrace();
        }
        SimpleDateFormat dateFormat2 = new SimpleDateFormat("yyyy-MM-dd|HH:mm:ss");
        String time = dateFormat2.format(date);
        
        // Extract the requested url from the quoted request line
        String tmpUrl = data.substring(data.indexOf("\"") + 1);
        if (tmpUrl.contains("HTTP")) {
            tmpUrl = tmpUrl.substring(0, tmpUrl.indexOf(" HTTP"));
            tmpUrl = tmpUrl.split(" ")[1];
        } else {
            tmpUrl = tmpUrl.substring(0, tmpUrl.indexOf("\""));
        }
        String url = tmpUrl;
  
        // Write "ip time url" as the key; no value is needed
        context.write(new Text(ip + " " + time + " " + url), NullWritable.get());
    }
}
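
One optional refactoring: the chained contains() checks work, but the filter list is easier to extend as an array. A minimal sketch (the class and method names StaticResourceFilter/isStaticResource are mine, for illustration only):

public class StaticResourceFilter {
    // Path fragments that mark static-resource requests (same list as the mapper)
    private static final String[] FILTERED = {
            "/uc_server", "/data", "/static", "/template",
            "/source", "/favicon.ico", "/images"
    };

    public static boolean isStaticResource(String line) {
        for (String fragment : FILTERED) {
            if (line.contains(fragment)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // The sample record from STEP 1 hits the /uc_server filter, so this prints true
        System.out.println(isStaticResource(
                "8.35.201.144 - - [30/May/2013:17:38:20 +0800] \"GET /uc_server/"
                + "avatar.php?uid=29331&size=middle HTTP/1.1\" 301 -"));
    }
}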

STEP 3: Decide whether a Reducer is needed

The map phase already produces the final records, so no Reducer needs to be written.
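
One caveat: even with no Reducer class set, Hadoop runs the default identity Reducer, so the map output is still shuffled and sorted by key (which is why the sample output in STEP 5 appears sorted by IP). If that sort is not wanted, the job can be made map-only by adding one line in Main (standard Hadoop API, not part of the original code):

        // Skip the reduce phase entirely; output files are then named part-m-*
        job.setNumReduceTasks(0);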

STEP 4: Write Main

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Main {
    public static void main(String[] args) throws 
    IOException, ClassNotFoundException, InterruptedException {
        // A job = map + reduce
        Configuration conf = new Configuration();
        
        // Create the job
        Job job = Job.getInstance(conf);
        
        // Set the job's entry point
        job.setJarByClass(Main.class);
        
        // Configure the map phase
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        
        // Set the job's input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        
        // Submit the job; true means print progress logs
        job.waitForCompletion(true);
    }
}

STEP 5: Package the code as a JAR, run it on Hadoop, and check the results; the log data must already be uploaded to HDFS.
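
For example (the JAR name and HDFS paths below are illustrative, not from the original):

hadoop jar logfilter.jar Main /input/access_log /output/filtered
hdfs dfs -cat /output/filtered/part-r-00000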

Sample output:

1.169.170.214  2013-05-30|22:49:01 /api.php?mod=js&bid=65
1.169.170.214  2013-05-30|22:49:14 /api.php?mod=js&bid=65
1.170.183.87  2013-05-30|18:39:33 /api.php?mod=js&bid=65
1.170.183.87  2013-05-30|18:39:59 /api.php?mod=js&bid=94
1.170.6.222  2013-05-30|20:22:36 /api.php?mod=js&bid=65
1.170.6.222  2013-05-30|20:23:01 /api.php?mod=js&bid=94
1.171.165.64  2013-05-30|23:20:59 /api.php?mod=js&bid=65
1.171.165.64  2013-05-30|23:21:17 /thread-11220-1-1.html
1.171.52.130  2013-05-30|22:04:23 /api.php?mod=js&bid=65
1.171.52.242  2013-05-30|19:38:13 /api.php?mod=js&bid=65
1.171.62.241  2013-05-30|18:26:36 /api.php?mod=js&bid=66
1.173.227.222  2013-05-30|21:21:51 /api.php?mod=js&bid=65
1.173.227.222  2013-05-30|21:21:59 /api.php?mod=js&bid=66
1.173.227.222  2013-05-30|21:40:08 /api.php?mod=js&bid=94
