Parameters to configure when submitting a MapReduce job

Submitting to YARN from Linux

import java.io.IOException;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LinuxToYarn {
	public static void main(String[] args) throws IOException, 
	ClassNotFoundException, InterruptedException, URISyntaxException {
		
		Configuration conf = new Configuration();
		
		Job job = Job.getInstance(conf);
		
		// Location of the jar that contains this job's classes
		job.setJarByClass(LinuxToYarn.class);
		
		// Mapper and reducer implementation classes this job will invoke
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types produced by the job's mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the final (reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Input dataset path and final output path for this job
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and wait for it to complete
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}


}
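
The driver above references a mapper class WordCount and a reducer class WordcountMapreduce that are not shown in this post. The following is only a minimal sketch of what such word-count implementations might look like: the class names come from the driver, but their bodies, and the assumption that the input is plain text lines, are assumptions. On Linux the compiled jar would then typically be submitted with something like hadoop jar mapreduce.jar LinuxToYarn, where the jar name is also an assumption.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// WordCount.java -- hypothetical mapper: splits each line into words and emits (word, 1)
public class WordCount extends Mapper<LongWritable, Text, Text, IntWritable> {
	private static final IntWritable ONE = new IntWritable(1);
	private final Text word = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		for (String token : value.toString().split("\\s+")) {
			if (!token.isEmpty()) {
				word.set(token);
				context.write(word, ONE);
			}
		}
	}
}

// WordcountMapreduce.java -- hypothetical reducer: sums the counts for each word
// (placed in its own file, with the corresponding org.apache.hadoop imports)
public class WordcountMapreduce extends Reducer<Text, IntWritable, Text, IntWritable> {
	@Override
	protected void reduce(Text key, Iterable<IntWritable> values, Context context)
			throws IOException, InterruptedException {
		int sum = 0;
		for (IntWritable v : values) {
			sum += v.get();
		}
		context.write(key, new IntWritable(sum));
	}
}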

Submitting to YARN from Windows

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class submittedWindowToyarn {
	public static void main(String[] args) throws IOException, 
	ClassNotFoundException, InterruptedException, URISyntaxException {
		
		Configuration conf = new Configuration();
		
		// Set a JVM system property in code so the job can obtain the user identity for accessing HDFS
		System.setProperty("HADOOP_USER_NAME", "root");
		
		// Default filesystem the job will access
		conf.set("fs.defaultFS","hdfs://hadoop1:9000");
		
		// Where the job is submitted to run: the YARN ResourceManager
		conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resourcemanager.hostname", "hadoop2");
		
		// When running this job submission client on Windows, this cross-platform submission parameter is required
		conf.set("mapreduce.app-submission.cross-platform","true");
		
		Job job = Job.getInstance(conf);
		
		// Location of the job jar (an explicit local path on the Windows machine)
		job.setJar("H:/mapreduce.jar");
		
		// Mapper and reducer implementation classes this job will invoke
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types produced by the job's mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the final (reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Delete the output path if it already exists; MapReduce will not overwrite an existing output directory
		Path path = new Path("/output");
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop1:9000"),conf,"root");
		if(fs.exists(path)){
			fs.delete(path,true);
		}
		// Input dataset path and final output path for this job
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and wait for it to complete
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}


}
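
Because the configuration above carries the cluster addresses and job.setJar() points at the jar on the local H: drive, this Windows client can be run directly from the IDE's main method. When the job finishes, the results are written to /output on HDFS, one part-r-* file per reduce task (two in this example).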

 
