Running the jar

1. Local directory

hadoop jar /home/kg/phone_local.jar corina.wordCount.wordLocal.WordcountDriver  /home/kg/hello.txt /home/kg/result
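
This command runs the word count entirely on the local machine: the driver below forces the local job runner and the local filesystem. Note that MapReduce refuses to start if the output directory (/home/kg/result here) already exists, so it has to be removed between runs. A small guard one could add to the driver right after the Configuration is created (my addition, not part of the original code; it reuses the driver's configuration and args variables and needs an extra import of org.apache.hadoop.fs.FileSystem):

        // Delete a pre-existing output directory so reruns do not fail with
        // FileAlreadyExistsException (convenient for local testing only).
        FileSystem fs = FileSystem.get(configuration);
        Path output = new Path(args[1]);
        if (fs.exists(output)) {
            fs.delete(output, true); // true = recursive delete
        }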

package corina.wordCount.wordLocal;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;


/**
 * Acts as a client to the YARN cluster: it bundles the runtime
 * parameters of our MapReduce job, specifies the jar, and finally
 * submits everything for execution.
 * @author Administrator
 */
public class WordcountDriver {


    public static void main(String[] args) throws Exception {
        // 1. Get the configuration and create the Job instance
        Configuration configuration = new Configuration();
//        String[] otherArgs = (new GenericOptionsParser(configuration, args)).getRemainingArgs();

        // Run with the local job runner against the local filesystem,
        // instead of submitting to a YARN cluster and HDFS
        configuration.set("mapreduce.framework.name", "local");
        configuration.set("fs.defaultFS", "file:///");

        Job job = Job.getInstance(configuration);
//        job.getConfiguration().set("mapreduce.job.queuename", otherArgs[2]);

        // 2. Specify where the jar containing this program lives
//        job.setJar("/home/admin/wc.jar");
        job.setJarByClass(WordcountDriver.class);

        // 3. Set the Mapper and Reducer classes for this job
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);


        // 4. Set the combiner; the reducer doubles as a combiner here
        // because summing partial counts is associative
        job.setCombinerClass(WordcountReducer.class);

        // 5. Key/value types emitted by the mapper
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 6. Key/value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 7. Input and output paths for the job
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

//        FileInputFormat.setInputPaths(job, new Path("E:\\hello.txt"));
//        FileOutputFormat.setOutputPath(job, new Path("E:\\out"));

        // 8. Submit the job configuration, together with the jar holding
        // the job's classes, and wait for completion
//        job.submit();
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
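
All three drivers reference WordcountMapper and WordcountReducer, which the post does not show. A minimal sketch consistent with the Text/IntWritable types configured above (my reconstruction, not the original classes) might look like this, with each class in its own file and the package matching the driver's:

package corina.wordCount.wordLocal;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// WordcountMapper.java: tokenize each input line and emit (word, 1).
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final Text word = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split("\\s+")) {
            if (!token.isEmpty()) {
                word.set(token);
                context.write(word, one);
            }
        }
    }
}

// WordcountReducer.java (same package; imports as above plus
// org.apache.hadoop.mapreduce.Reducer): sum the counts for each word.
// Because its input and output types match, it can also serve as the combiner.
public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private final IntWritable total = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable v : values) {
            sum += v.get();
        }
        total.set(sum);
        context.write(key, total);
    }
}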

2. HDFS -- queue not configured in the code

hadoop jar /home/kg/hadoop/phone.jar corina.wordCount.WordcountDriver -D mapred.job.queue.name=root.yjy /user/kg/hello.txt /user/kg/test6

package corina.wordCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordcountDriver {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // GenericOptionsParser consumes generic options such as
        // -D mapred.job.queue.name=root.yjy, applying them to the
        // configuration; only the application arguments (the input
        // and output paths) remain in otherArgs.
        String[] otherArgs = new GenericOptionsParser(configuration, args).getRemainingArgs();

        Job job = Job.getInstance(configuration);
        job.setJarByClass(WordcountDriver.class);
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);
        job.setCombinerClass(WordcountReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
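
Note how the queue name reaches the job in this variant: GenericOptionsParser consumes generic options such as -D key=value and applies them to the Configuration before the Job is created, so only the input and output paths remain in otherArgs. (mapred.job.queue.name is the older, deprecated spelling of mapreduce.job.queuename; Hadoop maps the two automatically.) A small illustration of that behaviour, using a made-up argument array that mirrors the command above:

        // Hypothetical argument array mirroring the hadoop jar command.
        String[] argv = {"-D", "mapred.job.queue.name=root.yjy",
                         "/user/kg/hello.txt", "/user/kg/test6"};
        Configuration conf = new Configuration();
        String[] remaining = new GenericOptionsParser(conf, argv).getRemainingArgs();
        // remaining is now {"/user/kg/hello.txt", "/user/kg/test6"},
        // and conf.get("mapred.job.queue.name") returns "root.yjy".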

3. HDFS -- queue configured in the code

hadoop jar /home/kg/hadoop/phone_queue.jar corina.wordCount.WordcountDriver  /user/kg/hello.txt /user/kg/test6  root.yjy

package corina.wordCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordcountDriver {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        String[] otherArgs = new GenericOptionsParser(configuration, args).getRemainingArgs();

        Job job = Job.getInstance(configuration);
        // The third remaining argument names the YARN queue, so no -D
        // option is needed on the command line.
        job.getConfiguration().set("mapreduce.job.queuename", otherArgs[2]);
        job.setJarByClass(WordcountDriver.class);
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);
        job.setCombinerClass(WordcountReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
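
This variant expects exactly three arguments after the class name: input path, output path, and queue name. If the queue argument is omitted, reading otherArgs[2] throws ArrayIndexOutOfBoundsException before the job is even configured. A defensive check one might add right after the parser call (my addition, not in the original):

        // Fail fast with a usage message instead of an array-index error.
        if (otherArgs.length < 3) {
            System.err.println("Usage: WordcountDriver <input> <output> <queue>");
            System.exit(2);
        }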
