每日总结

环境准备

1)创建maven工程MapReduceDemo

2)在pom.xml文件添加如下依赖

<dependencies>

    <dependency>

        <groupId>org.apache.hadoop</groupId>

        <artifactId>hadoop-client</artifactId>

        <version>3.1.3</version>

    </dependency>

    <dependency>

        <groupId>junit</groupId>

        <artifactId>junit</artifactId>

        <version>4.12</version>

    </dependency>

    <dependency>

        <groupId>org.slf4j</groupId>

        <artifactId>slf4j-log4j12</artifactId>

        <version>1.7.30</version>

    </dependency>

</dependencies>

3)在项目的src/main/resources目录下,新建一个文件,命名为“log4j.properties”,在文件中填入以下内容:

log4j.rootLogger=INFO, stdout  

log4j.appender.stdout=org.apache.log4j.ConsoleAppender  

log4j.appender.stdout.layout=org.apache.log4j.PatternLayout  

log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n  

log4j.appender.logfile=org.apache.log4j.FileAppender  

log4j.appender.logfile.File=target/spring.log  

log4j.appender.logfile.layout=org.apache.log4j.PatternLayout  

log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

4)创建包名:com.atguigu.mapreduce.wordcount

4)编写程序

1)编写Mapper

package com.atguigu.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;

import org.apache.hadoop.io.LongWritable;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Mapper;

 

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable>{

 

Text k = new Text();

IntWritable v = new IntWritable(1);

 

@Override

protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

 

// 1 获取一行

String line = value.toString();

 

// 2 切割

String[] words = line.split(" ");

 

// 3 输出

for (String word : words) {

 

k.set(word);

context.write(k, v);

}

}

}

2)编写Reducer

package com.atguigu.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Reducer;

 

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>{

 

int sum;

IntWritable v = new IntWritable();

 

@Override

protected void reduce(Text key, Iterable<IntWritable> values,Context context) throws IOException, InterruptedException {

 

// 1 累加求和

sum = 0;

for (IntWritable count : values) {

sum += count.get();

}

 

// 2 输出

         v.set(sum);

context.write(key,v);

}

}

3)编写Driver驱动类

package com.atguigu.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.fs.Path;

import org.apache.hadoop.io.IntWritable;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Job;

import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

 

public class WordCountDriver {

 

public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

 

// 1 获取配置信息以及获取job对象

Configuration conf = new Configuration();

Job job = Job.getInstance(conf);

 

// 2 关联本Driver程序的jar

job.setJarByClass(WordCountDriver.class);

 

// 3 关联MapperReducerjar

job.setMapperClass(WordCountMapper.class);

job.setReducerClass(WordCountReducer.class);

 

// 4 设置Mapper输出的kv类型

job.setMapOutputKeyClass(Text.class);

job.setMapOutputValueClass(IntWritable.class);

 

// 5 设置最终输出kv类型

job.setOutputKeyClass(Text.class);

job.setOutputValueClass(IntWritable.class);

 

// 6 设置输入和输出路径

FileInputFormat.setInputPaths(job, new Path(args[0]));

FileOutputFormat.setOutputPath(job, new Path(args[1]));

 

// 7 提交job

boolean result = job.waitForCompletion(true);

System.exit(result ? 0 : 1);

}

}

5)本地测试

1)需要首先配置好HADOOP_HOME变量以及Windows运行依赖

2)在IDEA/Eclipse上运行程序

 
posted @ 2021-12-07 19:22  小萌新一枚lll  阅读(39)  评论(0编辑  收藏  举报