Data Cleaning In-Class Test
Description of the data in the result file:
Ip: 106.39.41.166 (IP address; mapped to a city in stage 2)
Date: 10/Nov/2016:00:01:02 +0800 (date)
Day: 10 (day of month)
Traffic: 54 (traffic)
Type: video (type: video or article)
Id: 8701 (id of the video or article)
The stage-1 code below assumes each record is a single comma-separated line in this field order, e.g. 106.39.41.166,10/Nov/2016:00:01:02 +0800,10,54,video,8701.
Test requirements:
1. Data cleaning: clean the data as specified below and load the cleaned data into the Hive database.
Two-stage cleaning:
(1) Stage 1: extract the required fields from the raw log:
ip: 199.30.25.88
time: 10/Nov/2016:00:01:03 +0800
traffic: 62
article: article/11325
video: video/3235
(2) Stage 2: refine the extracted fields (see the sketch after the schema below):
ip ---> city(IP)
date --> time: 2016-11-10 00:01:03
day: 10
traffic: 62
type: article/video
id: 11325
(3) Hive table schema:
create table data(ip string, time string, day string, traffic bigint, type string, id string);
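The code in this post implements only stage 1; the stage-2 refinement described above (mapping ip to a city and splitting the combined video/3235 token into type and id) is never shown. The following is a minimal standalone sketch of that step. It assumes stage-1 output keeps the combined type/id token from the spec; the StageTwoSketch class name and the prefix-to-city table are hypothetical stand-ins, since a real job would consult an IP geolocation database.

import java.util.HashMap;
import java.util.Map;

public class StageTwoSketch {
    // Hypothetical IP-prefix -> city table, for illustration only;
    // a real job would use an IP geolocation database or service.
    private static final Map<String, String> CITY_BY_PREFIX = new HashMap<>();
    static {
        CITY_BY_PREFIX.put("106.39.", "Beijing"); // assumed mapping, not real geo data
    }

    // Refine one stage-1 record of the form
    //   ip,time,day,traffic,video/3235
    // into the final layout required by the spec:
    //   city,time,day,traffic,video,3235
    public static String refine(String line) {
        String[] f = line.split(",");
        String city = f[0]; // fall back to the raw ip if no prefix matches
        for (Map.Entry<String, String> e : CITY_BY_PREFIX.entrySet()) {
            if (f[0].startsWith(e.getKey())) {
                city = e.getValue();
                break;
            }
        }
        // Split the combined "video/3235" / "article/11325" token into type and id.
        String[] typeId = f[4].split("/");
        return String.join(",", city, f[1], f[2], f[3], typeId[0], typeId[1]);
    }

    public static void main(String[] args) {
        // Field values taken from the examples in the spec above.
        System.out.println(refine("106.39.41.166,2016-11-10 00:01:03,10,62,video/3235"));
    }
}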
2. Data analysis: compute the following statistics in Hive.
(1) Top 10 most-visited videos/articles by access count (video/article)
(2) Top 10 most popular courses by city (ip)
(3) Top 10 most popular courses by traffic (traffic)
3. Data visualization:
Export the statistics to a MySQL database and present them graphically.
Stage 1 data cleaning
This was built in a Maven project, based on the earlier HDFS in-class test.
PS: data imported this way came out with stray whitespace; it may be a problem with my database or with the code (TextOutputFormat's default tab separator between key and value is a likely cause).
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class shujuqingxi {

    public static class Map extends Mapper<Object, Text, Text, Text> {
        // Source time format in the log: 10/Nov/2016:00:01:02
        public static final SimpleDateFormat FORMAT =
                new SimpleDateFormat("d/MMM/yyyy:HH:mm:ss", Locale.ENGLISH);
        // Target time format required by the spec: 2016-11-10 00:01:02
        public static final SimpleDateFormat DATE_FORMAT =
                new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

        // Parse the source time format; returns null (and logs) on bad input.
        private static Date parseDateFormat(String string) {
            Date parse = null;
            try {
                parse = FORMAT.parse(string);
            } catch (Exception e) {
                e.printStackTrace();
            }
            return parse;
        }

        private static final Text newKey = new Text();
        private static final Text newValue = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            // Record layout: ip,date,day,traffic,type,id
            String[] arr = line.split(",");
            // Skip records that do not have the six expected fields.
            if (arr.length < 6) {
                return;
            }
            newKey.set(arr[0]);
            // Drop the trailing " +0800" time-zone suffix before parsing.
            // (The original code called indexOf("") here, which returns 0 and
            // silently cut off the first digit of the day.)
            final int last = arr[1].indexOf(" +0800");
            String time = arr[1].substring(0, last).trim();
            Date date = parseDateFormat(time);
            arr[1] = DATE_FORMAT.format(date);
            // Leading comma: the key (ip) and value are concatenated on output,
            // so each final line reads ip,time,day,traffic,type,id.
            newValue.set("," + arr[1] + "," + arr[2] + "," + arr[3] + "," + arr[4] + "," + arr[5]);
            context.write(newKey, newValue);
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        // Identity reduce: pass every cleaned record through unchanged.
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            for (Text text : values) {
                context.write(key, text);
            }
        }
    }

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop102:9002");
        // Write key and value with no separator instead of the default tab;
        // the tab is the most likely source of the stray whitespace noted above.
        conf.set("mapreduce.output.textoutputformat.separator", "");
        // Set the client identity (root is the VM user; any of the Hadoop cluster
        // nodes' users will do).
        System.setProperty("HADOOP_USER_NAME", "root");

        Path in = new Path("hdfs://hadoop102:9002/result.txt");
        Path out = new Path("hdfs://hadoop102:9002/result4");

        // Delete any previous output directory so the job can be re-run.
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(out)) {
            fs.delete(out, true);
        }

        Job job = Job.getInstance(conf, "OneSort");
        job.setJarByClass(shujuqingxi.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);
        boolean flag = job.waitForCompletion(true);
        System.out.println(flag);
        System.exit(flag ? 0 : 1);
    }
}
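Before loading the cleaned output into Hive, it is worth spot-checking it straight from HDFS. A minimal sketch, assuming the cluster address and /result4 output path used above and the default part-r-00000 reducer output file name (the PeekResult4 class name is mine):

import java.io.BufferedReader;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PeekResult4 {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop102:9002");
        System.setProperty("HADOOP_USER_NAME", "root");
        FileSystem fs = FileSystem.get(conf);
        // First output file of the cleaning job above.
        Path part = new Path("/result4/part-r-00000");
        try (BufferedReader reader =
                 new BufferedReader(new InputStreamReader(fs.open(part), "UTF-8"))) {
            String line;
            // Print the first ten cleaned records for a quick sanity check.
            for (int i = 0; i < 10 && (line = reader.readLine()) != null; i++) {
                System.out.println(line); // expect: ip,time,day,traffic,type,id
            }
        }
    }
}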
2. Data analysis
The queries below read from a Hive table named results, which holds the cleaned records (its creation and load step, e.g. a LOAD DATA INPATH on the /result4 output, is not shown in this post).
(1) Top 10 most-visited videos/articles by access count (video/article)
Create table result1:
create table result1 as
select id,count(*) total
from results
group by id
order by total desc
limit 10;
Export to MySQL (Sqoop export requires the target table to already exist on the MySQL side; \001 is Hive's default field delimiter, which the CTAS tables above use):
bin/sqoop export \
--connect jdbc:mysql://hadoop102:3306/bigdata \
--username root \
--password ok \
--table result1 \
--num-mappers 1 \
--export-dir /opt/hive/warehouse/result1 \
--input-fields-terminated-by "\001"
(2) Top 10 most popular courses by city (ip)
Create table result2:
create table result2 as
select ip,id,count(*) total
from results
group by ip,id
order by total desc
limit 10;
Export to MySQL:
bin/sqoop export \
--connect jdbc:mysql://hadoop102:3306/bigdata \
--username root \
--password ok \
--table result2 \
--num-mappers 1 \
--export-dir /opt/hive/warehouse/result2 \
--input-fields-terminated-by "\001"
(3) Top 10 most popular courses by traffic (traffic)
Create table result3:
create table result3 as
select id,sum(traffic) total
from results
group by id
order by total desc
limit 10;
Export to MySQL:
bin/sqoop export \
--connect jdbc:mysql://hadoop102:3306/bigdata \
--username root \
--password ok \
--table result3 \
--num-mappers 1 \
--export-dir /opt/hive/warehouse/result3 \
--input-fields-terminated-by "\001"
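The post stops at the Sqoop export, so here is a minimal sketch for the start of the visualization step from requirement 3: reading the exported top-10 rows back out of MySQL over JDBC. Host, database, credentials, and the result1 table come from the Sqoop commands above; the ReadResult1 class name is mine, the MySQL JDBC driver is assumed to be on the classpath, and the actual charting (e.g. with ECharts or JFreeChart) is left out.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ReadResult1 {
    public static void main(String[] args) throws Exception {
        // Same MySQL endpoint and credentials as the Sqoop export above.
        String url = "jdbc:mysql://hadoop102:3306/bigdata";
        try (Connection conn = DriverManager.getConnection(url, "root", "ok");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery(
                     "select id, total from result1 order by total desc")) {
            while (rs.next()) {
                // One row per video/article id with its access count,
                // ready to feed into whatever charting library is used.
                System.out.println(rs.getString("id") + "\t" + rs.getLong("total"));
            }
        }
    }
}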

