p67 Course introduction
p68 Overview
p69 The core idea of MapReduce
p70 WordCount source code; serialization types; the three kinds of MapReduce processes (MrAppMaster, MapTask, ReduceTask)
p71 Programming conventions
A user-written program is divided into three parts: the Mapper, the Reducer, and the Driver.
p72 WordCount requirements and case analysis
p73-78 Environment setup for the case
(1) Create a Maven project named MapReduceDemo.
(2) Add the following dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>1.7.30</version>
    </dependency>
</dependencies>

Then create a file named "log4j.properties" under the project's src/main/resources directory and fill it with:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

(3) Create the package com.atguigu.mapreduce.wordcount.
(4) Write the program.
1) The Mapper class:
package com.atguigu.mapreduce.wordcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    Text k = new Text();
    IntWritable v = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1 Get one line
        String line = value.toString();
        // 2 Split it into words
        String[] words = line.split(" ");
        // 3 Emit each word with a count of 1
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}

2) The Reducer class:
package com.atguigu.mapreduce.wordcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    int sum;
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // 1 Sum the counts for this key
        sum = 0;
        for (IntWritable count : values) {
            sum += count.get();
        }
        // 2 Emit the total
        v.set(sum);
        context.write(key, v);
    }
}

3) The Driver class:
package com.atguigu.mapreduce.wordcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1 Get the configuration and create the job object
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2 Associate this Driver's jar
        job.setJarByClass(WordCountDriver.class);
        // 3 Associate the Mapper and the Reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4 Set the Mapper output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7 Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}

Local testing
(1) First configure the HADOOP_HOME environment variable and the Windows runtime dependencies (commonly the winutils binaries).
(2) Run the program from IDEA/Eclipse.
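When running in the IDE, the Driver above expects the input and output paths as program arguments, e.g. the hypothetical paths D:\input D:\output. Note that FileOutputFormat refuses to start the job if the output directory already exists, so delete it between runs.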
Submitting to the cluster for testing
(1) Build the jar with Maven; the following packaging plugins must be added:
<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.6.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
        <plugin>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>

(2) Package the program into a jar.
(3) Rename the jar without dependencies to wc.jar and copy it to /opt/module/hadoop-3.1.3 on the Hadoop cluster.
(4) Start the Hadoop cluster:
[atguigu@hadoop102 hadoop-3.1.3]$ sbin/start-dfs.sh
[atguigu@hadoop103 hadoop-3.1.3]$ sbin/start-yarn.sh
(5) Run the WordCount program:
[atguigu@hadoop102 hadoop-3.1.3]$ hadoop jar wc.jar com.atguigu.mapreduce.wordcount.WordCountDriver /user/atguigu/input /user/atguigu/output
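To verify the result, the job output can be read back with the HDFS command line (part-r-00000 is the default name of the first reducer's output file):

[atguigu@hadoop102 hadoop-3.1.3]$ hadoop fs -cat /user/atguigu/output/part-r-00000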
p79-86 Serialization case study: writing the MapReduce program
The task: given phone-usage log lines, sum the upstream traffic, downstream traffic, and total traffic per phone number. Because the value must carry several fields, we define a custom bean, FlowBean, that implements Hadoop's Writable interface.
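For orientation, here is a hypothetical input line in the tab-separated layout the Mapper below assumes (phone number in field 1, counting from 0; upstream and downstream traffic in the third- and second-to-last fields):

1	13736230513	192.196.100.1	www.atguigu.com	2481	24681	200

which would yield the output record 13736230513	2481	24681	27162 (up, down, sum).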
package com.atguigu.mapreduce.writable;

import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {

    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // Hadoop requires a public no-arg constructor for deserialization
    public FlowBean() {}

    public long getUpFlow() { return upFlow; }
    public void setUpFlow(long upFlow) { this.upFlow = upFlow; }
    public long getDownFlow() { return downFlow; }
    public void setDownFlow(long downFlow) { this.downFlow = downFlow; }
    public long getSumFlow() { return sumFlow; }
    public void setSumFlow() { sumFlow = upFlow + downFlow; }

    @Override
    public void write(DataOutput out) throws IOException {
        // Serialize the fields in a fixed order
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Deserialize in exactly the same order as write()
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
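To see what Writable buys us, here is a minimal round-trip sketch (FlowBeanRoundTrip is a hypothetical helper class, not part of the course code): it serializes a FlowBean to an in-memory buffer and reads it back, which only works because readFields() consumes the fields in exactly the order write() produced them.

package com.atguigu.mapreduce.writable;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean();
        original.setUpFlow(100);
        original.setDownFlow(200);
        original.setSumFlow();

        // Serialize into an in-memory buffer, as MapReduce does when writing map output
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Deserialize into a fresh bean and confirm the fields survived
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(copy); // prints: 100	200	300
    }
}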
package com.atguigu.mapreduce.writable;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    FlowBean flowBean = new FlowBean();
    Text keyPhone = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line on tabs: phone number is field 1; the up and down
        // traffic are the third- and second-to-last fields
        String line = value.toString();
        String[] arr = line.split("\t");
        String phone = arr[1];
        String up = arr[arr.length - 3];
        String down = arr[arr.length - 2];
        keyPhone.set(phone);
        flowBean.setUpFlow(Long.parseLong(up));
        flowBean.setDownFlow(Long.parseLong(down));
        flowBean.setSumFlow();
        context.write(keyPhone, flowBean);
    }
}
package com.atguigu.mapreduce.writable;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    private FlowBean reduceFlowBean = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // Accumulate the up and down traffic for this phone number
        long up = 0;
        long down = 0;
        for (FlowBean flowBean : values) {
            up += flowBean.getUpFlow();
            down += flowBean.getDownFlow();
        }
        reduceFlowBean.setUpFlow(up);
        reduceFlowBean.setDownFlow(down);
        reduceFlowBean.setSumFlow();
        context.write(key, reduceFlowBean);
    }
}
package com.atguigu.mapreduce.writable;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class FlowDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration config = new Configuration();
        Job job = Job.getInstance(config);
        job.setJarByClass(FlowDriver.class);
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        FileInputFormat.setInputPaths(job, new Path("D:\\inputFlow"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\outputFlow"));
        boolean completion = job.waitForCompletion(true);
        System.exit(completion ? 0 : 1);
    }
}
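Since this Driver hard-codes local Windows paths (D:\inputFlow, D:\outputFlow), the case runs in local mode like the WordCount local test; to submit it to the cluster, swap the hard-coded paths for args[0] and args[1] and package it the same way as before.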
p87-88 The split mechanism and how parallelism is decided
(1) The problem: MapTask parallelism determines how concurrently the Map phase processes its work, which in turn affects the throughput of the whole job. Consider: for 1 GB of data, launching 8 MapTasks can improve the cluster's concurrent processing capacity. But would launching 8 MapTasks for 1 KB of data also improve performance? Is more MapTask parallelism always better? What factors decide MapTask parallelism?
(2) What decides MapTask parallelism: a block is how HDFS physically chops data into chunks; the block is HDFS's unit of storage. A data split, by contrast, partitions the input only logically; nothing is re-cut into pieces on disk. The split is the unit of input to a MapReduce computation, and each split launches one corresponding MapTask. Job submission also generates a staging directory holding the split file and the XML configuration; in cluster mode the job jar is uploaded as well.
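As a worked example of how splits become MapTasks under the default TextInputFormat: with the default 128 MB block/split size, a 300 MB file is cut logically into [0, 128 MB), [128 MB, 256 MB), and [256 MB, 300 MB), so the job launches 3 MapTasks. A 129 MB file, however, yields just 1 split, because FileInputFormat only starts another split while the remaining bytes exceed 1.1x the split size (129/128 ≈ 1.008 < 1.1).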
p89-91 Split source code
p92 TextInputFormat
p92-93 CombineTextInputFormat split mechanism; CombineTextInputFormat hands-on case
p94 MapReduce workflow
The flow above is the complete MapReduce workflow, but the Shuffle itself only spans steps 7 through 16. In detail:
(1) The MapTask collects the k/v pairs emitted by our map() method into an in-memory buffer.
(2) The buffer keeps spilling to files on local disk; several spill files may be produced.
(3) The spill files are merged into one large spill file.
(4) During both spilling and merging, the Partitioner is invoked to partition the data, and records are sorted by key.
(5) Each ReduceTask, keyed by its partition number, fetches the data of its own partition from every MapTask's machine.
(6) The ReduceTask pulls the result files of the same partition from different MapTasks and merges them once more (merge sort).
(7) Once everything is merged into one large file, the Shuffle is over; the ReduceTask then enters its logical computation, pulling key/value groups out of the file one by one and calling the user-defined reduce() method.
Notes:
(1) The Shuffle buffer size affects the efficiency of a MapReduce program: in principle, the larger the buffer, the fewer disk I/O operations, and the faster the job runs.
(2) The buffer size can be tuned via the parameter mapreduce.task.io.sort.mb (default 100 MB).
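As a sketch of how these knobs are set programmatically (the values are illustrative, not recommendations), the Driver can adjust them on the job Configuration inside main(), before the Job is created:

Configuration conf = new Configuration();
conf.setInt("mapreduce.task.io.sort.mb", 200);            // map-side sort buffer, in MB (default 100)
conf.setFloat("mapreduce.map.sort.spill.percent", 0.80f); // buffer fill ratio that triggers a spill (default 0.80)
Job job = Job.getInstance(conf);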
p95 How the Shuffle works