03_HADOOP_06 HDFS and MapReduce Client Java Programming

Create a Maven project

Add the following dependency to pom.xml; the single hadoop-client artifact transitively pulls in the HDFS and MapReduce client libraries:
<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client</artifactId>
  <version>2.9.2</version>
</dependency>

Copy the configuration files from your Hadoop installation into the project's resources directory.

Four files are needed: core-site.xml, hdfs-site.xml, mapred-site.xml, and yarn-site.xml.
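In a typical installation these files live under $HADOOP_HOME/etc/hadoop. After copying, the resources directory should look like this:

src/main/resources/
├── core-site.xml
├── hdfs-site.xml
├── mapred-site.xml
└── yarn-site.xml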

Writing the HDFS client

package com.zhanghh.train;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.*;

/**
 * HDFS client examples: create a directory, upload, download,
 * inspect block locations, and delete.
 */
public class HdfsTest {
    Configuration conf;
    FileSystem fs;

    @Before
    public void before() throws Exception {
        //Load the configuration; by default the four *-site.xml files on the classpath are picked up
        conf = new Configuration(true);
        //Obtain a FileSystem instance from the configuration
        fs = FileSystem.get(conf);
    }

    @After
    public void after() throws Exception {
        fs.close();
    }

    /**
     * Create a directory, removing it first if it already exists
     * @throws Exception
     */
    @Test
    public void mkdir() throws Exception {
        Path path = new Path("/tmp");
        if (fs.exists(path)){
            fs.delete(path,true);
        }
        fs.mkdirs(path);
    }

    /**
     * Upload a local file to a target path on HDFS
     * @throws Exception
     */
    @Test
    public void uploadFile() throws Exception {
        Path path = new Path("/tmp/test.txt");
        if(fs.exists(path)){
            fs.delete(path,false);
        }
        FSDataOutputStream outputStream = fs.create(path);
        File file = new File("C:\\Users\\zhanghh\\Desktop\\test.txt");
        InputStream inputStream = new BufferedInputStream(new FileInputStream(file));
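        // close=true tells copyBytes to close both streams once the copy completes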
        IOUtils.copyBytes(inputStream,outputStream,conf,true);
    }

    /**
     * Download a file from HDFS to the local filesystem
     * @throws Exception
     */
    @Test
    public void downFile() throws Exception {
        Path path = new Path("/tmp/test.txt");
        FSDataInputStream inputStream = fs.open(path);
        BufferedOutputStream outputStream = new BufferedOutputStream(new FileOutputStream(new File("C:\\Users\\zhanghh\\Desktop\\222.txt")));
        IOUtils.copyBytes(inputStream,outputStream,conf,true);
    }

    /**
     * Print the block locations of a file
     * @throws Exception
     */
    @Test
    public void blockLocation() throws Exception {
        Path path = new Path("/tmp/test.txt");
        FileStatus fss=fs.getFileStatus(path);
        BlockLocation[] locations = fs.getFileBlockLocations(fss, 0, fss.getLen());
        for (BlockLocation obj:locations) {
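            // Each BlockLocation prints roughly as "offset,length,host1,host2,...",
            // naming the datanodes that hold a replica of that block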
            System.out.println(obj);
        }
    }

    /**
     * Delete a path recursively
     * @throws Exception
     */
    @Test
    public void delete() throws Exception{
        Path path = new Path("/tmp");
        if (fs.exists(path)){
            fs.delete(path,true);
        }
    }
}

Writing a simple WordCount computation with MapReduce

package com.zhanghh.train;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * <pre>
 * MapReduce driver for the WordCount job
 * </pre>
 *
 * @author zhanghh
 * @create 2019/8/25
 */
public class MapperReduceTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(true);
        Job job = Job.getInstance(conf);
        job.setJarByClass(MyWordCount.class); //class used to locate the job jar
        job.setJobName("zhanghh_job");  //job name

        Path inputPath = new Path("/tmp/test.txt");
        FileInputFormat.addInputPath(job, inputPath); //input path

        Path outputPath = new Path("/tmp/output");
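        // MapReduce refuses to start if the output directory already exists, so remove it first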
        if (outputPath.getFileSystem(conf).exists(outputPath)) {
            outputPath.getFileSystem(conf).delete(outputPath, true);
        }
        FileOutputFormat.setOutputPath(job, outputPath); //output path

        job.setMapperClass(MyWordCount.MyMapper.class);  //mapper class
        job.setMapOutputKeyClass(Text.class);            //mapper output key type
        job.setMapOutputValueClass(IntWritable.class);   //mapper output value type
        job.setReducerClass(MyWordCount.MyReducer.class);//reducer class
        job.setOutputKeyClass(Text.class);               //final output key type
        job.setOutputValueClass(IntWritable.class);      //final output value type

        // Submit the job, then poll for progress until the job is complete
        job.waitForCompletion(true);    //submit the job and wait for it to finish
    }
}
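To run the job on a cluster, package the project into a jar and submit it with hadoop jar (the jar name here is illustrative):

hadoop jar hadoop-train.jar com.zhanghh.train.MapperReduceTest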

----------------------------------------------------------------------------------------------------------
package com.zhanghh.train;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * <pre>
 * Word count: the mapper tokenizes each line, the reducer sums the counts per word
 * </pre>
 *
 * @author zhanghh
 * @create 2019/8/27
 */
public class MyWordCount {

    public static class MyMapper extends Mapper<Object, Text, Text, IntWritable> {
        private IntWritable one=new IntWritable(1);
        private Text word = new Text();
        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()){
                word.set(itr.nextToken());
                context.write(word,one);
            }
        }
    }

    public static class MyReducer extends Reducer<Text,IntWritable,Text,IntWritable> {
        private IntWritable result=new IntWritable();
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key,result);
        }
    }
}
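As a quick sanity check: if /tmp/test.txt contains the two lines

hello world
hello hadoop

the job writes a part file under /tmp/output with one word per line, a tab, and its count, sorted by key:

hadoop	1
hello	2
world	1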