The mapper and reducer programs need no changes at all; compression is enabled purely by passing parameters when launching the streaming job:
-jobconf "mapred.compress.map.output=true" \
-jobconf "mapred.map.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
-jobconf "mapred.output.compress=true" \
-jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
#!/bin/bash

HADOOP_CMD="/home/hadoop/app/hadoop/hadoop-2.6.0-cdh5.13.0/bin/hadoop"
STREAM_JAR_PATH="/home/hadoop/app/hadoop/hadoop-2.6.0-cdh5.13.0/share/hadoop/tools/lib/hadoop-streaming-2.6.0-cdh5.13.0.jar"

INPUT_FILE_PATH="/input/The_Man_of_Property"
OUTPUT_FILE_PATH="/output/wordcount/CacheArchiveCompressFile"

# clear any previous output so the job can create the directory afresh
# (-rmr is the old spelling of -rm -r)
$HADOOP_CMD fs -rmr -skipTrash $OUTPUT_FILE_PATH

# run the streaming job; the four compression jobconfs gzip both the
# intermediate map output and the final job output
$HADOOP_CMD jar $STREAM_JAR_PATH \
-input $INPUT_FILE_PATH \
-output $OUTPUT_FILE_PATH \
-jobconf "mapred.job.name=wordcount_wordwhite_cacheArchivefile_demo" \
-jobconf "mapred.compress.map.output=true" \
-jobconf "mapred.map.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
-jobconf "mapred.output.compress=true" \
-jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
-mapper "python mapper.py WHF.gz" \
-reducer "python reducer.py" \
-cacheArchive "hdfs://localhost:9000/input/cachefile/wordwhite.tar.gz#WHF.gz" \
-file "./mapper.py" \
-file "./reducer.py"
$ chmod +x run_streaming_compress.sh
$ ./run_streaming_compress.sh
... intermediate output omitted ...
18/02/02 10:51:50 INFO streaming.StreamJob: Output directory: /output/wordcount/CacheArchiveCompressFile
$ hadoop fs -ls /output/wordcount/CacheArchiveCompressFile
Found 2 items
-rw-r--r-- 1 hadoop supergroup 0 2018-02-02 10:51 /output/wordcount/CacheArchiveCompressFile/_SUCCESS
-rw-r--r-- 1 hadoop supergroup 81 2018-02-02 10:51 /output/wordcount/CacheArchiveCompressFile/part-00000.gz
$ hadoop fs -get /output/wordcount/CacheArchiveCompressFile/part-00000.gz ./
$ gunzip part-00000.gz
$ cat part-00000
and 2573
had 1526
have 350
in 1694
or 253
the 5144
this 412
to 2782
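As a shortcut, hadoop fs -text runs the file through the matching decompression codec automatically, so the gzipped output can be inspected in place without the get/gunzip round trip:

$ hadoop fs -text /output/wordcount/CacheArchiveCompressFile/part-00000.gz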