WordCount program runs successfully on Linux but fails on Windows

Mr-稻帅 2014-07-27 02:38:37
The program is as follows:
package com.javen.wordcount;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCount {

    private static final String INPUT_PATH = "hdfs://192.168.0.167:9000/user/micmiu/test/input";
    private static final String OUT_PATH = "hdfs://192.168.0.167:9000/user/micmiu/test/output";

    // Mapper: splits each input line into tokens and emits (word, 1) pairs
    public static class WordCountMap extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {

        private static final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);

            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    // Reducer: sums the counts emitted for each word
    public static class WordCountReduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    // Driver: configures and submits the job using the old mapred API
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(WordCountMap.class);
        conf.setReducerClass(WordCountReduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(INPUT_PATH));
        FileOutputFormat.setOutputPath(conf, new Path(OUT_PATH));

        JobClient.runJob(conf);
    }

}


In a Linux environment it runs without any problems and writes the results to output.... but on Windows it fails with an error:

Eclipse on Windows connects to Hadoop without any problems~
4 replies
weixin_42004975 2021-07-19

OP, did you ever solve this?

wsy85 2014-08-15
Windows doesn't have HADOOP_HOME configured.
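
If a missing HADOOP_HOME is indeed the cause, one common workaround for submitting the job from Eclipse on Windows is a minimal sketch like the following, assuming a Hadoop distribution (with winutils.exe in its bin directory, for Hadoop 2.x clients) unpacked locally; the D:/hadoop path is hypothetical:

    // At the start of main(), before "JobConf conf = new JobConf(WordCount.class);"
    // Hypothetical local path; its bin directory should contain winutils.exe.
    // Equivalent to defining the HADOOP_HOME environment variable before launching Eclipse.
    if (System.getProperty("os.name").toLowerCase().contains("windows")) {
        System.setProperty("hadoop.home.dir", "D:/hadoop");
    }

Alternatively, define HADOOP_HOME as a Windows environment variable and restart Eclipse so it is picked up. The actual stack trace from the Windows run would confirm whether this is the failing check.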
Mr-稻帅 2014-07-29
Nobody has answered yet~
Mr-稻帅 2014-07-28
Bumping my own thread, don't let it sink~
