Java InvalidInputException:输入路径不存在
标签:java, eclipse, apache, hadoop, mapreduce。运行 MapReduce 作业时出现以下异常:
15/12/25 16:00:07 INFO jvm.JvmMetrics: Initializing JVM Metrics with processName=JobTracker, sessionId=
15/12/25 16:00:07 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
15/12/25 16:00:07 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
Exception in thread "main" org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: file:/C:/Users/HARSH/workspace1/hadoop/words.txt
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:224)
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:241)
at org.apache.hadoop.mapred.JobClient.writeNewSplits(JobClient.java:885)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:779)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:432)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:447)
at hadoop.wordcount.main(wordcount.java:70)
有人能帮忙吗?问题是否在包文件中?
我给出的参数是“input.txt output”
代码如下:
package hadoop;
import org.apache.hadoop.io.IntWritable;
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class wordcount {
static public class wordmap extends Mapper<IntWritable, Text, Text, IntWritable>
{
public void map(IntWritable key, Text value, Context context) throws IOException, InterruptedException
{
Text keys = new Text();
IntWritable one= new IntWritable(1);
StringTokenizer tokens= new StringTokenizer(value.toString());
while(tokens.hasMoreTokens())
{
keys.set(tokens.nextToken());
context.write(keys, one);
}
}
}
static public class wordred extends Reducer<Text, IntWritable, Text, IntWritable>
{
public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
{
int sum=0;
for (IntWritable count : values)
{
sum=sum+ count.get();
}
context.write(key, new IntWritable(sum));
}
}
public static void main(String args[]) throws Exception
{
Configuration conf=new Configuration();
Job job= new Job(conf,"wordcount");
job.setJarByClass(wordcount.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapperClass(wordmap.class);
job.setReducerClass(wordred.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.waitForCompletion(true);
}
}
package hadoop;
import org.apache.hadoop.io.IntWritable;
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class wordcount {
static public class wordmap extends Mapper<IntWritable, Text, Text, IntWritable>
{
public void map(IntWritable key, Text value, Context context) throws IOException, InterruptedException
{
Text keys = new Text();
IntWritable one = new IntWritable(1);
StringTokenizer tokens = new StringTokenizer(value.toString());
while (tokens.hasMoreTokens())
{
keys.set(tokens.nextToken());
context.write(keys, one);
}
}
}
static public class wordred extends Reducer<Text, IntWritable, Text, IntWritable>
{
public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
{
int sum = 0;
for (IntWritable count : values)
{
sum = sum + count.get();
}
context.write(key, new IntWritable(sum));
}
}
public static void main(String args[]) throws Exception
{
Configuration conf = new Configuration();
Job job = new Job(conf, "wordcount");
job.setJarByClass(wordcount.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapperClass(wordmap.class);
job.setReducerClass(wordred.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.waitForCompletion(true);
}
}
这不是编译错误。异常信息清楚地表明,应用程序找不到文件 C:/Users/HARSH/workspace1/hadoop/words.txt。请检查:
- 文件存在且路径正确(尝试使用绝对路径)
- 你有访问权限吗
- 没有其他程序打开该文件