hadoop wordCount run from Eclipse reports ClassNotFoundException - Hadoop - ITeye Groups
package com.wesley.hadoop;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.job.tracker", "9.248.69.79:9001");
        conf.set("fs.default.name", "hdfs://9.248.69.79:9000");
        conf.set("hadoop.job.ugi", "hadoop");
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
Hadoop 1.0.4. I can connect to HDFS, but running it from Eclipse just won't work — it always reports ClassNotFound.
13/05/16 14:59:42 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/05/16 14:59:43 INFO input.FileInputFormat: Total input paths to process : 2
13/05/16 14:59:43 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/05/16 14:59:43 WARN snappy.LoadSnappy: Snappy native library not loaded
13/05/16 14:59:44 INFO mapred.JobClient: Running job: job__0001
13/05/16 14:59:45 INFO mapred.JobClient:  map 0% reduce 0%
13/05/16 15:00:05 INFO mapred.JobClient: Task Id : attempt__0001_m_, Status : FAILED
java.lang.RuntimeException: java.lang.ClassNotFoundException: com.wesley.hadoop.WordCount$TokenizerMapper
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:867)
at org.apache.hadoop.mapreduce.JobContext.getMapperClass(JobContext.java:199)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:719)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:370)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1121)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
Caused by: java.lang.ClassNotFoundException: com.wesley.hadoop.WordCount$TokenizerMapper
at java.net.URLClassLoader$1.run(URLClassLoader.java:202)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:190)
at java.lang.ClassLoader.loadClass(ClassLoader.java:306)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:249)
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:820)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:865)
... 8 more
13/05/16 15:00:08 WARN mapred.JobClient: Error reading task output hadoop-master
13/05/16 15:00:08 WARN mapred.JobClient: Error reading task output hadoop-master
13/05/16 15:00:08 INFO mapred.JobClient: Task Id : attempt__0001_m_, Status : FAILED
java.lang.RuntimeException: java.lang.ClassNotFoundException: com.wesley.hadoop.WordCount$TokenizerMapper
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:867)
at org.apache.hadoop.mapreduce.JobContext.getMapperClass(JobContext.java:199)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:719)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:370)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1121)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
Caused by: java.lang.ClassNotFoundException: com.wesley.hadoop.WordCount$TokenizerMapper
at java.net.URLClassLoader$1.run(URLClassLoader.java:202)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:190)
at java.lang.ClassLoader.loadClass(ClassLoader.java:306)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:249)
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:820)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:865)
... 8 more
13/05/16 15:00:08 WARN mapred.JobClient: Error reading task output hadoop-master
13/05/16 15:00:08 WARN mapred.JobClient: Error reading task output hadoop-master
Any pointers would be appreciated. I've looked up a lot of material online and tried the suggestions one by one, but it still doesn't work.
When you run map/reduce code with the hadoop-eclipse-plugin, it generates a jar file under workspace/.metadata/.plugins/org.apache.hadoop.eclipse. Edit the MANIFEST.MF inside that jar and replace the "." in the main class with "/", to avoid the error about related classes not being found.
寄生虫 wrote:
When you run map/reduce code with the hadoop-eclipse-plugin, it generates a jar file under workspace/.metadata/.plugins/org.apache.hadoop.eclipse. Edit the MANIFEST.MF inside that jar and replace the "." in the main class with "/", to avoid the error about related classes not being found.
Manifest-Version: 1.0
Ant-Version: Apache Ant 1.8.4
Created-By: 1.6.0_22-b04 (Sun Microsystems Inc.)
Bundle-ManifestVersion: 2
Bundle-Name: MapReduce Tools for Eclipse
Bundle-SymbolicName: org.apache.hadoop.eclipse;singleton:=true
Bundle-Version: 0.18
Bundle-Activator: org.apache.hadoop.eclipse.Activator
Bundle-Localization: plugin
Require-Bundle: org.eclipse.ui,org.eclipse.core.runtime,org.eclipse.jd
t.launching,org.eclipse.debug.core,org.eclipse.jdt,org.eclipse.jdt.co
re,org.eclipse.core.resources,org.eclipse.ui.ide,org.eclipse.jdt.ui,o
rg.eclipse.debug.ui,org.eclipse.jdt.debug.ui,org.eclipse.core.express
ions,org.eclipse.ui.cheatsheets,org.eclipse.ui.console,org.eclipse.ui
.navigator,org.eclipse.core.filesystem,org.apache.commons.logging
Eclipse-LazyStart: true
Bundle-ClassPath: classes/,lib/hadoop-core.jar,lib/commons-cli-1.2.jar
,lib/commons-httpclient-3.0.1.jar,lib/jackson-core-asl-1.8.8.jar,lib/
jackson-mapper-asl-1.8.8.jar,lib/commons-configuration-1.6.jar,lib/co
mmons-lang-2.4.jar,lib/hadoop-core.jar
Bundle-Vendor: Apache Hadoop
This is the MANIFEST.MF content — there is no Main-Class entry in it at all.
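(For what it's worth, the manifest quoted above is the Eclipse plugin's own OSGi bundle manifest — note the Bundle-SymbolicName — so it would never carry a Main-Class. A job jar meant to be launched with hadoop jar would declare something roughly like the sketch below, with the class name taken from the code at the top of this thread:)

Manifest-Version: 1.0
Main-Class: com.wesley.hadoop.WordCount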
I ran the same code in my Eclipse environment with no problem — you should check whether it is an environment issue.
The package version is wrong.
bewithme wrote:
The package version is wrong.
1.0.4 should be fine, shouldn't it?
Hello, how was this problem solved in the end? I've run into the same issue and haven't been able to resolve it.
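For anyone still stuck: the first warning in the log above, "No job jar file set", is the telling one — when the job is submitted straight from Eclipse without a jar, the user classes are never shipped to the TaskTrackers, hence the remote ClassNotFoundException. A minimal sketch of the usual workaround, assuming the project has first been exported to a jar (the /tmp/wordcount.jar path below is a made-up example):

// Sketch (Hadoop 1.x): inside main() of the WordCount class above,
// after creating the Configuration:
Configuration conf = new Configuration();
conf.set("fs.default.name", "hdfs://9.248.69.79:9000");
conf.set("mapred.job.tracker", "9.248.69.79:9001");
// Ship the user classes to the cluster. Without a job jar, the remote
// map tasks fail with ClassNotFoundException exactly as in the log above.
conf.set("mapred.jar", "/tmp/wordcount.jar");  // hypothetical path to the exported jar
Job job = new Job(conf, "word count");
job.setJarByClass(WordCount.class);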
Related resource: Running the wordcount program on Hadoop (from 简书)
The wordcount program in Hadoop is the equivalent of a Hello World program in other languages. Frustratingly, I had already failed to run it several times — but today, after some effort, I finally got it to succeed. Posting a screenshot here to celebrate (what is a beginner even celebrating ^--^).
How exactly I got it running I won't go into — there are plenty of articles online. Here I mainly record the pitfalls I hit.

1. When starting hadoop, it printed "starting namenode on []". I am running pseudo-distributed mode, so the brackets should contain localhost. After some digging I found a misconfigured property in core-site.xml: the value of fs.default.name should be hdfs://localhost:9000, but I had written hdfs:localhost:9000, missing the double slash. After adding it and restarting, jps showed the namenode had started successfully.

2. When running the wordcount program, an exception appeared: File file:/usr/hadoop/hadoop-2.7.1/usr/hadoop/tmp/nm-local-dir/usercache/root/appcache/application_2_0002/usr/hadoop/tmp/nm-local-dir/usercache/root does not exist
Failing this attempt. Failing the application. It looked like some file or directory did not exist, so I searched online for related material, but half a day later had found nothing reliable — and the path in the exception did not even seem to exist. Only the tmp segment looked familiar, and then it hit me that core-site.xml has a property configuring the temp directory. Re-editing that file, I found the value of hadoop.tmp.dir was missing a "/" character, which is exactly why the path could not be found. After correcting the path and re-running, I got the result shown in the screenshot above.

Both pitfalls came down to carelessness, so I'll pay more attention in future. But the main gain from filling these holes is accumulating experience and methods for solving problems.
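Putting both fixes together, a core-site.xml fragment for this pseudo-distributed setup would look roughly like the sketch below (the hadoop.tmp.dir value is inferred from the error path above, so treat it as an assumption):

<configuration>
  <property>
    <name>fs.default.name</name>
    <!-- note the double slash; hdfs:localhost:9000 reproduces pitfall 1 -->
    <value>hdfs://localhost:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <!-- leading slash required; a relative value reproduces pitfall 2 -->
    <value>/usr/hadoop/tmp</value>
  </property>
</configuration>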
From the ITeye blog of java开发者 (a systems-architecture enthusiast):
Once the hadoop 2.2 environment is set up, you can run the wordcount example to count the words in a file. Without further ado, here are the steps:
First create a directory under /usr/local/hadoop/ to hold our test file; name it myfile. Then go into myfile and create a file named wordcount.txt containing the following data:
hello hadoop
hello java
hello world
Run hadoop fs -mkdir /input to create an input directory in HDFS;
Run hadoop fs -put /usr/local/hadoop/myfile/wordcount.txt /input/ to upload the local wordcount.txt file into the input directory in HDFS;
Make sure there is no out directory under /input in HDFS, or the job will fail. Then change into the /usr/local/hadoop/share/hadoop/mapreduce/ directory and run the following command to do the count:
hadoop jar hadoop-mapreduce-examples-2.2.0.jar wordcount /input/wordcount.txt /input/out
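(If a previous run left /input/out behind, remove it first — e.g. hadoop fs -rm -r /input/out — otherwise the job aborts complaining that the output directory already exists.)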
Below is the output printed when the run finishes:
[root@master mapreduce]# hadoop jar hadoop-mapreduce-examples-2.2.0.jar wordcount /input/wordcount.txt /input/out
14/03/09 19:32:19 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
14/03/09 19:32:22 INFO input.FileInputFormat: Total input paths to process : 1
14/03/09 19:32:22 INFO mapreduce.JobSubmitter: number of splits:1
14/03/09 19:32:22 INFO Configuration.deprecation: user.name is deprecated. Instead, use mapreduce.job.user.name
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.jar is deprecated. Instead, use mapreduce.job.jar
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.output.value.class is deprecated. Instead, use mapreduce.job.output.value.class
14/03/09 19:32:22 INFO Configuration.deprecation: mapreduce.combine.class is deprecated. Instead, use mapreduce.job.combine.class
14/03/09 19:32:22 INFO Configuration.deprecation: mapreduce.map.class is deprecated. Instead, use mapreduce.job.map.class
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.job.name is deprecated. Instead, use mapreduce.job.name
14/03/09 19:32:22 INFO Configuration.deprecation: mapreduce.reduce.class is deprecated. Instead, use mapreduce.job.reduce.class
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.input.dir is deprecated. Instead, use mapreduce.input.fileinputformat.inputdir
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.map.tasks is deprecated. Instead, use mapreduce.job.maps
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.output.key.class is deprecated. Instead, use mapreduce.job.output.key.class
14/03/09 19:32:22 INFO Configuration.deprecation: mapred.working.dir is deprecated. Instead, use mapreduce.job.working.dir
14/03/09 19:32:23 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_0_0002
14/03/09 19:32:25 INFO impl.YarnClientImpl: Submitted application application_0_0002 to ResourceManager at /0.0.0.0:8032
14/03/09 19:32:25 INFO mapreduce.Job: The url to track the job: http://master:8088/proxy/application_0_0002/
14/03/09 19:32:25 INFO mapreduce.Job: Running job: job_0_0002
14/03/09 19:32:55 INFO mapreduce.Job: Job job_0_0002 running in uber mode : false
14/03/09 19:32:55 INFO mapreduce.Job:  map 0% reduce 0%
14/03/09 19:33:33 INFO mapreduce.Job:  map 100% reduce 0%
14/03/09 19:33:45 INFO mapreduce.Job:  map 100% reduce 100%
14/03/09 19:33:46 INFO mapreduce.Job: Job job_0_0002 completed successfully
14/03/09 19:33:47 INFO mapreduce.Job: Counters: 43
File System Counters
FILE: Number of bytes read=54
FILE: Number of bytes written=158345
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=139
HDFS: Number of bytes written=32
HDFS: Number of read operations=6
HDFS: Number of large read operations=0
HDFS: Number of write operations=2
Job Counters
Launched map tasks=1
Launched reduce tasks=1
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=36121
Total time spent by all reduces in occupied slots (ms)=7030
Map-Reduce Framework
Map input records=3
Map output records=6
Map output bytes=60
Map output materialized bytes=54
Input split bytes=103
Combine input records=6
Combine output records=4
Reduce input groups=4
Reduce shuffle bytes=54
Reduce input records=4
Reduce output records=4
Spilled Records=8
Shuffled Maps =1
Failed Shuffles=0
Merged Map outputs=1
GC time elapsed (ms)=588
CPU time spent (ms)=14810
Physical memory (bytes) snapshot=
Virtual memory (bytes) snapshot=
Total committed heap usage (bytes)=
Shuffle Errors
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=36
File Output Format Counters
Bytes Written=32
[root@master mapreduce]#
Check the result:
[root@master mapreduce]# hadoop fs -lsr /input
lsr: DEPRECATED: Please use 'ls -R' instead.
drwxr-xr-x   - root supergroup  19:33 /input/out
-rw-r--r--   3 root supergroup  19:33 /input/out/_SUCCESS
-rw-r--r--   3 root supergroup  19:33 /input/out/part-r-00000
-rw-r--r--   3 root supergroup  19:26 /input/wordcount.txt
[root@master mapreduce]# hadoop fs -cat /input/out/part-r-00000
hadoop	1
hello	3
java	1
world	1
[root@master mapreduce]#
Success!
When we follow the wordcount tutorials that blanket the internet, we sometimes hit an error. It looks like this:
/usr/local/hadoop-1.2.1/bin# ./hadoop jar /home/ftp/temp/wordcount.jar WordCount /home/input /home/output
Exception in thread "main" java.lang.ClassNotFoundException: WordCount
at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:348)
at org.apache.hadoop.util.RunJar.main(RunJar.java:153)
It reports ClassNotFound.
In this case it says the WordCount class cannot be found. The reason is that
when you packaged wordcount into the jar file, you declared your own package, so the JVM can no longer find the class under its bare name.
When typing the command, just include the package path:
usr/local/hadoop-1.2.1/bin# ./hadoop jar /home/ftp/temp/wordcount.jar com.myPackage.WordCount /home/input /home/output
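If you are not sure which package the class ended up in, listing the jar's contents shows the exact path to use:

jar tf /home/ftp/temp/wordcount.jar

An entry such as com/myPackage/WordCount.class means the class argument on the command line must be com.myPackage.WordCount.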