Spark Streaming + Kafka ClassNotFound

weixin_38557683 2017-09-21 12:17:16
package rizhichuli.sparkstreaming

import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
import org.apache.spark.SparkConf
import java.util.Properties
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.kafka
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.util._
//import org.apache.spark.examples.streaming._
import org.apache.kafka.serializer.StringDecoder
import kafka.utils.VerifiableProperties // the imports shown in red were added afterwards, and they error out: kafka.utils and the other kafka packages cannot be found

/**
 * @author ${穆金星}
 */
object App {

  def main(args: Array[String]) {
    //if (args.length < 4) {
    //  System.err.println("Usage: KafkaWordCount <zkQuorum> <group> <topics> <numThreads>")
    //  System.exit(1)
    //}
    System.setProperty("hadoop.home.dir", "D:\\appbase\\hadoop-2.7.4\\hadoop-2.7.4")

    val topic = Set("linuxSysteminfos")
    val brokers = "10.218.7.232:9092"

    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("App")
    val sc = new SparkContext(sparkConf)
    val ssc = new StreamingContext(sc, Seconds(10))

    //ssc.sparkContext.setLogLevel("ERROR")

    val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers, "serializer.class" -> "kafka.serializer.StringEnvoder")
    val kafkaStream = KafkaUtils.createDirectStream(ssc, kafkaParams, topic)

    kafkaStream.print()
    ssc.start()
    ssc.awaitTermination()
  }
}






The error after running is:

17/09/20 22:35:10 ERROR Executor: Exception in task 0.0 in stage 0.0 (TID 0)
java.lang.NoSuchMethodException: scala.runtime.Nothing$.<init>(kafka.utils.VerifiableProperties)
at java.lang.Class.getConstructor0(Class.java:3074)
at java.lang.Class.getConstructor(Class.java:1817)
at org.apache.spark.streaming.kafka.KafkaRDD$KafkaRDDIterator.<init>(KafkaRDD.scala:150)
at org.apache.spark.streaming.kafka.KafkaRDD.compute(KafkaRDD.scala:136)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:86)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
17/09/20 22:35:11 WARN TaskSetManager: Lost task 0.0 in stage 0.0 (TID 0, localhost): java.lang.NoSuchMethodException: scala.runtime.Nothing$.<init>(kafka.utils.VerifiableProperties)
at java.lang.Class.getConstructor0(Class.java:3074)
at java.lang.Class.getConstructor(Class.java:1817)
at org.apache.spark.streaming.kafka.KafkaRDD$KafkaRDDIterator.<init>(KafkaRDD.scala:150)
at org.apache.spark.streaming.kafka.KafkaRDD.compute(KafkaRDD.scala:136)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:86)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)



After adding the kafka.utils.VerifiableProperties class, I get:

object utils is not a member of package org.apache.kafka



But I have confirmed that the kafka package is included in my Maven dependencies and that it contains this class. Hoping one of the experts here can help explain this.
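
(A note on that compile error: in the 0.8.x Kafka client, the Scala classes live under the root package kafka rather than org.apache.kafka, so the import that resolves is kafka.utils.VerifiableProperties, not org.apache.kafka.utils.VerifiableProperties. For reference, a sketch of the build dependencies this code appears to need, written as sbt coordinates since the pom is not shown; the versions are assumptions based on the stack trace, not taken from the post:

// build.sbt: a sketch only; Spark 2.0.2 / Scala 2.11 are guesses
scalaVersion := "2.11.8"

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-core"      % "2.0.2",
  "org.apache.spark" %% "spark-streaming" % "2.0.2",
  // the 0.8 connector (Maven artifactId: spark-streaming-kafka-0-8_2.11) transitively
  // pulls in the kafka_2.11 jar that provides kafka.serializer.StringDecoder and
  // kafka.utils.VerifiableProperties
  "org.apache.spark" %% "spark-streaming-kafka-0-8" % "2.0.2"
))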
2 replies
卡奥斯道 2017-09-25
Try changing

val kafkaStream = KafkaUtils.createDirectStream(ssc, kafkaParams, topic)

to the following, i.e. specify the type parameters explicitly on the call:

val kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topic)
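
Putting it together, a minimal sketch of the fixed job (the topic name and broker address are copied from the question; the rest is illustrative and assumes the 0.8 direct-stream connector):

package rizhichuli.sparkstreaming

import kafka.serializer.StringDecoder // root package kafka, not org.apache.kafka
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object App {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("App")
    val ssc = new StreamingContext(sparkConf, Seconds(10))

    val topics = Set("linuxSysteminfos")
    // metadata.broker.list is the setting the direct API needs; the original
    // "serializer.class" -> "kafka.serializer.StringEnvoder" entry is a misspelled
    // producer-side setting and can simply be dropped for a consumer
    val kafkaParams = Map[String, String]("metadata.broker.list" -> "10.218.7.232:9092")

    // The four explicit type parameters (key type, value type, key decoder, value
    // decoder) are what the original call was missing: without them Scala infers
    // Nothing for the decoders, and Spark then fails at runtime trying to construct
    // Nothing(VerifiableProperties) reflectively, which is exactly the
    // NoSuchMethodException in the stack trace above.
    val kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topics)

    kafkaStream.map(_._2).print() // each record is a (key, value) pair; print the values
    ssc.start()
    ssc.awaitTermination()
  }
}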
weixin_38557683 2017-09-21
Bumping my own thread, hoping an expert will take a look.
