"partition.assignment.strategy" which has no default value.

米哥2008 2017-05-05 12:51:35
After Spark Streaming finishes processing the data, writing the results back to Kafka fails with the following error:

2017-05-04 13:03:35,105 [Executor task launch worker-0] ERROR [org.apache.spark.executor.Executor] 96 - Exception in task 0.0 in stage 59.0 (TID 52)
org.apache.kafka.common.config.ConfigException: Missing required configuration "partition.assignment.strategy" which has no default value.
at org.apache.kafka.common.config.ConfigDef.parse(ConfigDef.java:124)
at org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:48)
at org.apache.kafka.clients.consumer.ConsumerConfig.<init>(ConsumerConfig.java:194)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:430)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:413)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:400)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getConfigConsumer(ConfigHelper.java:78)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getConfig(ConfigHelper.java:35)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getProperties(ConfigHelper.java:94)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getProducerProperties(ConfigHelper.java:105)
at com.unistacks.tamboo.kafkaclient.producer.KafkaProducerFactory.getProducerCommon(KafkaProducerFactory.java:33)
at com.unistacks.tamboo.kafkaclient.producer.KafkaProducerFactory.getProducer(KafkaProducerFactory.java:17)
at com.bigdata.spark.utils.KafkaClient.<init>(KafkaClient.java:30)
at com.bigdata.spark.utils.KafkaFactory$$anonfun$getOrCreateProducer$1.apply(KafkaFactory.scala:24)
at com.bigdata.spark.utils.KafkaFactory$$anonfun$getOrCreateProducer$1.apply(KafkaFactory.scala:22)
at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:189)
at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:91)
at com.bigdata.spark.utils.KafkaFactory$.getOrCreateProducer(KafkaFactory.scala:22)
at com.bigdata.spark.DirectKafkaWordCount$$anonfun$main$2$$anonfun$apply$1.apply(DirectKafkaWordCount.scala:131)
at com.bigdata.spark.DirectKafkaWordCount$$anonfun$main$2$$anonfun$apply$1.apply(DirectKafkaWordCount.scala:130)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
at org.apache.spark.scheduler.Task.run(Task.scala:70)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
2017-05-04 13:03:35,106 [task-result-getter-0] WARN [org.apache.spark.scheduler.TaskSetManager] 71 - Lost task 0.0 in stage 59.0 (TID 52, localhost): org.apache.kafka.common.config.ConfigException: Missing required configuration "partition.assignment.strategy" which has no default value.
    (stack trace identical to the one above)

2017-05-04 13:03:35,110 [task-result-getter-0] ERROR [org.apache.spark.scheduler.TaskSetManager] 75 - Task 0 in stage 59.0 failed 1 times; aborting job

How can I fix this?
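Judging from the trace, the KafkaProducerFactory.getProducer path internally builds a KafkaConsumer as well (see the ConfigHelper.getConfigConsumer frame), and in the 0.8.2-era Kafka client the new consumer's ConfigDef declares partition.assignment.strategy as required with no default value. Below is a minimal sketch of the usual workaround, assuming an 0.8.2-era kafka-clients jar; the config keys are standard Kafka keys, but the helper itself is hypothetical and is not the ConfigHelper from the trace:

import java.util.Properties
import org.apache.kafka.clients.consumer.KafkaConsumer

// Hypothetical helper (not the ConfigHelper above): builds consumer
// properties that satisfy the 0.8.2 new-consumer ConfigDef, which
// requires "partition.assignment.strategy" and has no default for it.
def consumerProps(brokers: String, groupId: String): Properties = {
  val props = new Properties()
  props.put("bootstrap.servers", brokers)
  props.put("group.id", groupId)
  // The key from the error message; the 0.8.2 client accepts the
  // short names "range" or "roundrobin" here.
  props.put("partition.assignment.strategy", "range")
  props.put("key.deserializer",
    "org.apache.kafka.common.serialization.StringDeserializer")
  props.put("value.deserializer",
    "org.apache.kafka.common.serialization.StringDeserializer")
  props
}

val consumer = new KafkaConsumer[String, String](
  consumerProps("localhost:9092", "wordcount-group"))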
3 replies
「已注销」 2019-09-18
How did you solve it?
scaling_heights 2018-08-10
Reference: https://www.jianshu.com/p/688e1b751a85
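A note on versions, in case the linked article becomes unavailable: on Kafka 0.9+ clients this key does have a default (RangeAssignor), so seeing the error at all usually means an older 0.8.2-era kafka-clients jar ended up on the executor classpath; it is worth checking the assembly jar for mixed Kafka client versions. If the key is set explicitly on a 0.9+ client, it takes a fully-qualified assignor class rather than the old short names. A sketch under that assumption:

import java.util.Properties

// Assumption: a 0.9+ kafka-clients jar. Here the value must be an
// assignor class name, not the 0.8.2 short names "range"/"roundrobin".
val props = new Properties()
props.put("partition.assignment.strategy",
  "org.apache.kafka.clients.consumer.RangeAssignor")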
jukiq 2017-06-21
I've been hitting this problem for the past few days as well. OP, how did you solve it?
