python SVM BinaryClassificationMetrics areaUnderROC

ROBOT玲玉 2017-07-17 04:53:32
model=SVMWithSGD.train(trainData, 3, 50, 1)
from pyspark.mllib.evaluation import BinaryClassificationMetrics
score=model.predict(validationData.map(lambda p: p.features))
scoreAndLabel=score.zip(validationData.map(lambda p: p.label))
metrics=BinaryClassificationMetrics(scoreAndLabel)
AUC=metrics.areaUnderPR()
Error:
Py4JJavaError Traceback (most recent call last)
<ipython-input-61-8d10952be296> in <module>()
3 scoreAndLabel=score.zip(validationData.map(lambda p: p.label))
4 metrics=BinaryClassificationMetrics(scoreAndLabel)
----> 5 AUC=metrics.areaUnderPR()
6
7 scoreAndLabel.take(5)

/usr/local/spark/python/pyspark/mllib/evaluation.pyc in areaUnderPR(self)
70 Computes the area under the precision-recall curve.
71 """
---> 72 return self.call("areaUnderPR")
73
74 @since('1.4.0')

/usr/local/spark/python/pyspark/mllib/common.pyc in call(self, name, *a)
144 def call(self, name, *a):
145 """Call method of java_model"""
--> 146 return callJavaFunc(self._sc, getattr(self._java_model, name), *a)
147
148

/usr/local/spark/python/pyspark/mllib/common.pyc in callJavaFunc(sc, func, *args)
121 """ Call Java Function """
122 args = [_py2java(sc, a) for a in args]
--> 123 return _java2py(sc, func(*args))
124
125

/usr/local/spark/python/lib/py4j-0.10.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
931 answer = self.gateway_client.send_command(command)
932 return_value = get_return_value(
--> 933 answer, self.gateway_client, self.target_id, self.name)
934
935 for temp_arg in temp_args:

/usr/local/spark/python/pyspark/sql/utils.pyc in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()

/usr/local/spark/python/lib/py4j-0.10.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
310 raise Py4JJavaError(
311 "An error occurred while calling {0}{1}{2}.\n".
--> 312 format(target_id, ".", name), value)
313 else:
314 raise Py4JError(

Py4JJavaError: An error occurred while calling o6150.areaUnderPR.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 3720.0 failed 4 times, most recent failure: Lost task 1.3 in stage 3720.0 (TID 6629, data3): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/usr/local/spark/python/pyspark/worker.py", line 172, in main
process()
File "/usr/local/spark/python/pyspark/worker.py", line 167, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/usr/local/spark/python/pyspark/serializers.py", line 263, in dump_stream
vs = list(itertools.islice(iterator, batch))
File "/usr/local/spark/python/pyspark/sql/session.py", line 505, in prepare
_verify_type(obj, schema)
File "/usr/local/spark/python/pyspark/sql/types.py", line 1349, in _verify_type
_verify_type(v, f.dataType, f.nullable)
File "/usr/local/spark/python/pyspark/sql/types.py", line 1321, in _verify_type
raise TypeError("%s can not accept object %r in type %s" % (dataType, obj, type(obj)))
TypeError: DoubleType can not accept object 1 in type <type 'int'>

at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:234)
at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:63)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:47)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
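
The last Python frame in the trace shows the real problem: TypeError: DoubleType can not accept object 1 in type <type 'int'>. BinaryClassificationMetrics converts the (score, label) RDD into a DataFrame with a DoubleType schema, and in this Spark version SVMWithSGD's predict returns plain Python ints (0/1), so the pairs need to be cast to float before the metrics object is built. A minimal sketch of a fix, assuming the same pyspark.mllib API and reusing the post's trainData/validationData RDDs of LabeledPoint:

from pyspark.mllib.classification import SVMWithSGD
from pyspark.mllib.evaluation import BinaryClassificationMetrics

model = SVMWithSGD.train(trainData, iterations=3, step=50, regParam=1)

# Cast predictions (and, defensively, labels) to float so the
# DoubleType schema accepts them.
score = model.predict(validationData.map(lambda p: p.features)) \
             .map(lambda x: float(x))
labels = validationData.map(lambda p: float(p.label))
scoreAndLabel = score.zip(labels)

metrics = BinaryClassificationMetrics(scoreAndLabel)
# areaUnderROC and areaUnderPR are properties in pyspark.mllib, not
# methods, so they are read without parentheses; metrics.areaUnderPR()
# would raise a second TypeError even after the cast.
print(metrics.areaUnderROC)
print(metrics.areaUnderPR)

A quick scoreAndLabel.take(5) should now show pairs of floats, e.g. (1.0, 0.0), instead of the int-valued pairs that tripped the schema check.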


3 replies
「已注销」 2020-08-20
It's 2020 and I'm running into this problem too.
cdw_FstLst 2018-02-01
OP, did you ever solve this problem?
ROBOT玲玉 2017-07-17
What's the cause of this??
