spark python pickle 对象 — cPickle.PicklingError: 无法序列化对象: NotImplementedError

pyspark_1 | 19/10/25 10:23:03 INFO SparkContext: Created broadcast 12 from broadcast at NativeMethodAccessorImpl.java:0
pyspark_1 | Traceback (most recent call last):
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/serializers.py", line 590, in dumps
pyspark_1 |     return cloudpickle.dumps(obj, 2)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 863, in dumps
pyspark_1 |     cp.dump(obj)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 260, in dump
pyspark_1 |     return Pickler.dump(self, obj)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 224, in dump
pyspark_1 |     self.save(obj)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 568, in save_tuple
pyspark_1 |     save(element)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 406, in save_function
pyspark_1 |     self.save_function_tuple(obj)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 549, in save_function_tuple
pyspark_1 |     save(state)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 655, in save_dict
pyspark_1 |     self._batch_setitems(obj.iteritems())
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 687, in _batch_setitems
pyspark_1 |     save(v)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 606, in save_list
pyspark_1 |     self._batch_appends(iter(obj))
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 642, in _batch_appends
pyspark_1 |     save(tmp[0])
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 660, in save_instancemethod
pyspark_1 |     obj=obj)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 401, in save_reduce
pyspark_1 |     save(args)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 554, in save_tuple
pyspark_1 |     save(element)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 331, in save
pyspark_1 |     self.save_reduce(obj=obj, *rv)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 425, in save_reduce
pyspark_1 |     save(state)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 655, in save_dict
pyspark_1 |     self._batch_setitems(obj.iteritems())
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 687, in _batch_setitems
pyspark_1 |     save(v)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 606, in save_list
pyspark_1 |     self._batch_appends(iter(obj))
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 642, in _batch_appends
pyspark_1 |     save(tmp[0])
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 331, in save
pyspark_1 |     self.save_reduce(obj=obj, *rv)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 425, in save_reduce
pyspark_1 |     save(state)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 286, in save
pyspark_1 |     f(self, obj)  # Call unbound method with explicit self
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 655, in save_dict
pyspark_1 |     self._batch_setitems(obj.iteritems())
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 687, in _batch_setitems
pyspark_1 |     save(v)
pyspark_1 |   File "/usr/lib/python2.7/pickle.py", line 306, in save
pyspark_1 |     rv = reduce(self.proto)
pyspark_1 |   File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py", line 1152, in __reduce__
pyspark_1 |     initial_value=self.numpy(),
pyspark_1 |   File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py", line 906, in numpy
pyspark_1 |     "numpy() is only available when eager execution is enabled.")
pyspark_1 | NotImplementedError: numpy() is only available when eager execution is enabled.
pyspark_1 | Traceback (most recent call last):
pyspark_1 |   File "/home/ubuntu/./spark.py", line 169, in <module>
pyspark_1 |     fitted_pipeline = pipeline.fit(train_df)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/ml/base.py", line 132, in fit

pyspark_1 |     return self._fit(dataset)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/ml/pipeline.py", line 109, in _fit
pyspark_1 |     model = stage.fit(dataset)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/ml/base.py", line 132, in fit
pyspark_1 |     return self._fit(dataset)
pyspark_1 |   File "/usr/local/lib/python2.7/dist-packages/elephas/ml_model.py", line 92, in _fit
pyspark_1 |     validation_split=self.get_validation_split())
pyspark_1 |   File "/usr/local/lib/python2.7/dist-packages/elephas/spark_model.py", line 151, in fit
pyspark_1 |     self._fit(rdd, epochs, batch_size, verbose, validation_split)
pyspark_1 |   File "/usr/local/lib/python2.7/dist-packages/elephas/spark_model.py", line 188, in _fit
pyspark_1 |     gradients = rdd.mapPartitions(worker.train).collect()
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py", line 816, in collect
pyspark_1 |     sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py", line 2532, in _jrdd
pyspark_1 |     self._jrdd_deserializer, profiler)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py", line 2434, in _wrap_function
pyspark_1 |     pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py", line 2420, in _prepare_for_python_RDD
pyspark_1 |     pickled_command = ser.dumps(command)
pyspark_1 |   File "/home/ubuntu/spark-2.4.4-bin-hadoop2.7/python/pyspark/serializers.py", line 600, in dumps
pyspark_1 |     raise pickle.PicklingError(msg)
pyspark_1 | cPickle.PicklingError: Could not serialize object: NotImplementedError: numpy() is only available when eager execution is enabled.

你可能感兴趣的: (spark, python, pickle 对象)