keras中model.fit()出错,请帮忙看看

小李飞刀李寻欢 2019-10-14 04:36:00
ValueError: Tensor conversion requested dtype float32_ref for Tensor with dtype float32: 'Tensor("Adam/dense/kernel/m/Initializer/zeros:0", shape=(256, 513), dtype=float32)'

请教下这是啥情况啊?
model.fit(X_train,y_train,batch_size=batch_size,epochs=200)
这句出错了。
它说数据类型的问题,但是我整个过程都是tf.float32,我不知道咋就错了

完整错误如下:
ValueError Traceback (most recent call last)
<ipython-input-17-713fac5079f0> in <module>
----> 1 model.fit(X_train,y_train,batch_size=batch_size,epochs=200)

d:\python\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
778 validation_steps=validation_steps,
779 validation_freq=validation_freq,
--> 780 steps_name='steps_per_epoch')
781
782 def evaluate(self,

d:\python\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py in model_iteration(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps, validation_freq, mode, validation_in_fit, prepared_feed_values_from_dataset, steps_name, **kwargs)
155
156 # Get step function and loop type.
--> 157 f = _make_execution_function(model, mode)
158 use_steps = is_dataset or steps_per_epoch is not None
159 do_validation = val_inputs is not None

d:\python\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py in _make_execution_function(model, mode)
530 if model._distribution_strategy:
531 return distributed_training_utils._make_execution_function(model, mode)
--> 532 return model._make_execution_function(mode)
533
534

d:\python\lib\site-packages\tensorflow\python\keras\engine\training.py in _make_execution_function(self, mode)
2274 def _make_execution_function(self, mode):
2275 if mode == ModeKeys.TRAIN:
-> 2276 self._make_train_function()
2277 return self.train_function
2278 if mode == ModeKeys.TEST:

d:\python\lib\site-packages\tensorflow\python\keras\engine\training.py in _make_train_function(self)
2217 # Training updates
2218 updates = self.optimizer.get_updates(
-> 2219 params=self._collected_trainable_weights, loss=self.total_loss)
2220 # Unconditional updates
2221 updates += self.get_updates_for(None)

d:\python\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py in get_updates(self, loss, params)
495 if g is not None and v.dtype != dtypes.resource
496 ])
--> 497 return [self.apply_gradients(grads_and_vars)]
498
499 def _set_hyper(self, name, value):

d:\python\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py in apply_gradients(self, grads_and_vars, name)
433 _ = self.iterations
434 self._create_hypers()
--> 435 self._create_slots(var_list)
436
437 self._prepare(var_list)

d:\python\lib\site-packages\tensorflow\python\keras\optimizer_v2\adam.py in _create_slots(self, var_list)
143 # Separate for-loops to respect the ordering of slot variables from v1.
144 for var in var_list:
--> 145 self.add_slot(var, 'm')
146 for var in var_list:
147 self.add_slot(var, 'v')

d:\python\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py in add_slot(self, var, slot_name, initializer)
576 dtype=var.dtype,
577 trainable=False,
--> 578 initial_value=initial_value)
579 backend.track_variable(weight)
580 slot_dict[slot_name] = weight

d:\python\lib\site-packages\tensorflow\python\ops\variables.py in __call__(cls, *args, **kwargs)
259 return cls._variable_v1_call(*args, **kwargs)
260 elif cls is Variable:
--> 261 return cls._variable_v2_call(*args, **kwargs)
262 else:
263 return super(VariableMetaclass, cls).__call__(*args, **kwargs)

d:\python\lib\site-packages\tensorflow\python\ops\variables.py in _variable_v2_call(cls, initial_value, trainable, validate_shape, caching_device, name, variable_def, dtype, import_scope, constraint, synchronization, aggregation, shape)
253 synchronization=synchronization,
254 aggregation=aggregation,
--> 255 shape=shape)
256
257 def __call__(cls, *args, **kwargs):

d:\python\lib\site-packages\tensorflow\python\ops\variables.py in <lambda>(**kws)
234 shape=None):
235 """Call on Variable class. Useful to force the signature."""
--> 236 previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
237 for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
238 previous_getter = _make_getter(getter, previous_getter)

d:\python\lib\site-packages\tensorflow\python\ops\variable_scope.py in default_variable_creator_v2(next_creator, **kwargs)
2542 synchronization=synchronization,
2543 aggregation=aggregation,
-> 2544 shape=shape)
2545
2546

d:\python\lib\site-packages\tensorflow\python\ops\variables.py in __call__(cls, *args, **kwargs)
261 return cls._variable_v2_call(*args, **kwargs)
262 else:
--> 263 return super(VariableMetaclass, cls).__call__(*args, **kwargs)
264
265

d:\python\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in __init__(self, initial_value, trainable, collections, validate_shape, caching_device, name, dtype, variable_def, import_scope, constraint, distribute_strategy, synchronization, aggregation, shape)
458 synchronization=synchronization,
459 aggregation=aggregation,
--> 460 shape=shape)
461
462 def __repr__(self):

d:\python\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in _init_from_args(self, initial_value, trainable, collections, caching_device, name, dtype, constraint, synchronization, aggregation, shape)
603 initial_value = ops.convert_to_tensor(
604 initial_value() if init_from_fn else initial_value,
--> 605 name="initial_value", dtype=dtype)
606 # Don't use `shape or initial_value.shape` since TensorShape has
607 # overridden `__bool__`.

d:\python\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor(value, dtype, name, preferred_dtype, dtype_hint)
1085 preferred_dtype = deprecation.deprecated_argument_lookup(
1086 "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
-> 1087 return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
1088
1089

d:\python\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor_v2(value, dtype, dtype_hint, name)
1143 name=name,
1144 preferred_dtype=dtype_hint,
-> 1145 as_ref=False)
1146
1147

d:\python\lib\site-packages\tensorflow\python\framework\ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx, accept_symbolic_tensors, accept_composite_tensors)
1222
1223 if ret is None:
-> 1224 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1225
1226 if ret is NotImplemented:

d:\python\lib\site-packages\tensorflow\python\framework\ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
1016 raise ValueError(
1017 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
-> 1018 (dtype.name, t.dtype.name, str(t)))
1019 return t
1020


请大佬指点下,多谢
...全文
1447 1 打赏 收藏 转发到动态 举报
AI 作业
写回复
用AI写文章
1 条回复
切换为时间正序
请发表友善的回复…
发表回复
rgc 2021-03-10
  • 打赏
  • 举报
回复
请问最后怎么解决的?

37,743

社区成员

发帖
与我相关
我的任务
社区描述
JavaScript,VBScript,AngelScript,ActionScript,Shell,Perl,Ruby,Lua,Tcl,Scala,MaxScript 等脚本语言交流。
社区管理员
  • 脚本语言(Perl/Python)社区
  • WuKongSecurity@BOB
加入社区
  • 近7日
  • 近30日
  • 至今

试试用AI创作助手写篇文章吧