import tensorflow as tf
import tensorflow_federated as tff

class BinaryTruePositives(tf.keras.metrics.Metric):
    def __init__(self, name='binary_true_positives', **kwargs):
        super(BinaryTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.squeeze(y_true)
        y_pred = tf.sign(y_pred)
        y_pred = tf.reshape(y_pred, [-1])
        self.true_positives.assign_add(
            tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred)))

    def result(self):
        return self.true_positives

    def reset_states(self):
        self.true_positives.assign(0)

def model_fn():
    keras_model = create_keras_model()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.MSE,   # <-- raises the TypeError below
        metrics=[BinaryTruePositives()])
TypeError: Expected tensorflow.python.keras.losses.Loss or collections.abc.Sequence, found function.
CodePudding user response:
Some more of the stack trace might be useful here, but I believe the issue in the code above is that tf.keras.losses.MSE is a function defining the loss logic, rather than an instance of tf.keras.losses.Loss itself.
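You can verify the distinction directly (a quick check, not part of the original code):

import tensorflow as tf

print(callable(tf.keras.losses.MSE))         # True: a bare loss function
print(isinstance(tf.keras.losses.MSE,
                 tf.keras.losses.Loss))      # False: hence the TypeError
print(isinstance(tf.keras.losses.MeanSquaredError(),
                 tf.keras.losses.Loss))      # True: a Loss instance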
Looking at an old version of TFF, it seems you are hitting this line, though note that you'd get a different error with a newer version of TFF (I believe you'd hit this line instead).

You can fix this by passing loss=tf.keras.losses.MeanSquaredError() instead of the existing loss argument in your model_fn above.
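Concretely, the model_fn from the question becomes (same create_keras_model and preprocessed_example_dataset as above):

def model_fn():
    keras_model = create_keras_model()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.MeanSquaredError(),  # a Loss instance, not the MSE function
        metrics=[BinaryTruePositives()])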
CodePudding user response:
class BinaryTruePositives(tf.keras.metrics.Metric):
    def __init__(self, name='Results', **kwargs):
        super(BinaryTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.reshape(y_true, [16, 4])   # batch size hard-coded to 16
        y_true = tf.cast(y_true, dtype=tf.float32)
        y_true = tf.squeeze(y_true)
        print("before pred")
        tf.print(y_pred)
        y_pred = tf.sign(y_pred)
        print("pred after sign")
        tf.print(y_pred)
        print("true")
        tf.print(y_true)
        # count a sample as correct only if all 4 components match
        equal_t = tf.equal(y_true, y_pred)
        reduce_t = tf.reduce_all(equal_t, axis=1)
        tf.print(reduce_t)
        z = tf.keras.backend.mean(reduce_t)
        tf.print(z)
        self.true_positives.assign_add(z)

    def result(self):
        return self.true_positives

    def reset_states(self):
        self.true_positives.assign(0.)
It worked, but now it throws a shape error even though I reshape y_true. The batch size is 16. After the prints, it displays the results for the first batch and then the error:
[[-0.00392384036 0.0555611625 0.0308244769 -0.0159802306]
[-0.0485811867 -0.0572906882 -0.0325558931 0.00928601343]
[-0.0213074014 -0.0333432704 0.0229184721 -0.0973156691]
...
[-0.116143309 0.0813855231 -0.114974774 0.0727083907]
[0.00289174495 0.0473632365 -0.0516404174 -0.0565651]
[-0.043574594 -0.0561362281 0.0185791403 -0.0167554729]]
[[-1 1 1 -1]
[-1 -1 -1 1]
[-1 -1 1 -1]
...
[-1 1 -1 1]
[1 1 -1 -1]
[-1 -1 1 -1]]
[[1 -1 1 -1]
[1 1 1 1]
[-1 -1 1 1]
...
[1 1 1 1]
[-1 -1 1 1]
[-1 1 -1 1]]
[0 0 0 ... 0 0 0]
0.0625
InvalidArgumentError: [_Derived_] Incompatible shapes: [64] vs. [16,4]
[[{{node
Function call stack:
wrapped_function -> wrapped_function -> wrapped_function
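The node name is cut off above, so this is an assumption rather than a confirmed diagnosis: hard-coding tf.reshape(y_true, [16, 4]) breaks as soon as a batch arrives whose size is not 16 (a final partial batch, for instance), which would produce exactly this kind of shape mismatch. A batch-size-agnostic sketch of update_state that infers the target shape from y_pred instead:

def update_state(self, y_true, y_pred, sample_weight=None):
    # derive the target shape from y_pred rather than hard-coding [16, 4],
    # so partial final batches of any size still line up
    y_true = tf.cast(tf.reshape(y_true, tf.shape(y_pred)), tf.float32)
    y_pred = tf.sign(y_pred)
    all_equal = tf.reduce_all(tf.equal(y_true, y_pred), axis=1)
    self.true_positives.assign_add(tf.keras.backend.mean(all_equal))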
I am trying multi-class classification, and the classes are encoded as:

z = []
for i in range(len(y_train)):
    print(i)
    if y_train[i] == 0:
        z.append([1, 1, 1, 1])
    elif y_train[i] == 1:
        z.append([-1, -1, -1, -1])
    elif y_train[i] == 2:
        z.append([1, -1, 1, -1])
    elif y_train[i] == 3:
        z.append([-1, 1, -1, 1])
    elif y_train[i] == 4:
        z.append([1, 1, -1, -1])
    else:
        z.append([-1, -1, 1, 1])
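The same encoding can also be written as a lookup table, which is easier to keep consistent with the metric (a sketch assuming y_train holds integer labels 0 through 5, matching the if/elif chain above):

import numpy as np

# one ±1 code vector per class, rows copied from the loop above
CLASS_CODES = np.array([[ 1,  1,  1,  1],
                        [-1, -1, -1, -1],
                        [ 1, -1,  1, -1],
                        [-1,  1, -1,  1],
                        [ 1,  1, -1, -1],
                        [-1, -1,  1,  1]], dtype=np.float32)

z = CLASS_CODES[np.asarray(y_train)]  # shape (len(y_train), 4)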