To fit this into the fabric of Keras, I created a custom layer called 'Recover'. It does nothing during normal training; it only serves as a faux input layer for the reverse training step. (I'm sure the approach can be optimised further, but this is the quickest hack I could come up with.)
from keras import backend as K
from keras.layers import Layer

class Recover(Layer):
    def __init__(self, **kwargs):
        super(Recover, self).__init__(**kwargs)
        self.reverse = False

    def build(self, input_shape):
        # one trainable weight per input feature; these become the
        # "recovered" inputs during the reverse training step
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1],),
                                      initializer='uniform',
                                      trainable=True)
        super(Recover, self).build(input_shape)

    def call(self, x):
        if self.reverse:
            # ignore the actual input and emit the learnable kernel,
            # broadcast to the batch size
            return K.ones_like(x) * self.kernel
        else:
            # plain pass-through during normal training
            return x

    def compute_output_shape(self, input_shape):
        # the layer never changes the shape of its input
        return input_shape
Something along these lines:
# freeze every layer, then re-enable (and reverse) only the Recover layer
for x in model.layers:
    x.trainable = False
model.layers[0].trainable = True
model.layers[0].reverse = True
model.compile(...your compile parameters...)
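A hedged sketch of how the reverse step might then be driven (the variable names and epoch count are placeholders, and 'model' is assumed to be the compiled model from the snippet above; the 20-feature / 2-label dimensions match the example below): only the Recover kernel is trainable, so fitting against a target output pushes it towards an input that produces that output.

import numpy as np
from keras import backend as K

dummy_x = np.zeros((1, 20))          # content is ignored once reverse=True
target_y = np.array([[0.0, 1.0]])    # the output we want to find an input for

model.fit(dummy_x, target_y, epochs=500, verbose=0)
recovered_input = K.get_value(model.layers[0].kernel)   # the learned faux input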
Assuming your input data has a shape like this (just as an example):
n_obs, n_feat = 1000, 20
n_labels = 2
n_hidden = 100   # you can make this whatever you want
You would usually design the input layer like:
Dense(input_dim=n_feat, output_dim=n_hidden)
And the output layer like:
Dense(input_dim=n_hidden, output_dim=n_labels)
New output layer:
Dense(input_dim=n_hidden, output_dim=n_feat)
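For completeness, a minimal sketch of how the forward model could be assembled around the Recover layer with these dimensions (the Sequential layout, Keras 2 style Dense calls and the chosen activations are my assumptions, not part of the original answer):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Recover(input_shape=(n_feat,)))          # faux input layer defined above
model.add(Dense(n_hidden, activation='relu'))
model.add(Dense(n_labels, activation='softmax'))   # swap for Dense(n_feat) as the "new output layer"
model.compile(optimizer='adam', loss='categorical_crossentropy')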
keras.backend.concatenate(), keras.backend.reshape(), keras.backend.cast(), keras.backend.shape()
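A tiny, self-contained illustration of what these backend ops do (the tensor values here are made up):

from keras import backend as K

x = K.constant([[1., 2., 3., 4.],
                [5., 6., 7., 8.]])
print(K.eval(K.shape(x)))                       # [2 4] -> dynamic shape as a tensor
print(K.eval(K.reshape(x, (-1, 2))))            # (4, 2) view of the same values
print(K.eval(K.cast(x, 'int32')))               # dtype change
print(K.eval(K.concatenate([x, x], axis=-1)))   # (2, 8), joined along the last axis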
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y
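Note that the snippet assumes a batch_gather helper that is not shown. From how it is called (gamma_t of shape (batch, n_classes), y_tm1 of shape (batch,)), it has to pick gamma_t[i, y_tm1[i]] for every row i; a minimal sketch with a TensorFlow backend could be:

import tensorflow as tf

def batch_gather(params, indices):
    # params: (batch, n_classes), indices: (batch,) -> result: (batch,)
    # selects params[i, indices[i]] for every row i
    return tf.gather(params, indices, axis=1, batch_dims=1)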
def call(self, x):
    shape = K.shape(x)
    x = K.reverse(x, axes=1)  # reverse, so that frameness is related to a fixed point
    frame_1 = tf.gather(x, K.arange(start=0, stop=shape[1], step=3), axis=1)
    frame_2 = tf.gather(x, K.arange(start=1, stop=shape[1], step=3), axis=1)
    frame_3 = tf.gather(x, K.arange(start=2, stop=shape[1], step=3), axis=1)
    return [frame_1, frame_2, frame_3]
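As a standalone illustration of what the layer above computes (shapes and values are invented, not from the original code): reversing along the time axis and then gathering every third timestep yields three interleaved frame streams.

import tensorflow as tf

x = tf.reshape(tf.range(2 * 9 * 4, dtype=tf.float32), (2, 9, 4))   # (batch, time, features)
x_rev = tf.reverse(x, axis=[1])
frames = [tf.gather(x_rev, tf.range(i, 9, delta=3), axis=1) for i in range(3)]
print([tuple(f.shape) for f in frames])   # [(2, 3, 4), (2, 3, 4), (2, 3, 4)]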
def map_charades(y_true, y_pred):
    """ Returns mAP """
    m_aps = []
    tf_one = tf.constant(1, dtype=tf.float32)
    n_classes = y_pred.shape[1]
    for oc_i in range(n_classes):
        pred_row = y_pred[:, oc_i]
        sorted_idxs = tf_framework.argsort(-pred_row)
        true_row = y_true[:, oc_i]
        true_row = tf.map_fn(lambda i: true_row[i], sorted_idxs, dtype=np.float32)
        tp_poolean = tf.equal(true_row, tf_one)
        tp = tf.cast(tp_poolean, dtype=np.float32)
        fp = K.reverse(tp, axes=0)
        n_pos = tf.reduce_sum(tp)
        f_pcs = tf.cumsum(fp)
        t_pcs = tf.cumsum(tp)
        s = f_pcs + t_pcs
        s = tf.cast(s, tf.float32)
        t_pcs = tf.cast(t_pcs, tf.float32)
        tp_float = tf.cast(tp_poolean, np.float32)
        prec = t_pcs / s
        avg_prec = prec * tp_float
        n_pos = tf.cast(n_pos, tf.float32)
        avg_prec = avg_prec / n_pos
        avg_prec = tf.expand_dims(avg_prec, axis=0)
        m_aps.append(avg_prec)
    m_aps = K.concatenate(m_aps, axis=0)
    mAP = K.mean(m_aps)
    return mAP
# endregion

# region Callbacks
def call(self, x, mask=None):
    import theano.tensor as T
    newx = T.sort(x)
    #response = K.reverse(newx, axes=1)
    #response = K.sum(x > 0.5, axis=1) / self.k
    return newx
    #response = K.reshape(newx, [-1, 1])
    #return K.concatenate([1 - response, response], axis=self.label)
    #response = K.reshape(x[:, self.axis], (-1, 1))
    #return K.concatenate([1 - response, response], axis=self.axis)
    #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
    #s = K.sum(e, axis=self.axis, keepdims=True)
    #return e / s
def call(self, x, mask=None):
    newx = K.sort(x)
    #response = K.reverse(newx, axes=1)
    #response = K.sum(x > 0.5, axis=1) / self.k
    return K.concatenate([newx[:, :self.softmink], newx[:, newx.shape[1] - self.softmaxk:]], axis=-1)
    #response = K.reshape(newx, [-1, 1])
    #return K.concatenate([1 - response, response], axis=self.label)
    #response = K.reshape(x[:, self.axis], (-1, 1))
    #return K.concatenate([1 - response, response], axis=self.axis)
    #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
    #s = K.sum(e, axis=self.axis, keepdims=True)
    #return e / s
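The active return line above keeps only the softmink smallest and softmaxk largest activations of each row. A standalone illustration with plain TensorFlow ops (the values and k's are made up):

import tensorflow as tf

softmink, softmaxk = 2, 2
x = tf.constant([[5., 1., 9., 3., 7., 2.]])
newx = tf.sort(x, axis=-1)                                            # ascending per row
out = tf.concat([newx[:, :softmink], newx[:, -softmaxk:]], axis=-1)
print(out.numpy())                                                    # [[1. 2. 7. 9.]]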
Compat aliases for migration: tf.compat.v1.manip.reverse, tf.compat.v1.reverse, tf.compat.v1.reverse_v2
tf.reverse(tensor, axis, name=None)
For example:
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]

# 'dims' is [3] or 'dims' is [-1]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# 'dims' is '[1]' (or 'dims' is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# 'dims' is '[2]' (or 'dims' is '[-2]')
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
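The same behaviour can be checked quickly with the current tf.reverse signature (this snippet is an addition, not part of the original page; axis=[3], [1], [2] correspond to the dims values above):

import tensorflow as tf

t = tf.reshape(tf.range(24), (1, 2, 3, 4))
print(tf.reverse(t, axis=[3]).numpy())   # reverse the innermost dimension
print(tf.reverse(t, axis=[1]).numpy())   # swap the two 3x4 blocks
print(tf.reverse(t, axis=[2]).numpy())   # flip the rows within each block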