import tensorflow as tf
w = tf.constant(1.)
x = tf.constant(2.)
y = x * w                      # computed BEFORE the tape starts recording
with tf.GradientTape() as tape:
    tape.watch([w])
    y2 = x * w                 # computed inside the tape's context
grad1 = tape.gradient(y, [w])  # y was never recorded by the tape
print(grad1)
The result is [None].
Because y = x * w was computed before the with tf.GradientTape() as tape: block, the tape never recorded it and cannot compute its gradient. (If w were already a tf.Variable, it would not need to be watched by the tape at all.)
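A minimal sketch of that last point (not from the original, but it follows directly): once w is a tf.Variable, the tape tracks operations on it automatically and no watch() call is needed.

import tensorflow as tf
w = tf.Variable(1.)            # trainable variable: watched automatically
x = tf.constant(2.)
with tf.GradientTape() as tape:
    y = x * w                  # recorded because w is a tf.Variable
grad = tape.gradient(y, [w])
print(grad)                    # [<tf.Tensor: ... numpy=2.0>]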
import tensorflow as tf
w = tf.constant(1.)
x = tf.constant(2.)
y = x * w
with tf.GradientTape() as tape:
    tape.watch([w])
    y2 = x * w
grad2 = tape.gradient(y2, [w])
print(grad2)
The result is: [<tf.Tensor: id=6, shape=(), dtype=float32, numpy=2.0>], since dy2/dw = x = 2.
Note that the w in [w] must be registered with tape.watch(), because this w is a plain tensor, not a tf.Variable.
import tensorflow as tf
x = tf.random.normal([2, 4])
w = tf.random.normal([4, 3])
b = tf.zeros([3])
y = tf.constant([2, 0])
with tf.GradientTape() as tape:
    tape.watch([w, b])  # w and b are plain tensors, so they must be watched
    logits = x @ w + b
    loss = tf.reduce_mean(tf.losses.categorical_crossentropy(
        tf.one_hot(y, depth=3), logits, from_logits=True))
grads = tape.gradient(loss, [w, b])
print(grads)
x = tf.random.normal([2, 4])
w = tf.Variable(tf.random.normal([4, 3]))
b = tf.Variable(tf.zeros([3]))
y = tf.constant([2, 0])
with tf.GradientTape() as tape:
    # tape.watch([w, b])  -- not needed: w and b are tf.Variable and watched automatically
    logits = x @ w + b
    loss = tf.reduce_mean(tf.losses.categorical_crossentropy(
        tf.one_hot(y, depth=3), logits, from_logits=True))
grads = tape.gradient(loss, [w, b])
print(grads)
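As a follow-up sketch (not part of the original notes): in a real training step these gradients would typically be applied back to the variables with an optimizer. The SGD optimizer and learning rate below are assumptions chosen for illustration.

optimizer = tf.optimizers.SGD(learning_rate=0.01)   # hypothetical choice of optimizer
optimizer.apply_gradients(zip(grads, [w, b]))       # updates w and b in place
print(w.numpy())                                    # w has moved against the gradient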
Point 3: the persistent argument. tape.gradient() can only be called once on a non-persistent tape; to take gradients from the same tape repeatedly, it must be created with persistent=True, otherwise an error is raised.
with tf.GradientTape(persistent=True) as tape:
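A minimal sketch of a persistent tape, reusing the scalar setup from above: two separate gradient calls on one tape, with the tape deleted afterwards to free its recorded graph.

import tensorflow as tf
w = tf.constant(1.)
x = tf.constant(2.)
with tf.GradientTape(persistent=True) as tape:
    tape.watch([w])
    y = x * w
    z = y * y                  # z = (x*w)^2
dy_dw = tape.gradient(y, [w])  # first call: always allowed
dz_dw = tape.gradient(z, [w])  # second call: only allowed because persistent=True
print(dy_dw, dz_dw)            # [2.0], [8.0]  (dz/dw = 2*w*x^2 = 8)
del tape                       # release the tape's resources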
Point 4: second-order derivatives. A demo follows; this is rarely needed in practice.
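A minimal sketch of a second-order derivative, assuming the usual nested-tape pattern: the inner tape produces dy/dw, and the outer tape differentiates that result again.

import tensorflow as tf
w = tf.Variable(1.)
x = tf.constant(2.)
with tf.GradientTape() as tape2:        # outer tape: records the inner gradient
    with tf.GradientTape() as tape1:    # inner tape: records y
        y = x * w * w                   # y = x * w^2
    dy_dw = tape1.gradient(y, w)        # dy/dw = 2*x*w = 4.0
d2y_dw2 = tape2.gradient(dy_dw, w)      # d2y/dw2 = 2*x = 4.0
print(dy_dw, d2y_dw2)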
