@@ -28,25 +28,22 @@ class OptimizerAE(object):
        #     tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake), logits=d_fake, name='gl'))
        generator_loss = -self.dc_loss_fake
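        # NOTE: dc_loss_fake labels generated embeddings as fake, so minimizing
        # its negation pushes the encoder to fool the discriminator. The
        # commented-out line above is the alternative non-saturating form,
        # which instead labels fakes as real.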

        # pos_weight lets one trade recall against precision by weighting the
        # cost of positive errors up or down relative to negative errors
        self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.generator_loss = generator_loss + self.cost
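        # Per the TF docs, weighted_cross_entropy_with_logits computes, element-wise,
        #   pos_weight * targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))
        # so pos_weight > 1 penalizes false negatives more heavily, which helps
        # when positives (e.g. edges in a sparse adjacency matrix) are rare.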

        all_variables = tf.trainable_variables()
        dc_var = [var for var in all_variables if 'dc_' in var.name]
        en_var = [var for var in all_variables if 'e_' in var.name]
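        # Variables are grouped by name substring ('dc_' = discriminator,
        # 'e_' = encoder) so each optimizer below updates only its own
        # sub-network; this relies on the prefixes staying unique across
        # variable scopes.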

        with tf.variable_scope(tf.get_variable_scope()):
            self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate,
                                                                  beta1=0.9, name='adam1').minimize(self.dc_loss, var_list=dc_var)  # minimize(dc_loss_real, var_list=dc_var)
            self.generator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate,
                                                              beta1=0.9, name='adam2').minimize(self.generator_loss, var_list=en_var)
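            # Both steps share FLAGS.discriminator_learning_rate; there is no
            # separate generator learning-rate flag in this code.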

        # Note: besides the adversarial optimization above, the plain
        # reconstruction cost is optimized once more on its own; when reading
        # the training loop, note where that extra optimization step runs.

@@ -57,7 +54,6 @@ class OptimizerAE(object):
class OptimizerCycle(object):
    def __init__(self, preds, labels, pos_weight, norm, d_real, d_fake, GD_real, GD_fake,
                 preds_z2g, labels_z2g, preds_cycle, labels_cycle, gradient, gradient_z):
        preds_sub = preds
        labels_sub = labels
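        # Judging by the names: preds/labels drive adjacency reconstruction,
        # preds_z2g/labels_z2g the z-to-graph mapping, preds_cycle/labels_cycle
        # the cycle-consistency pair, and gradient/gradient_z are presumably
        # gradient-penalty terms for the two discriminators.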

@@ -94,7 +90,6 @@ class OptimizerCycle(object):
        # pos_weight lets one trade recall against precision by weighting the
        # cost of positive errors up or down relative to negative errors
        self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))

        cost_cycle = norm * tf.reduce_mean(tf.square(preds_cycle - labels_cycle))
        cost_z2g = norm * tf.reduce_mean(tf.square(preds_z2g - labels_z2g))
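        # cost_cycle and cost_z2g are norm-scaled mean-squared errors: the
        # cycle-consistency reconstruction and the z-to-graph reconstruction.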

@@ -104,17 +99,16 @@ class OptimizerCycle(object):
        self.generator_loss = generator_loss + self.cost
        self.generator_loss_z2g = generator_loss_z2g

        all_variables = tf.trainable_variables()
        dc_var = [var for var in all_variables if 'dc_' in var.name]
        en_var = [var for var in all_variables if 'e_' in var.name]
        GG_var = [var for var in all_variables if 'GG' in var.name]
        GD_var = [var for var in all_variables if 'GD' in var.name]
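        # Four name-based groups: 'dc_' discriminator, 'e_' encoder, 'GG' the
        # z-to-graph generator, and 'GD' (presumably its discriminator, given
        # the GD_real/GD_fake arguments); each optimizer touches one group only.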

        with tf.variable_scope(tf.get_variable_scope()):
            self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate,
                                                                  beta1=0.9, name='adam1').minimize(self.dc_loss, var_list=dc_var)  # minimize(dc_loss_real, var_list=dc_var)
            self.generator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate,
                                                              beta1=0.9, name='adam2').minimize(self.generator_loss, var_list=en_var)

@@ -125,7 +119,6 @@ class OptimizerCycle(object):
            self.generator_optimizer_z2g = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate,
                                                                  beta1=0.9, name='adam2').minimize(self.generator_loss_z2g, var_list=GG_var)
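            # NOTE: name='adam2' is reused from the optimizer above; TF1
            # uniquifies op names so this runs, and en_var and GG_var are
            # disjoint, but a distinct name (e.g. 'adam3') would be clearer.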

        # Note: besides the adversarial optimization above, the plain
        # reconstruction cost is optimized once more on its own; when reading
        # the training loop, note where that extra optimization step runs.