Removed extra scaling by batch size.

William L Hamilton, 2017-11-03 12:19:52 -07:00, committed by GitHub
commit 326710993f (parent 826e715cb1)

@@ -388,7 +388,6 @@ class SampleAndAggregate(GeneralizedModel):
                 self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
         self.loss = self.link_pred_layer.loss(self.outputs1, self.outputs2, self.neg_outputs)
-        self.loss = self.loss / tf.cast(self.batch_size, tf.float32)
         tf.summary.scalar('loss', self.loss)

     def _accuracy(self):
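
A minimal sketch of why the deleted line was an extra scaling, assuming the link-prediction loss is already a per-batch mean (TensorFlow 1.x; the pair affinities pos_aff/neg_aff and the sigmoid cross-entropy form are illustrative assumptions, not the repository's actual link_pred_layer):

import tensorflow as tf

# Hypothetical affinity scores for 4 positive and 4 negative node pairs.
pos_aff = tf.constant([2.0, 1.5, 0.5, 3.0])
neg_aff = tf.constant([-1.0, 0.2, -0.5, 0.1])

# Cross-entropy per pair; reduce_mean already divides by the batch size.
pos_xent = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(pos_aff), logits=pos_aff)
neg_xent = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(neg_aff), logits=neg_aff)
loss = tf.reduce_mean(pos_xent) + tf.reduce_mean(neg_xent)

# The removed line would have normalized by the batch size a second time:
# loss = loss / tf.cast(4, tf.float32)

with tf.Session() as sess:
    print(sess.run(loss))

If the layer's loss is a mean over the batch, dividing it again by batch_size shrinks the gradients by that factor and makes the effective learning rate depend on the batch size, which is presumably why the line was dropped.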