GCN model is not learning


I am trying to implement a GCN layer using TensorFlow, but it is not learning. Can someone check what the potential issue could be?

I have tried normalizing the adjacency matrix, and even replaced it with the identity matrix so that the GCN layer reduces to a simple MLP, but there is no change. I think I have made some fundamental/silly mistake in my implementation which I am not able to find. Can someone let me know what the issue could be?
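For reference, by "normalizing" I mean something along the lines of the usual symmetric renormalization (a rough sketch, not necessarily my exact code; the exact recipe shouldn't matter since the identity matrix shows the same behaviour):

# Illustrative only: standard GCN renormalization A_hat = D^{-1/2} (A + I) D^{-1/2}
import numpy as np

def normalize_adjacency(A):
    A_tilde = A + np.eye(A.shape[0])          # add self-loops
    d = A_tilde.sum(axis=1)                   # node degrees (>= 1 after self-loops)
    D_inv_sqrt = np.diag(1.0 / np.sqrt(d))    # D^{-1/2}
    return D_inv_sqrt @ A_tilde @ D_inv_sqrt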

!pip install numpy
!pip install tensorflow
!pip install spektral
#!pip install tqdm

import numpy as np
import tensorflow as tf
import spektral

def masked_cross_entropy_loss(labels, logits, mask):
  loss = tf.nn.softmax_cross_entropy_with_logits(labels, logits)
  mask = tf.cast(mask, dtype=tf.float32)
  # The step below is important because we want the mean over the masked nodes only.
  # Dividing the mask by its mean equals mask * total_nodes / total_masked_nodes, which
  # comes in handy when we take the mean of the loss in the final step: the total node
  # count cancels between the two means, leaving only the number of masked nodes in the
  # denominator.
  mask /= tf.reduce_mean(mask)
  loss *= mask
  return tf.reduce_mean(loss)
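# Quick numeric check of the mask trick (illustration only, not part of the model):
# with 4 nodes of which 2 are in the training mask,
#   mask = [1., 1., 0., 0.]            -> tf.reduce_mean(mask) = 0.5
#   mask / 0.5 = [2., 2., 0., 0.]
#   tf.reduce_mean(loss * mask) = (loss[0] + loss[1]) / 2,
# i.e. exactly the mean loss over the masked nodes.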

def masked_accuracy(labels, logits, mask):
  accuracy_array = tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels, axis=1))
  accuracy_array = tf.cast(accuracy_array, dtype=tf.float32)
  mask = tf.cast(mask, dtype=tf.float32)
  mask /= tf.reduce_mean(mask)
  accuracy_array *= mask
  return tf.reduce_mean(accuracy_array)

class GCNLayer:
  def __init__(self, A):
    self.A = A

  def _transform(self, units, features, trans_func):
    if trans_func == 'dense':
      features = tf.keras.layers.Dense(units)(features)
      features = tf.cast(features, dtype=tf.float32)
      return features
    else:
      raise Exception('Transformation function not implemented')

  def _aggregate(self, features, agg_func):
    if agg_func == 'adj_matmul':
      return self.A @ features
    else:
      raise Exception('Aggregation function not implemented')

  def _activate(self, features, activation):
    features = tf.keras.layers.Activation(activation)(features)
    return features

  def __call__(self, units, features, trans_func='dense', agg_func='adj_matmul', activation='relu'):
    features = self._transform(units, features, trans_func)
    features = self._aggregate(features, agg_func)
    if activation is not None:
      features = self._activate(features, activation)
    return features

class MyModel:
  def __init__(self, A, node_features, node_labels, train_mask, val_mask, test_mask):
    self.A = A
    self.node_features = node_features
    self.node_labels = node_labels
    self.train_mask = train_mask
    self.val_mask = val_mask
    self.test_mask = test_mask

    self.gcn_layer1 = GCNLayer(self.A)
    self.gcn_layer2 = GCNLayer(self.A)

  def __call__(self):
    hidden_out = self.gcn_layer1(32, self.node_features, activation='relu')
    output = self.gcn_layer2(7, hidden_out, activation=None)
    return output

  def train(self, num_epochs=1, lr=0.01):
    optimizer = tf.keras.optimizers.Adam(lr)
    best_val_acc = 0.0
    for e in range(num_epochs):
      with tf.GradientTape() as t:
        logits = self()
        train_loss = masked_cross_entropy_loss(self.node_labels, logits, self.train_mask)

      variables = t.watched_variables()
      grads = t.gradient(train_loss, variables)
      optimizer.apply_gradients(zip(grads, variables))
      logits = self()
      train_acc = masked_accuracy(self.node_labels, logits, self.train_mask)
      val_acc = masked_accuracy(self.node_labels, logits, self.val_mask)
      if val_acc > best_val_acc:
        best_val_acc = val_acc
        print(f'epoch={e},Training Loss:{train_loss.numpy()},Training Accuracy:{train_acc.numpy()}, Validation Accuracy:{val_acc.numpy()}')

# A, node_features, node_labels and the three masks are assumed to be loaded
# beforehand (e.g. a citation dataset such as Cora, which has 7 classes).
model = MyModel(A, node_features, node_labels, train_mask, val_mask, test_mask)
model.train(num_epochs=200, lr=0.01)

Output

epoch=0,Training Loss:4.099794864654541,Training Accuracy:0.1428571492433548, Validation Accuracy:0.09000000357627869
epoch=1,Training Loss:6.438627243041992,Training Accuracy:0.20714285969734192, Validation Accuracy:0.16599997878074646
epoch=5,Training Loss:5.980966091156006,Training Accuracy:0.17142857611179352, Validation Accuracy:0.17399999499320984
epoch=13,Training Loss:3.9486303329467773,Training Accuracy:0.15000000596046448, Validation Accuracy:0.2800000011920929
epoch=40,Training Loss:5.182331562042236,Training Accuracy:0.23571430146694183, Validation Accuracy:0.29600000381469727
epoch=158,Training Loss:6.245728969573975,Training Accuracy:0.2142857164144516, Validation Accuracy:0.3160000145435333

CodePudding user response:

Your model is learning but isn't converging. Consider checking/adding data, using a simpler model, or tuning the training hyperparameters (e.g. learning rate, batch size).

CodePudding user response:

I found the problem in my code. I had instantiated the tf.keras.layers.Dense layer inside the call function, which caused the weights to be re-initialized on every epoch, confusing the GradientTape.
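A minimal sketch of the corrected pattern (keeping the interface from the question where it fits; the trainable sub-layers are created once in __init__ so the same variables persist across epochs and the GradientTape tracks them consistently):

import tensorflow as tf

class GCNLayer:
  def __init__(self, A, units, activation='relu'):
    self.A = A
    # Created once, so the kernel/bias variables survive across epochs
    # instead of being re-initialized on every forward pass.
    self.dense = tf.keras.layers.Dense(units)
    self.activation = tf.keras.layers.Activation(activation) if activation else None

  def __call__(self, features):
    features = self.dense(features)   # transform
    features = self.A @ features      # aggregate over neighbours
    if self.activation is not None:
      features = self.activation(features)
    return features

# The model would then build its layers once as well, e.g.:
#   self.gcn_layer1 = GCNLayer(self.A, 32)
#   self.gcn_layer2 = GCNLayer(self.A, 7, activation=None)
# and call them as self.gcn_layer1(self.node_features), etc.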
