How to get activation values of a layer in PyTorch


I have a PyTorch Lightning model that has a dense layer, like so:

def __init__(...):
...
self.dense = nn.Linear(channels[-1], 64, bias=True)
...

For my project, I need to get the activation values of this layer as a list.

I have tried this code, which I found on the PyTorch discussion forum:

activation = {}

def get_activation(name):
    # returns a hook that saves the layer's output under the given name
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook

import cv2 as cv
import numpy as np
import torch

test_img = cv.imread('digimage/100.jpg')
test_img = cv.resize(test_img, (128, 128))
test_img = np.moveaxis(test_img, 2, 0)  # HWC -> CHW

modelftr = load_feature_model(**model_dict)
num_ftrs = modelftr.fc.in_features
modelftr.fc = torch.nn.Linear(num_ftrs, 228)
modelftr.load_state_dict(torch.load('...'))
modelftr.dense.register_forward_hook(get_activation('dense'))

with torch.no_grad():
    modelatt.to('cpu')
    modelatt.eval()
    test_img = torch.tensor(test_img).view(-1, 3, 128, 128).float()
    output = modelcat(test_img)
    print(activation['dense'])

But this gives a KeyError, meaning the hook never ran during the forward pass:

      8 test_img = torch.tensor(test_img).view(-1, 3, 128, 128).float()
      9 output = modelcat(test_img)
---> 10 print(activation['dense'])

KeyError: 'dense'

Update:

This is my full model code. As you can see, there is a linear layer named dense:

class FAtNet(pl.LightningModule):
    def __init__(self, image_size, in_channels, num_blocks, channels,
                 num_classes=20, block_types=['C', 'C', 'T', 'T'], lr=0.0001, loss_function=nn.CrossEntropyLoss()):
        super().__init__()
        self.lr = lr
        self.loss_function = loss_function
        ih, iw = image_size
        block = {'C': MBConv, 'T': Transformer}

        self.s0 = self._make_layer(
            conv_3x3_bn, in_channels, channels[0], num_blocks[0], (ih // 2, iw // 2))
        self.s1 = self._make_layer(
            block[block_types[0]], channels[0], channels[1], num_blocks[1], (ih // 4, iw // 4))
        self.s2 = self._make_layer(
            block[block_types[1]], channels[1], channels[2], num_blocks[2], (ih // 8, iw // 8))
        self.s3 = self._make_layer(
            block[block_types[2]], channels[2], channels[3], num_blocks[3], (ih // 16, iw // 16))
        self.s4 = self._make_layer(
            block[block_types[3]], channels[3], channels[4], num_blocks[4], (ih // 32, iw // 32))

        self.pool = nn.AvgPool2d(ih // 32, 1)
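        # 'dense' is the 64-unit linear layer whose activations I want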
        self.dense = nn.Linear(channels[-1], 64, bias=True)
        self.fc = nn.Linear(64, num_classes, bias=False)

    def forward(self, x):
        x = self.s0(x)
        x = self.s1(x)
        x = self.s2(x)
        x = self.s3(x)
        x = self.s4(x)

        x = self.pool(x).view(-1, x.shape[1])
        x = self.dense(x)
        x = self.fc(x)
        return x

    def _make_layer(self, block, inp, oup, depth, image_size):
        layers = nn.ModuleList([])
        for i in range(depth):
            if i == 0:
                layers.append(block(inp, oup, image_size, downsample=True))
            else:
                layers.append(block(oup, oup, image_size))
        return nn.Sequential(*layers)
    
    def configure_optimizers(self):
        return optim.Adam(self.parameters(), lr=self.lr)
    
    def training_step(self, batch, batch_idx):
        X, y = batch
        y_hat = self(X)
        loss = self.loss_function(y_hat, y)
        self.log('train_loss', loss)
        return loss
    
    def test_step(self, batch, batch_idx):
        X, y = batch
        y_hat = self(X)
        loss = self.loss_function(y_hat, y)
        self.log('test_loss', loss)
        return loss
      
    ### custom prediction function ###
    def predict(self, dm):
        X_test = dm.X_test

        self.eval()

        X_test = torch.tensor(X_test).float()
        self.to(device='cuda')

        pred = []
        
        with torch.no_grad():
            for data in X_test:
                output = self(data)
                pred.append(output)
        
        pred = pred[0].detach()

        pred = pred.cpu()

        self.to(device='cpu')
        self.train()

        return pred

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

CodePudding user response:

It seems like your model does not have a 'dense' layer, only 'fc'.
Try:

modelftr.fc.register_forward_hook(get_activation('fc'))
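
For reference, here is a minimal end-to-end sketch of that flow, assuming modelftr and test_img are the objects prepared in the question; the key point is that the forward pass must run on the same model object the hook was registered on:

modelftr.fc.register_forward_hook(get_activation('fc'))

with torch.no_grad():
    modelftr.to('cpu')
    modelftr.eval()
    output = modelftr(test_img)  # the hook fires here and fills activation['fc']

# the captured tensor can then be converted to the list you wanted
fc_values = activation['fc'].flatten().tolist()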