Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead


This is my second question on this problem. Initially I got AttributeError: 'numpy.ndarray' object has no attribute 'log', and U12-Forward helped me solve it. But a new problem has arisen.
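The error itself is easy to reproduce in isolation; a minimal sketch of what seems to happen (my own illustration, not part of the original script):

import numpy as np
import torch

t = torch.tensor([1.0, 2.0], requires_grad=True)
try:
    np.log(t)  # NumPy converts via t.numpy(), which is forbidden while requires_grad=True
except RuntimeError as e:
    print(e)   # "Can't call numpy() on Tensor that requires grad. ..."

print(np.log(t.detach().numpy()))  # detach() first yields a plain ndarray, so this works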

import torch
import numpy as np
import matplotlib.pyplot as plt

x = torch.tensor([[5., 10.],
                  [1., 2.]], requires_grad=True)
var_history = []
fn_history = []
alpha = 0.001
optimizer = torch.optim.SGD([x], lr=alpha)

def function_parabola(variable):
    return np.prod(np.log(np.log(variable + 7)))


def make_gradient_step(function, variable):
    function_result = function(variable)
    function_result.backward()
    optimizer.step()
    optimizer.zero_grad()


for i in range(500):
    var_history.append(x.data.numpy().copy())
    fn_history.append(function_parabola(x).data.cpu().detach().numpy().copy())
    make_gradient_step(function_parabola, x)
print(x)
def show_contours(objective,
                  x_lims=[-10.0, 10.0],
                  y_lims=[-10.0, 10.0],
                  x_ticks=100,
                  y_ticks=100):
    x_step = (x_lims[1] - x_lims[0]) / x_ticks
    y_step = (y_lims[1] - y_lims[0]) / y_ticks
    X, Y = np.mgrid[x_lims[0]:x_lims[1]:x_step, y_lims[0]:y_lims[1]:y_step]
    res = []
    for x_index in range(X.shape[0]):
        res.append([])
        for y_index in range(X.shape[1]):
            x_val = X[x_index, y_index]
            y_val = Y[x_index, y_index]
            res[-1].append(objective(np.array([[x_val, y_val]]).T))
    res = np.array(res)
    plt.figure(figsize=(7,7))
    plt.contour(X, Y, res, 100)
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
show_contours(function_parabola)
plt.scatter(np.array(var_history)[:,0], np.array(var_history)[:,1], s=10, c='r');
plt.show()

CodePudding user response:

Modify function_parabola() to operate on PyTorch tensors, using the torch equivalents of the original NumPy operations. The NumPy functions convert the tensor through tensor.numpy(), which is disallowed while requires_grad=True; the torch ops keep the computation inside the autograd graph instead, like so:

import torch
import numpy as np
import matplotlib.pyplot as plt

x = torch.tensor([[5., 10.],
                  [1., 2.]], requires_grad=True)
var_history = []
fn_history = []
alpha = 0.001
optimizer = torch.optim.SGD([x], lr=alpha)

def function_parabola(variable):
    # torch.* ops keep the computation in the autograd graph;
    # torch.as_tensor also accepts the plain NumPy arrays that show_contours passes in
    return torch.prod(torch.log(torch.log(torch.as_tensor(variable + 7))))


def make_gradient_step(function, variable):
    function_result = function(variable)
    function_result.backward()
    optimizer.step()
    optimizer.zero_grad()


for i in range(500):
    var_history.append(x.detach().numpy().copy())
    fn_history.append(function_parabola(x).detach().cpu().numpy())
    make_gradient_step(function_parabola, x)
print(x)
def show_contours(objective,
                  x_lims=[-10.0, 10.0],
                  y_lims=[-10.0, 10.0],
                  x_ticks=100,
                  y_ticks=100):
    x_step = (x_lims[1] - x_lims[0]) / x_ticks
    y_step = (y_lims[1] - y_lims[0]) / y_ticks
    X, Y = np.mgrid[x_lims[0]:x_lims[1]:x_step, y_lims[0]:y_lims[1]:y_step]
    res = []
    for x_index in range(X.shape[0]):
        res.append([])
        for y_index in range(X.shape[1]):
            x_val = X[x_index, y_index]
            y_val = Y[x_index, y_index]
            res[-1].append(objective(np.array([[x_val, y_val]]).T))
    res = np.array(res)
    plt.figure(figsize=(7,7))
    plt.contour(X, Y, res, 100)
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
show_contours(function_parabola)
plt.scatter(np.array(var_history)[:,0], np.array(var_history)[:,1], s=10, c='r');

plt.show()
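Note that torch.as_tensor() is what lets the same objective serve both roles: it passes a grad-tracking tensor through unchanged during optimization, and converts the plain NumPy arrays that show_contours feeds it. A quick check (the sample values are arbitrary):

import torch
import numpy as np

def function_parabola(variable):
    return torch.prod(torch.log(torch.log(torch.as_tensor(variable + 7))))

x = torch.tensor([[5., 10.], [1., 2.]], requires_grad=True)
print(function_parabola(x).grad_fn)                 # e.g. <ProdBackward0 ...>: autograd is tracking

print(function_parabola(np.array([[3.0], [4.0]])))  # plain NumPy input also works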
