TypeError: Fetch argument None has invalid type <class 'NoneType'>, Tensor passing p


I encapsulated a GCN network in extract_feature.py, but when I call its method from main.py to try to get the GCN output, I get the error in the title.

The extract_feature.py file is as follows:

import tensorflow as tf

import numpy as np
import networkx as nx

class Feature_Extract:

    def __init__(self, config, env, networkServices):
        self.config = config
        self.num_vnfs = config.num_vnfs
        self.vnf_properties = env.vnf_properties
        self.vnf_bandwidth = env.vnf_bandwidth
        self.containers = networkServices.cells.reshape(config.batch_size, config.high_index, config.max_length)
        self.container_length = networkServices.container_length
        self.batch_length = networkServices.batch_length
        self.relations = networkServices.relations
        self.features = None
        self.gcn_constant = None
        self.H_0 = None

    def convert(self):
        for batch in range(self.config.batch_size):
            batch_feature = None
            for slice_id in range(self.batch_length[batch]):
                length = self.container_length[batch][slice_id]
                container = self.containers[batch][slice_id][:length]
                relation = self.relations[batch][slice_id]
                relation = np.array(relation) - 1
                graph = self.list2graph(relation=relation)

                gcn_out = self.gcn_model(graph, container)

                if batch_feature is not None:
                    batch_feature = tf.concat([batch_feature, gcn_out], axis=0)
                else:
                    batch_feature = gcn_out

            # Each batch has a different length; pad to a uniform length so the batches can be merged.
            batch_feature = tf.pad(batch_feature, [[0, 10 * (self.config.high_index - self.batch_length[batch] - 1)], [0, 0]])
            if self.features is not None:
                batch_feature = tf.expand_dims(batch_feature, axis=0)
                self.features = tf.concat([self.features, batch_feature], axis=0)
            else:
                batch_feature = tf.expand_dims(batch_feature, axis=0)
                self.features = batch_feature

        return self.features

    def list2graph(self, relation):
        graph = nx.Graph()
        # Add all VNFs so that each feature lines up with its VNF index:
        # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] for 10 VNF types.
        graph.add_nodes_from(np.array(range(self.num_vnfs)))
        graph.add_edges_from(relation)

        return graph

    def container_features(self, graph, container):
        cpu_list = [0 for _ in range(self.num_vnfs)]
        mem_list = [0 for _ in range(self.num_vnfs)]
        sto_list = [0 for _ in range(self.num_vnfs)]
        for node in container:
            cpu_list[node - 1] = self.vnf_properties[node]["cpu_request"]
            mem_list[node - 1] = self.vnf_properties[node]["memory_request"]
            sto_list[node - 1] = self.vnf_properties[node]["storage_request"]

        bandwidth = np.zeros((self.num_vnfs, self.num_vnfs))
        for e1, e2 in graph.edges():
            bandwidth[e1 - 1][e2 - 1] = self.vnf_bandwidth[e1][e2]

        H_0 = 0.1 * bandwidth + np.diag(cpu_list) + 0.1 * np.diag(mem_list) + 0.01 * np.diag(sto_list)

        return H_0

    def get_graph_matrix(self, graph, container):
        """
        Get the normalized adjacency matrix of the graph network.
        *******************************************
        H^{l+1} = sigma(\widetilde{D}^{-1/2} * \widetilde{A} * \widetilde{D}^{-1/2} * H^l * W^l)
        *******************************************
        H^l: input features of layer l
        H^{l+1}: output features of layer l
        sigma: activation function
        A: adjacency matrix of the graph network
        \widetilde{A} = A + I, where I is the identity matrix
        \widetilde{D}: degree matrix of \widetilde{A}
        W^l: parameter matrix of layer l
        """
        A = nx.adj_matrix(graph)
        A_tilde = A + np.identity(n=A.shape[0])
        D = np.squeeze(np.sum(np.array(A_tilde), axis=1))
        D_tilde_inv_sqrt = np.power(D, -1 / 2)
        D_tilde = np.diag(D_tilde_inv_sqrt)
        H_0 = self.container_features(graph, container)

        return np.dot(np.dot(D_tilde, A_tilde), D_tilde), H_0

    def gcn_model(self, graph, container):
        gcn_constant, H_0 = self.get_graph_matrix(graph, container)

        self.gcn_constant = gcn_constant
        self.H_0 = H_0

        with tf.variable_scope('gcn', reuse=tf.AUTO_REUSE):
            wc1 = tf.get_variable('wc1', [self.config.num_vnfs, self.config.gcn_layer[0]], dtype=tf.float64,
                                  initializer=tf.contrib.layers.xavier_initializer())
            wc2 = tf.get_variable('wc2', [self.config.gcn_layer[0], self.config.gcn_layer[1]], dtype=tf.float64,
                                  initializer=tf.contrib.layers.xavier_initializer())
            wc3 = tf.get_variable('wc3', [self.config.gcn_layer[1], self.config.gcn_layer[2]], dtype=tf.float64,
                                  initializer=tf.contrib.layers.xavier_initializer())

            # GCN embedding
            fc1 = tf.matmul(tf.matmul(H_0, gcn_constant), wc1)
            fc1 = tf.nn.relu(fc1)

            fc2 = tf.matmul(tf.matmul(gcn_constant, fc1), wc2)
            fc2 = tf.nn.relu(fc2)

            fc3 = tf.matmul(tf.matmul(gcn_constant, fc2), wc3)
            fc3 = tf.nn.relu(fc3)

            fc4 = tf.layers.dense(inputs=fc3, units=10, activation=tf.nn.relu)

            # Pad to [max_length, embeddings].
            den_out = tf.pad(fc4, [[0, self.config.max_length - self.num_vnfs],
                                   [0, self.config.max_length - self.num_vnfs]])
            gcn_out = tf.cast(den_out, dtype=tf.float32)

        return gcn_out
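
For reference, get_graph_matrix is meant to compute the symmetric normalization \widetilde{D}^{-1/2} * \widetilde{A} * \widetilde{D}^{-1/2} from the docstring. Here is a standalone sanity check of that step on a toy 3-node path graph (the graph and its size are only for illustration, and it uses nx.to_numpy_array instead of nx.adj_matrix just to keep the check dense and self-contained):

import numpy as np
import networkx as nx

# Toy path graph 0 - 1 - 2, illustration only (not the real VNF graph).
g = nx.Graph()
g.add_nodes_from(range(3))
g.add_edges_from([(0, 1), (1, 2)])

A = nx.to_numpy_array(g)                 # dense adjacency matrix
A_tilde = A + np.identity(A.shape[0])    # \widetilde{A} = A + I
D = A_tilde.sum(axis=1)                  # degrees of \widetilde{A}
D_inv_sqrt = np.diag(np.power(D, -0.5))  # \widetilde{D}^{-1/2}
gcn_constant = D_inv_sqrt @ A_tilde @ D_inv_sqrt

print(gcn_constant.round(3))
# [[0.5   0.408 0.   ]
#  [0.408 0.333 0.408]
#  [0.    0.408 0.5  ]]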

The way I fetch self.features is:

features = sess.run(extractor.features)
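
I suspect this is because self.features is set to None in __init__ and is only assigned a tensor inside convert(), so sess.run receives a plain Python None. Fetching None reproduces exactly the same message:

import tensorflow as tf

sess = tf.Session()
sess.run(None)  # TypeError: Fetch argument None has invalid type <class 'NoneType'>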

I also tried another way to get the GCN output:

features = extractor.convert()
features = sess.run(features)

But it reported another error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value gcn/wc1

But I have done the following initialization in main.py:

sess.run(tf.global_variables_initializer())

Help me PLZ.

CodePudding user response:
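
Both errors come from the order in which the graph is built in main.py.

The TypeError appears because self.features is initialized to None in __init__ and is only assigned a tensor inside convert(). If convert() is never called, sess.run(extractor.features) is asked to fetch the Python value None, which is not a valid fetch.

The FailedPreconditionError appears because the tf.get_variable calls inside gcn_model only create gcn/wc1, wc2 and wc3 when convert() builds the graph. tf.global_variables_initializer() only initializes the variables that exist at the moment the initializer op is created, so an initializer created (and run) before convert() does not cover them.

A minimal sketch of an ordering that should work, assuming config, env and networkServices are constructed as in your main.py:

extractor = Feature_Extract(config, env, networkServices)

features_tensor = extractor.convert()         # builds the graph and creates gcn/wc1, wc2, wc3
sess.run(tf.global_variables_initializer())   # created after convert(), so it covers the gcn/* variables
features = sess.run(features_tensor)          # now both fetches succeed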
