PyTorch | Graph Convolutional Network (GCN) in Practice

This post implements a two-layer GCN with PyTorch (via PyTorch Geometric), using the Louvain community-detection result on the karate club network as node labels, and trains the model to measure train/test accuracy.
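The post does not list its environment; the comments below are a rough sketch of the packages the imports rely on (package names inferred from the imports, versions unpinned):

# Rough environment sketch (inferred, not from the original post):
#   pip install torch torch_geometric networkx python-louvain matplotlib
# The `community` module imported as `community_louvain` comes from the python-louvain package.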

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import community as community_louvain
import torch

import torch_geometric
from torch_geometric.data import InMemoryDataset, Data


G = nx.karate_club_graph()


x = torch.eye(G.number_of_nodes(), dtype=torch.float)
adj = nx.to_scipy_sparse_matrix(G).tocoo()  # in NetworkX >= 3.0 use nx.to_scipy_sparse_array(G).tocoo()
row = torch.from_numpy(adj.row.astype(np.int64)).to(torch.long)
col = torch.from_numpy(adj.col.astype(np.int64)).to(torch.long)
edge_index = torch.stack([row, col], dim=0)
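`edge_index` is PyTorch Geometric's COO edge list: a [2, num_edges] LongTensor whose columns are (source, target) pairs. A quick sanity check, as a sketch (not part of the original code):

# Sanity check (sketch): the karate club graph has 34 nodes and 78 undirected edges,
# stored here as 156 directed entries.
print(edge_index.shape)  # torch.Size([2, 156])
print(x.shape)           # torch.Size([34, 34]) -- one-hot (identity) node features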

# Compute communities.
partition = community_louvain.best_partition(G)
print(set(partition.values()))
y = torch.tensor([partition[i] for i in range(G.number_of_nodes())])
print(y)
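Louvain typically splits the 34-node karate club graph into 4 communities, but the algorithm is randomized, so the count can vary between runs. The second GCN layer further below is hard-coded to 4 output classes, so it is worth checking (a small sketch):

# Number of communities found; GCNConv(16, 4) below assumes this is 4.
num_communities = int(y.max()) + 1
print(num_communities)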
# Select a single training node for each community
# (we just use the first one).
train_mask = torch.zeros(y.size(0), dtype=torch.bool)
for i in range(int(y.max()) + 1):
    train_mask[(y == i).nonzero(as_tuple=False)[0]] = True

data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask)
remaining = (~data.train_mask).nonzero(as_tuple=False).view(-1)
remaining = remaining[torch.randperm(remaining.size(0))]  # shuffle the non-training nodes
data.test_mask = torch.zeros(y.size(0), dtype=torch.bool)
data.test_mask[remaining] = True  # every remaining node is used for testing
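At this point it can help to print the assembled `Data` object as a quick check (a sketch; the repr in the comment assumes Louvain found 4 communities, giving 4 training nodes):

# Sketch: inspect the assembled graph object.
print(data)  # roughly: Data(x=[34, 34], edge_index=[2, 156], y=[34], train_mask=[34], test_mask=[34])
print('training nodes:', int(data.train_mask.sum()))  # one node per community
print('test nodes:', int(data.test_mask.sum()))       # all remaining nodes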

import torch.nn.functional as F
from torch_geometric.nn import GCNConv # GCN

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(data.num_node_features, 16) # first layer: input dim = number of node features, output dim = 16
        self.conv2 = GCNConv(16, 4) # second layer: input dim = 16, output dim = 4 (number of Louvain communities)

    def forward(self):
        x, edge_index = data.x, data.edge_index

        x = self.conv1(x, edge_index)
        x = F.relu(x) # ReLU activation
        x = F.dropout(x, training=self.training) # dropout is only active in training mode
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)
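GCNConv implements the propagation rule of Kipf & Welling: each layer computes D̂^(-1/2) (A + I) D̂^(-1/2) X W, i.e. a linear feature transformation followed by symmetrically normalized neighborhood averaging with self-loops. A dense-tensor illustration of a single layer (a sketch for intuition only; the model above does not use it, and the bias term is omitted):

# Dense sketch of one GCN layer on the karate club graph (illustration only).
A = torch.tensor(nx.to_numpy_array(G), dtype=torch.float)
A_hat = A + torch.eye(A.size(0))           # add self-loops
deg_inv_sqrt = torch.diag(A_hat.sum(dim=1).pow(-0.5))
W = torch.randn(G.number_of_nodes(), 16)   # untrained weight matrix, just for shapes
H = deg_inv_sqrt @ A_hat @ deg_inv_sqrt @ x @ W
print(H.shape)                             # torch.Size([34, 16])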

    
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # use GPU 0 if available (this machine has a single GPU), otherwise fall back to CPU
print(device)
model, data = Net().to(device), data.to(device)
# Optimizer
optimizer = torch.optim.Adam([ 
    dict(params=model.conv1.parameters(), weight_decay=5e-4),
    dict(params=model.conv2.parameters(), weight_decay=0)
], lr=0.001)  # Only perform weight-decay on first convolution.
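The two parameter groups apply L2 weight decay only to the first convolution, as the comment above notes; a quick check of the per-group settings (sketch):

# Sketch: confirm learning rate and weight decay per parameter group.
for i, group in enumerate(optimizer.param_groups):
    print(i, group['lr'], group['weight_decay'])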

def train():
    model.train()  # re-enable training mode (test() below switches the model to eval mode)
    optimizer.zero_grad()
    out = model()
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss
    
@torch.no_grad()
def test():
    model.eval()
    logits, accs = model(), []
    for _, mask in data('train_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]  # predicted community = argmax of the log-probabilities
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
        accs.append(acc)
    return accs

for epoch in range(1, 1001):
    loss = train()
    train_acc, test_acc = test()
    log = 'Epoch: {:04d}, Train: {:.4f}, Test: {:.4f}, Loss: {:.4f}'
    print(log.format(epoch, train_acc, test_acc, loss))
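Since matplotlib and networkx are already imported, the trained model's predicted communities can also be drawn on the graph for a visual comparison with the Louvain labels. This is a sketch not present in the original post:

# Sketch: color nodes by the trained model's predicted community.
model.eval()
with torch.no_grad():
    pred = model().max(1)[1].cpu()
pos = nx.spring_layout(G, seed=42)
nx.draw(G, pos, node_color=pred.tolist(), cmap=plt.cm.Set1, with_labels=True)
plt.show()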

Results: the original post shows a screenshot of the per-epoch training log (train accuracy, test accuracy, and loss).
