Implementing GCN, GraphSAGE, and GAT with PyG (PyTorch Geometric)

from torch_geometric.datasets import Planetoid
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, SAGEConv, GATConv


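# The Cora citation network: 2,708 papers (nodes), 1,433-dim bag-of-words features, 7 classes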
dataset = Planetoid(root='/tmp/Cora', name='Cora')
print(dataset)

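# Two-layer GCN: features -> hidden -> per-class log-probabilities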
class GCN_Net(torch.nn.Module):
    def __init__(self, features, hidden, classes):
        super().__init__()
        self.conv1 = GCNConv(features, hidden)
        self.conv2 = GCNConv(hidden, classes)

    def forward(self, data):
        # Note: use data.x / data.edge_index here, not dataset.x; dataset is the original
        # dataset object, while data is the graph that has been moved to the device.
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)

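# Two-layer GraphSAGE (mean aggregation by default)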
class GraphSAGE_Net(torch.nn.Module):
    def __init__(self, features, hidden, classes):
        super().__init__()
        self.sage1 = SAGEConv(features, hidden)
        self.sage2 = SAGEConv(hidden, classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.sage1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.sage2(x, edge_index)

        return F.log_softmax(x, dim=1)
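
# Two-layer GAT: multi-head attention in the first layer (head outputs are concatenated,
# hence hidden*heads inputs to the second layer), single-head output layer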
class GAT_Net(torch.nn.Module):
    def __init__(self, features, hidden, classes, heads=1):
        super().__init__()
        self.gat1 = GATConv(features, hidden, heads=heads)
        self.gat2 = GATConv(hidden*heads, classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.gat1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.gat2(x, edge_index)

        return F.log_softmax(x, dim=1)


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    GCN_model = GCN_Net(dataset.num_node_features, 16, dataset.num_classes).to(device)
    GraphSAGE_model = GraphSAGE_Net(dataset.num_node_features, 16, dataset.num_classes).to(device)
    GAT_model = GAT_Net(dataset.num_node_features, 16, dataset.num_classes, heads=4).to(device)
    data = dataset[0].to(device)
    models = [GCN_model, GraphSAGE_model, GAT_model]
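    # Train each model with full-batch gradient descent for 200 epochs, then report test accuracy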
    for model in models:
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
        model.train()
        for epoch in range(200):
            optimizer.zero_grad()
            out = model(data)
            # print("type(out):", type(out))
            # print("type(data.y)", type(data.y))
            loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
            loss.backward()
            optimizer.step()

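        # Evaluation: take the argmax over the log-probabilities and compute accuracy on the test nodes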
        model.eval()
        pred = model(data).argmax(dim=1)
        correct = (pred[data.test_mask] == data.y[data.test_mask]).sum()
        acc = int(correct) / int(data.test_mask.sum())
        print(model, "Acc:", acc)



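# Example output: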
"""
Cora()
GCN_Net(
  (conv1): GCNConv(1433, 16)
  (conv2): GCNConv(16, 7)
) Acc: 0.814
GraphSAGE_Net(
  (sage1): SAGEConv(1433, 16, aggr=mean)
  (sage2): SAGEConv(16, 7, aggr=mean)
) Acc: 0.792
GAT_Net(
  (gat1): GATConv(1433, 16, heads=4)
  (gat2): GATConv(64, 7, heads=1)
) Acc: 0.788
"""
