If you also tend to get scared off by the math formulas, take a look at this explanation.
The main formula is the graph convolution layer below, where $A_{ij}$ is the adjacency matrix of the graph and $d_i = \sum_{j=1}^{n} A_{ij}$ is the degree of node $i$:

$$h_i^{l} = \frac{\sum_{j=1}^{n} A_{ij}\, W^{l} h_j^{l-1}}{d_i + 1} + b^{l}$$
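As a quick sanity check on the $d_i + 1$ normalization, here is a minimal sketch on a toy 3-node adjacency matrix of my own (not from either repo), mirroring the denom computation in the code below:

import torch

# Toy batch of one 3-node graph: node 0 connects to nodes 1 and 2.
adj = torch.tensor([[[0., 1., 1.],
                     [1., 0., 0.],
                     [1., 0., 0.]]])

# d_i + 1: per-node degree plus one, the "+1" standing in for a self loop.
denom = torch.sum(adj, dim=2, keepdim=True) + 1
print(denom.squeeze())  # tensor([3., 2., 2.])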
The graph convolution module from the code released with the ASGCN paper is as follows:
import torch
import torch.nn as nn

class GraphConvolution(nn.Module):
    """
    A simple graph convolution layer
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # shape of the learnable weight matrix W^l
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            # no bias parameter
            self.register_parameter("bias", None)

    def forward(self, text, adj):
        # make text a float tensor with the same dtype and device as weight
        text = torch.as_tensor(text, dtype=torch.float32).to(self.weight)
        # text @ W: also works on batched (higher-dimensional) inputs
        hidden = torch.matmul(text, self.weight)
        # d_i + 1: node degree plus one, used as the normalizer
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        # (A @ hidden) / (d + 1)
        output = torch.matmul(adj, hidden) / denom
        if self.bias is not None:
            return output + self.bias
        else:
            return output
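One thing to watch: nn.Parameter(torch.FloatTensor(...)) allocates uninitialized memory, so the parameters have to be initialized somewhere before training (the full ASGCN repo presumably handles this outside this class). A minimal, self-contained usage sketch, with made-up shapes and Xavier initialization chosen just for illustration:

import torch
import torch.nn as nn

gc = GraphConvolution(in_features=300, out_features=300)
# FloatTensor gives uninitialized values, so initialize explicitly here
nn.init.xavier_uniform_(gc.weight)
nn.init.zeros_(gc.bias)

batch, seq_len, dim = 2, 5, 300
text = torch.randn(batch, seq_len, dim)                        # token features
adj = torch.randint(0, 2, (batch, seq_len, seq_len)).float()   # 0/1 adjacency

out = gc(text, adj)
print(out.shape)  # torch.Size([2, 5, 300])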
For comparison, here is the GCN code released with the RGAT paper:
import torch
import torch.nn as nn
import torch.nn.functional as F

class GCN(nn.Module):
    """
    GCN module operated on graphs
    """
    def __init__(self, args, in_dim, mem_dim, num_layers):
        super(GCN, self).__init__()
        self.args = args
        self.in_dim = in_dim
        self.num_layers = num_layers
        self.dropout = nn.Dropout(args.gcn_dropout)
        # gcn layers: the first maps in_dim -> mem_dim, the rest mem_dim -> mem_dim
        self.W = nn.ModuleList()
        for layer in range(num_layers):
            input_dim = self.in_dim if layer == 0 else mem_dim
            self.W.append(nn.Linear(input_dim, mem_dim))

    def conv_l2(self):
        # L2 norm of all layer weights, for regularization
        conv_weights = []
        for w in self.W:
            conv_weights += [w.weight, w.bias]
        return sum([x.pow(2).sum() for x in conv_weights])

    def forward(self, adj, feature):
        # d_i + 1 normalizer, as in the ASGCN layer above
        denom = adj.sum(2).unsqueeze(2) + 1
        # mark isolated nodes (no incoming or outgoing edges)
        mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2)
        for l in range(self.num_layers):
            Ax = adj.bmm(feature)
            AxW = self.W[l](Ax)
            AxW = AxW + self.W[l](feature)  # self loop
            AxW /= denom
            # gAxW = F.relu(AxW)
            gAxW = AxW
            feature = self.dropout(gAxW) if l < self.num_layers - 1 else gAxW
        return feature, mask
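The main difference from the ASGCN layer above is that this version stacks num_layers layers inside one module and adds an explicit self-loop term self.W[l](feature) on top of the +1 in the denominator. A minimal usage sketch (the args object only needs the gcn_dropout field this module actually reads; all shapes are made up for illustration):

import torch
from types import SimpleNamespace

args = SimpleNamespace(gcn_dropout=0.1)
gcn = GCN(args, in_dim=300, mem_dim=128, num_layers=2)

batch, n = 2, 5
feature = torch.randn(batch, n, 300)               # node features
adj = torch.randint(0, 2, (batch, n, n)).float()   # 0/1 adjacency

out, mask = gcn(adj, feature)
print(out.shape, mask.shape)  # torch.Size([2, 5, 128]) torch.Size([2, 5, 1])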
I'll fill in the rest of this post tomorrow and over the remainder of the week~
Finally got the matrices needed for the experiments computed (probably... it took forever, sob). Luckily the classroom's network cutoff kicks in a bit later than the lab's.
Screenshot of the source code running successfully: