import torch
from torch_geometric.data import Data
x = torch.tensor([[2, 1], [5, 6], [3, 7], [12, 0]], dtype=torch.float)
y = torch.tensor([0, 1, 0, 1], dtype=torch.float)  # node labels; y[i] corresponds to node i, so the order must follow the node order in x
edge_index = torch.tensor([[0,1,2,0,3],[1,0,1,3,2]], dtype=torch.long)
data = Data(x=x, y=y, edge_index=edge_index)
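A quick sanity check of the resulting object (num_nodes, num_edges, num_node_features, and is_directed() are standard torch_geometric.data.Data attributes; the printed shapes follow from the tensors above):

print(data)
# expected: Data(x=[4, 2], edge_index=[2, 5], y=[4])
print(data.num_nodes, data.num_edges, data.num_node_features)
# expected: 4 5 2
print(data.is_directed())
# expected: True (the edge list above is not symmetric)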
# torch_geometric/data/dataset.py
from typing import List, Optional, Callable, Union, Any, Tuple

import sys
import re
import copy
import warnings
import numpy as np
import os.path as osp
from collections.abc import Sequence

import torch.utils.data
from torch import Tensor

from torch_geometric.data import Data
from torch_geometric.data.makedirs import makedirs

IndexType = Union[slice, Tensor, np.ndarray, Sequence]


class Dataset(torch.utils.data.Dataset):
    r"""Dataset base class for creating graph datasets.
    See `here <https://pytorch-geometric.readthedocs.io/en/latest/notes/create_dataset.html>`__
    for the accompanying tutorial.

    Args:
        root (string, optional): Root directory where the dataset should be
            saved. (optional: :obj:`None`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in
            the final dataset. (default: :obj:`None`)
    """
    @property
    def raw_file_names(self) -> Union[str, List[str], Tuple]:
        r"""The name of the files in the :obj:`self.raw_dir` folder that must
        be present in order to skip downloading."""
        raise NotImplementedError

    @property
    def processed_file_names(self) -> Union[str, List[str], Tuple]:
        r"""The name of the files in the :obj:`self.processed_dir` folder
        that must be present in order to skip processing."""
        raise NotImplementedError

    def download(self):
        r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
        raise NotImplementedError

    def process(self):
        r"""Processes the dataset to the :obj:`self.processed_dir` folder."""
        raise NotImplementedError

    def len(self) -> int:
        r"""Returns the number of graphs stored in the dataset."""
        raise NotImplementedError

    def get(self, idx: int) -> Data:
        r"""Gets the data object at index :obj:`idx`."""
        raise NotImplementedError

    def __init__(self, root: Optional[str] = None,
                 transform: Optional[Callable] = None,
                 pre_transform: Optional[Callable] = None,
                 pre_filter: Optional[Callable] = None):
        super().__init__()

        if isinstance(root, str):
            root = osp.expanduser(osp.normpath(root))

        self.root = root
        self.transform = transform
        self.pre_transform = pre_transform
        self.pre_filter = pre_filter
        self._indices: Optional[Sequence] = None

        # Download and process only if the subclass actually overrides the
        # corresponding hooks:
        if 'download' in self.__class__.__dict__:
            self._download()

        if 'process' in self.__class__.__dict__:
            self._process()

    def indices(self) -> Sequence:
        return range(self.len()) if self._indices is None else self._indices

    @property
    def raw_dir(self) -> str:
        return osp.join(self.root, 'raw')

    @property
    def processed_dir(self) -> str:
        return osp.join(self.root, 'processed')

    @property
    def num_node_features(self) -> int:
        r"""Returns the number of features per node in the dataset."""
        data = self[0]
        data = data[0] if isinstance(data, tuple) else data
        if hasattr(data, 'num_node_features'):
            return data.num_node_features
        raise AttributeError(f"'{data.__class__.__name__}' object has no "
                             f"attribute 'num_node_features'")

    @property
    def num_features(self) -> int:
        r"""Returns the number of features per node in the dataset.
        Alias for :py:attr:`~num_node_features`."""
        return self.num_node_features

    @property
    def num_edge_features(self) -> int:
        r"""Returns the number of features per edge in the dataset."""
        data = self[0]
        data = data[0] if isinstance(data, tuple) else data
        if hasattr(data, 'num_edge_features'):
            return data.num_edge_features
        raise AttributeError(f"'{data.__class__.__name__}' object has no "
                             f"attribute 'num_edge_features'")

    @property
    def raw_paths(self) -> List[str]:
        r"""The absolute filepaths that must be present in order to skip
        downloading."""
        files = to_list(self.raw_file_names)
        return [osp.join(self.raw_dir, f) for f in files]

    @property
    def processed_paths(self) -> List[str]:
        r"""The absolute filepaths that must be present in order to skip
        processing."""
        files = to_list(self.processed_file_names)
        return [osp.join(self.processed_dir, f) for f in files]

    def _download(self):
        if files_exist(self.raw_paths):  # pragma: no cover
            return

        makedirs(self.raw_dir)
        self.download()

    def _process(self):
        # Warn if the cached pre-processing no longer matches the arguments:
        f = osp.join(self.processed_dir, 'pre_transform.pt')
        if osp.exists(f) and torch.load(f) != _repr(self.pre_transform):
            warnings.warn(
                f"The `pre_transform` argument differs from the one used in "
                f"the pre-processed version of this dataset. If you want to "
                f"make use of another pre-processing technique, make sure to "
                f"delete '{self.processed_dir}' first")

        f = osp.join(self.processed_dir, 'pre_filter.pt')
        if osp.exists(f) and torch.load(f) != _repr(self.pre_filter):
            warnings.warn(
                f"The `pre_filter` argument differs from the one used in the "
                f"pre-processed version of this dataset. If you want to make "
                f"use of another pre-filtering technique, make sure to "
                f"delete '{self.processed_dir}' first")

        if files_exist(self.processed_paths):  # pragma: no cover
            return

        print('Processing...', file=sys.stderr)

        makedirs(self.processed_dir)
        self.process()

        path = osp.join(self.processed_dir, 'pre_transform.pt')
        torch.save(_repr(self.pre_transform), path)
        path = osp.join(self.processed_dir, 'pre_filter.pt')
        torch.save(_repr(self.pre_filter), path)

        print('Done!', file=sys.stderr)

    def __len__(self) -> int:
        r"""The number of examples in the dataset."""
        return len(self.indices())

    def __getitem__(
        self,
        idx: Union[int, np.integer, IndexType],
    ) -> Union['Dataset', Data]:
        r"""In case :obj:`idx` is of type integer, will return the data
        object at index :obj:`idx` (and transforms it in case
        :obj:`transform` is present).
        In case :obj:`idx` is a slicing object, *e.g.*, :obj:`[2:5]`, a list,
        a tuple, or a :obj:`torch.Tensor` or :obj:`np.ndarray` of type long
        or bool, will return a subset of the dataset at the specified
        indices."""
        if (isinstance(idx, (int, np.integer))
                or (isinstance(idx, Tensor) and idx.dim() == 0)
                or (isinstance(idx, np.ndarray) and np.isscalar(idx))):
            data = self.get(self.indices()[idx])
            data = data if self.transform is None else self.transform(data)
            return data
        else:
            return self.index_select(idx)

    def index_select(self, idx: IndexType) -> 'Dataset':
        r"""Creates a subset of the dataset from specified indices
        :obj:`idx`. Indices :obj:`idx` can be a slicing object, *e.g.*,
        :obj:`[2:5]`, a list, a tuple, or a :obj:`torch.Tensor` or
        :obj:`np.ndarray` of type long or bool."""
        indices = self.indices()

        if isinstance(idx, slice):
            indices = indices[idx]

        elif isinstance(idx, Tensor) and idx.dtype == torch.long:
            return self.index_select(idx.flatten().tolist())

        elif isinstance(idx, Tensor) and idx.dtype == torch.bool:
            idx = idx.flatten().nonzero(as_tuple=False)
            return self.index_select(idx.flatten().tolist())

        elif isinstance(idx, np.ndarray) and idx.dtype == np.int64:
            return self.index_select(idx.flatten().tolist())

        elif isinstance(idx, np.ndarray) and idx.dtype == bool:
            # (`np.bool` is a deprecated alias of the builtin `bool`.)
            idx = idx.flatten().nonzero()[0]
            return self.index_select(idx.flatten().tolist())

        elif isinstance(idx, Sequence) and not isinstance(idx, str):
            indices = [indices[i] for i in idx]

        else:
            raise IndexError(
                f"Only slices (':'), list, tuples, torch.tensor and "
                f"np.ndarray of dtype long or bool are valid indices (got "
                f"'{type(idx).__name__}')")

        dataset = copy.copy(self)
        dataset._indices = indices
        return dataset

    def shuffle(
        self,
        return_perm: bool = False,
    ) -> Union['Dataset', Tuple['Dataset', Tensor]]:
        r"""Randomly shuffles the examples in the dataset.

        Args:
            return_perm (bool, optional): If set to :obj:`True`, will also
                return the random permutation used to shuffle the dataset.
                (default: :obj:`False`)
        """
        perm = torch.randperm(len(self))
        dataset = self.index_select(perm)
        return (dataset, perm) if return_perm is True else dataset

    def __repr__(self) -> str:
        arg_repr = str(len(self)) if len(self) > 1 else ''
        return f'{self.__class__.__name__}({arg_repr})'


def to_list(value: Any) -> Sequence:
    if isinstance(value, Sequence) and not isinstance(value, str):
        return value
    else:
        return [value]


def files_exist(files: List[str]) -> bool:
    # NOTE: We return `False` in case `files` is empty, leading to a
    # re-processing of files on every instantiation.
    return len(files) != 0 and all([osp.exists(f) for f in files])


def _repr(obj: Any) -> str:
    if obj is None:
        return 'None'
    return re.sub('(<.*?)\\s.*(>)', r'\1\2', obj.__repr__())
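To see how the hooks above fit together, here is a minimal sketch of a custom subclass. The class name, file names, and toy graphs are made up for illustration; download() is deliberately left undefined, so the `'download' in self.__class__.__dict__` check in __init__ skips the download step entirely.

import os.path as osp
import torch
from torch_geometric.data import Data, Dataset

class MyGraphDataset(Dataset):
    @property
    def raw_file_names(self):
        return []  # nothing to download in this sketch

    @property
    def processed_file_names(self):
        return [f'graph_{i}.pt' for i in range(4)]

    def process(self):
        # Runs only when the processed files are missing; saves one toy
        # graph per file, applying the pre_filter/pre_transform hooks.
        for path in self.processed_paths:
            data = Data(x=torch.randn(3, 2),
                        edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]]))
            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                data = self.pre_transform(data)
            torch.save(data, path)

    def len(self):
        return len(self.processed_file_names)

    def get(self, idx):
        return torch.load(osp.join(self.processed_dir, f'graph_{idx}.pt'))

dataset = MyGraphDataset(root='/tmp/my_graphs')
print(dataset[0], len(dataset), dataset.num_node_features)

Note that indexing with a slice such as dataset[1:3], or with a long/bool tensor, goes through index_select() and returns a shallow copy of the dataset with _indices set, rather than copying any graphs.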
Most attention models today sit inside the Encoder-Decoder framework, although attention is really a general idea that does not depend on any particular framework. The figure below gives the most abstract representation of the Encoder-Decoder framework commonly used in text processing.

The Encoder-Decoder framework in text processing can be viewed as a general model for generating one sentence (or passage) from another. For a sentence pair <Source, Target>, the goal is, given the input sentence Source, to generate the target sentence Target through the Encoder-Decoder framework. Source and Target may be in the same language or in two different languages, and each consists of its own word sequence:

$\text{Source} = \langle x_1, x_2, \dots, x_m \rangle, \qquad \text{Target} = \langle y_1, y_2, \dots, y_n \rangle$

The encoder encodes the input sentence Source, transforming it via a non-linear transformation into an intermediate semantic representation $C$:

$C = F(x_1, x_2, \dots, x_m)$

The decoder then generates the word $y_i$ at step $i$ from the intermediate semantic representation $C$ of Source and the previously generated history $y_1, y_2, \dots, y_{i-1}$:

$y_i = G(C, y_1, y_2, \dots, y_{i-1})$

Since each $y_i$ is produced in turn, the system as a whole generates the target sentence Target from the input sentence Source.

The Encoder-Decoder framework shown above does not embody any "attention model"; it can be regarded as a distracted, attention-free model, because no matter which target word is being generated, the semantic encoding $C$ of the input sentence Source that it uses is always the same.

The Encoder-Decoder framework with an attention model added is shown in the next figure. The generation of target words then takes the form

$y_i = G(C_i, y_1, y_2, \dots, y_{i-1})$

where each target word now depends on its own source encoding $C_i$.

Detaching attention from the Encoder-Decoder framework and abstracting one step further makes the essential idea of the attention mechanism visible.

Imagine the constituent elements of Source as a series of <Key, Value> data pairs. Given some element Query of Target, we compute the similarity or relevance between Query and each Key to obtain a weight coefficient for each Key's corresponding Value, and then take a weighted sum of the Values to obtain the final attention value. So in essence, the attention mechanism is a weighted sum over the Values of the elements in Source, with Query and Key used to compute the weight coefficient of each Value. This essential idea can be written as the formula

$\text{Attention}(\text{Query}, \text{Source}) = \sum_{i=1}^{L_x} \text{Similarity}(\text{Query}, \text{Key}_i) \cdot \text{Value}_i$

where $L_x = \lVert \text{Source} \rVert$ denotes the length of Source. In the attention computation, Key and Value in Source coincide, both referring to the semantic encoding of each word of the input sentence, which can make this structure, and the essential idea it expresses, harder to recognize. Conceptually, though, attention can still be understood as selectively filtering a small amount of important information out of a large amount of information and focusing on it, ignoring the mostly unimportant rest. The focusing shows up in the computation of the weight coefficients: the larger the weight, the more focus falls on its corresponding Value. The weight represents the importance of a piece of information, and the Value is that information itself.

The computation process of the Attention mechanism:
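A minimal sketch of this weighted-sum computation follows. Dot-product similarity and softmax normalization are assumptions of the sketch (common choices in practice), not something the text above prescribes.

import torch
import torch.nn.functional as F

def attention(query, keys, values):
    # query: (d,), keys: (L_x, d), values: (L_x, d_v)
    scores = keys @ query                 # Similarity(Query, Key_i) for each i
    weights = F.softmax(scores, dim=0)    # normalize into weight coefficients
    return weights @ values               # weighted sum over Value_i

L_x, d = 5, 8                             # L_x = ||Source||
query = torch.randn(d)
keys = values = torch.randn(L_x, d)       # Key and Value coincide, as noted above
print(attention(query, keys, values).shape)  # torch.Size([8])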
web.xml error:
The content of element type "web-app" must match "(icon?,display-name?,description?,distributable?,context-param*,filter*,filter-mapping*,listener*,servlet*,s
That is, the child elements of <web-app> must appear in exactly the order listed in the DTD's content model; reordering the offending elements to match it resolves the error.
JUnit4: from the documentation of the Test annotation:
The Test annotation supports two optional parameters.
The first, expected, declares that a test method should throw an exception.
If it doesn't throw an exception or if it throws a different exception than the one declared, the test fails. The second, timeout, causes a test to fail if it takes longer than a specified amount of clock time.
Borrowing an idea from the web, implemented in Java:
public class NoIfWhile {
    /**
     * Compute x = 1 + 2 + 3 + ... + n without using if/while/for.
     */
    public static void main(String[] args) {
        int n = 10;
        int re = find(n);
        System.out.println(re);  // 55 for n = 10
    }

    // The original snippet was truncated here. One common approach (an
    // assumption of this reconstruction): terminate the recursion through
    // the short-circuit behaviour of && instead of an if statement.
    public static int find(int n) {
        int sum = n;
        boolean stop = (n > 0) && ((sum += find(n - 1)) > 0);
        return sum;
    }
}
Running a .sh script on Linux fails with the error /bin/sh^M: bad interpreter: No such file or directory.
Analysis: this comes from differing line-ending conventions between systems: a .sh file edited on Windows may contain invisible carriage-return characters (^M), so executing it on Linux raises the above error.
Fix:
1) Convert on Windows:
Use an editor such as UltraEdit or EditPlus to save the file with Unix (LF) line endings.
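On Linux itself, the same conversion can be done with a tool such as dos2unix. Below is a minimal Python sketch of the conversion; the filename script.sh is a placeholder.

# Strip Windows line endings so the interpreter line no longer contains '\r' (^M).
path = 'script.sh'  # placeholder filename
with open(path, 'rb') as f:
    content = f.read()
with open(path, 'wb') as f:
    f.write(content.replace(b'\r\n', b'\n'))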
Binary search trees work well for a wide variety of applications, but they have poor worst-case performance. Now we introduce a type of binary search tree where costs are guaranteed to be logarithmic.