This page collects typical usage examples of the Python method networkx.from_scipy_sparse_matrix. If you have been wondering what networkx.from_scipy_sparse_matrix does, how to call it, or what working code looks like, the hand-picked examples below should help. You can also explore further usage examples from the networkx module.
The following shows 29 code examples of networkx.from_scipy_sparse_matrix, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: text_to_graph
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def text_to_graph(text):
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import kneighbors_graph
# use tfidf to transform texts into feature vectors
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(text)
    # build a fully connected graph: k = N neighbors, with self-loops included
N = vectors.shape[0]
mat = kneighbors_graph(vectors, N, metric='cosine', mode='distance', include_self=True)
    mat.data = 1 - mat.data  # convert cosine distances to similarities
g = nx.from_scipy_sparse_matrix(mat, create_using=nx.Graph())
return g
Developer: thunlp, Project: OpenNE, Lines of code: 19
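A minimal usage sketch (the three-document corpus below is hypothetical; since the graph is fully connected by construction, every pair of texts gets a similarity edge):

docs = ["the cat sat on the mat",
        "dogs and cats are pets",
        "graph based text methods"]
g = text_to_graph(docs)
print(g.number_of_nodes(), g.number_of_edges())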
Example 2: calculate_max_depth_over_max_width
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_depth_over_max_width(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
max_depth_over_max_width = 0.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
depth_to_nodecount = collections.defaultdict(int)
for k, v in node_to_depth.items():
depth_to_nodecount[v] += 1
max_depth = max(node_to_depth.values())
max_width = max(depth_to_nodecount.values())
max_depth_over_max_width = max_depth/max_width
return max_depth_over_max_width
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 20
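A hedged worked example on a toy comment tree; the CSR encoding below (parent row, child column) is an assumption about how the comment trees are stored:

import scipy.sparse as sp
# root 0 has replies 1 and 2; node 3 replies to node 1
tree = sp.csr_matrix(([1, 1, 1], ([0, 0, 1], [1, 2, 3])), shape=(4, 4))
# depths from the root: {0: 0, 1: 1, 2: 1, 3: 2} -> max_depth = 2, max_width = 2
print(calculate_max_depth_over_max_width(tree))  # 1.0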
Example 3: calculate_comment_tree_hirsch
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_comment_tree_hirsch(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
comment_tree_hirsch = 0.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
depth_to_nodecount = collections.defaultdict(int)
for k, v in node_to_depth.items():
depth_to_nodecount[v] += 1
comment_tree_hirsch = max(node_to_depth.values())
while True:
if depth_to_nodecount[comment_tree_hirsch] >= comment_tree_hirsch:
break
else:
comment_tree_hirsch -= 1
return comment_tree_hirsch
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 23
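Continuing the toy tree from Example 2: depth 1 holds two nodes (2 >= 1) but depth 2 holds only one node (1 < 2), so the comment-tree h-index is 1:

print(calculate_comment_tree_hirsch(tree))  # 1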
Example 4: init_setup
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def init_setup():
data = Dataset(root='/tmp/', name=args.dataset, setting='gcn')
data.features = normalize_feature(data.features)
adj, features, labels = data.adj, data.features, data.labels
StaticGraph.graph = nx.from_scipy_sparse_matrix(adj)
dict_of_lists = nx.to_dict_of_lists(StaticGraph.graph)
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
device = torch.device('cuda') if args.ctx == 'gpu' else 'cpu'
# black box setting
adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
victim_model = load_victim_model(data, device=device, file_path=args.saved_model)
setattr(victim_model, 'norm_tool', GraphNormTool(normalize=True, gm='gcn', device=device))
output = victim_model.predict(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return features, labels, idx_val, idx_test, victim_model, dict_of_lists, adj
Developer: DSE-MSU, Project: DeepRobust, Lines of code: 26
Example 5: textrank_tfidf
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def textrank_tfidf(sentences, topk=6):
    """
    Use tf-idf as the similarity measure; networkx.pagerank picks the most
    central sentences as the summary.
    :param sentences: str, docs of text
    :param topk: int
    :return: list
    """
    # split the text into sentences
    sentences = list(cut_sentence(sentences))
    # tf-idf similarity
    matrix_norm = tdidf_sim(sentences)
    # build the similarity matrix
    tfidf_sim = nx.from_scipy_sparse_matrix(matrix_norm * matrix_norm.T)
    # nx.pagerank
    sens_scores = nx.pagerank(tfidf_sim)
    # sort by score
    sen_rank = sorted(sens_scores.items(), key=lambda x: x[1], reverse=True)
    # keep at most topk to avoid an index out of range
    topk = min(len(sentences), topk)
    # return the original sentences with their scores
    return [(sr[1], sentences[sr[0]]) for sr in sen_rank][0:topk]
Developer: yongzhuo, Project: nlg-yongzhuo, Lines of code: 23
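The repo's cut_sentence and tdidf_sim helpers are not shown here, so the following is a self-contained sketch of the same idea using scikit-learn's TfidfVectorizer (the sentences are hypothetical stand-ins):

import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
sents = ["Graphs model pairwise relations.",
         "PageRank scores nodes by link structure.",
         "TextRank picks central sentences as a summary."]
tfidf = TfidfVectorizer().fit_transform(sents)
sim_graph = nx.from_scipy_sparse_matrix(tfidf * tfidf.T)
print(nx.pagerank(sim_graph))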
Example 6: textrank_text_summarizer
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def textrank_text_summarizer(documents, num_sentences=2,
                             feature_type='frequency'):
    # build the sentence-term feature matrix with the requested weighting
    vec, dt_matrix = build_feature_matrix(documents,
                                          feature_type=feature_type)
    # pairwise sentence similarity via the dot product of term vectors
    similarity_matrix = (dt_matrix * dt_matrix.T)
    similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
    # PageRank over the similarity graph scores sentence centrality
    scores = networkx.pagerank(similarity_graph)
    ranked_sentences = sorted(((score, index)
                               for index, score
                               in scores.items()),
                              reverse=True)
    top_sentence_indices = [ranked_sentences[index][1]
                            for index in range(num_sentences)]
    top_sentence_indices.sort()
    for index in top_sentence_indices:
        print(documents[index])
Developer: dipanjanS, Project: text-analytics-with-python, Lines of code: 23
Example 7: test_differential_operator
Votes: 6
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_differential_operator(self, n_vertices=98):
r"""The Laplacian must always be the divergence of the gradient,
whether the Laplacian is combinatorial or normalized, and whether the
graph is directed or weighted."""
def test_incidence_nx(graph):
r"""Test that the incidence matrix corresponds to NetworkX."""
incidence_pg = np.sign(graph.D.toarray())
G = nx.OrderedDiGraph if graph.is_directed() else nx.OrderedGraph
graph_nx = nx.from_scipy_sparse_matrix(graph.W, create_using=G)
incidence_nx = nx.incidence_matrix(graph_nx, oriented=True)
np.testing.assert_equal(incidence_pg, incidence_nx.toarray())
for graph in [graphs.Graph(np.zeros((n_vertices, n_vertices))),
graphs.Graph(np.identity(n_vertices)),
graphs.Graph([[0, 0.8], [0.8, 0]]),
graphs.Graph([[1.3, 0], [0.4, 0.5]]),
graphs.ErdosRenyi(n_vertices, directed=False, seed=42),
graphs.ErdosRenyi(n_vertices, directed=True, seed=42)]:
for lap_type in ['combinatorial', 'normalized']:
graph.compute_laplacian(lap_type)
graph.compute_differential_operator()
L = graph.D.dot(graph.D.T)
np.testing.assert_allclose(L.toarray(), graph.L.toarray())
test_incidence_nx(graph)
Developer: epfl-lts2, Project: pygsp, Lines of code: 25
Example 8: draw_adjacency_graph
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def draw_adjacency_graph(adjacency_matrix,
node_color=None,
size=10,
layout='graphviz',
prog='neato',
node_size=80,
colormap='autumn'):
"""draw_adjacency_graph."""
graph = nx.from_scipy_sparse_matrix(adjacency_matrix)
plt.figure(figsize=(size, size))
plt.grid(False)
plt.axis('off')
if layout == 'graphviz':
pos = nx.graphviz_layout(graph, prog=prog)
else:
pos = nx.spring_layout(graph)
    if node_color is None or len(node_color) == 0:
node_color = 'gray'
nx.draw_networkx_nodes(graph, pos,
node_color=node_color,
alpha=0.6,
node_size=node_size,
cmap=plt.get_cmap(colormap))
nx.draw_networkx_edges(graph, pos, alpha=0.5)
plt.show()
Developer: fabriziocosta, Project: EDeN, Lines of code: 33
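A hedged usage sketch with a random symmetric sparse matrix; layout='spring' sidesteps the optional Graphviz dependency:

import scipy.sparse as sp
A = sp.random(20, 20, density=0.1, format='csr', random_state=0)
A = A + A.T  # symmetrize so the drawing is an undirected graph
draw_adjacency_graph(A, node_color=[0.5] * 20, layout='spring', size=6)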
Example 9: calculate_max_depth
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_depth(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
max_depth = 0.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
max_depth = max(node_to_depth.values())
return max_depth
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 12
Example 10: calculate_avg_depth
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_avg_depth(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
avg_depth = 0.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
avg_depth = statistics.mean(node_to_depth.values())
return avg_depth
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 12
Example 11: calculate_max_width
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_width(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
max_width = 1.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
depth_to_nodecount = collections.defaultdict(int)
for k, v in node_to_depth.items():
depth_to_nodecount[v] += 1
max_width = max(depth_to_nodecount.values())
return max_width
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 17
Example 12: calculate_avg_width
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_avg_width(comment_tree):
comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())
if len(comment_tree_nx) == 0:
avg_width = 1.0
else:
node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
depth_to_nodecount = collections.defaultdict(int)
for k, v in node_to_depth.items():
depth_to_nodecount[v] += 1
avg_width = statistics.mean(depth_to_nodecount.values())
return avg_width
Developer: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 17
Example 13: overlay_skeleton_networkx
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def overlay_skeleton_networkx(csr_graph, coordinates, *, axis=None,
image=None, cmap=None, **kwargs):
"""Draw the skeleton as a NetworkX graph, optionally overlaid on an image.
Due to the size of NetworkX drawing elements, this is only recommended
for very small skeletons.
Parameters
----------
csr_graph : SciPy Sparse matrix
The skeleton graph in SciPy CSR format.
coordinates : array, shape (N_points, 2)
The coordinates of each point in the skeleton. ``coordinates.shape[0]``
should be equal to ``csr_graph.shape[0]``.
Other Parameters
----------------
axis : Matplotlib Axes object, optional
The Axes on which to plot the data. If None, a new figure and axes will
be created.
image : array, shape (M, N[, 3])
An image on which to overlay the skeleton. ``image.shape`` should be
greater than ``np.max(coordinates, axis=0)``.
**kwargs : keyword arguments
Arguments passed on to `nx.draw_networkx`. Particularly useful ones
include ``node_size=`` and ``font_size=``.
"""
if axis is None:
_, axis = plt.subplots()
if image is not None:
cmap = cmap or 'gray'
axis.imshow(image, cmap=cmap)
gnx = nx.from_scipy_sparse_matrix(csr_graph)
# Note: we invert the positions because Matplotlib uses x/y for
# scatterplot, but the coordinates are row/column NumPy indexing
positions = dict(zip(range(coordinates.shape[0]), coordinates[:, ::-1]))
_clean_positions_dict(positions, gnx) # remove nodes not in Graph
nx.draw_networkx(gnx, pos=positions, ax=axis, **kwargs)
return axis
Developer: jni, Project: skan, Lines of code: 41
Example 14: make_blogcatalog
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def make_blogcatalog(edgelist="../data/blogcatalog.mat",
dedupe=True):
"""
Graph with cluster labels from blogcatalog
Dedupe: Whether to deduplicate results (else some nodes have multilabels)
"""
mat = scipy.io.loadmat(edgelist)
nodes = mat['network'].tocsr()
groups = mat['group']
G = nx.from_scipy_sparse_matrix(nodes)
labels = (
pd.DataFrame(groups.todense())
.idxmax(axis=1)
.reset_index(drop=False)
)
labels.columns = ['node', 'label']
labels.node = labels.node.astype(int)
if dedupe:
labels = labels.loc[~labels.node.duplicated()
].reset_index(drop=True)
labels.label = labels.label.astype(int) - 1
return G, labels
else:
df = pd.DataFrame(groups.todense())
labels_list = df.apply(lambda row: list((row.loc[row > 0]).index), axis=1)
return G, pd.DataFrame({'node': list(G), 'mlabels': pd.Series(labels_list)})
Developer: VHRanger, Project: nodevectors, Lines of code: 29
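A hedged usage sketch, assuming blogcatalog.mat has already been downloaded to ../data/:

G, labels = make_blogcatalog("../data/blogcatalog.mat", dedupe=True)
print(G.number_of_nodes(), labels.label.nunique())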
Example 15: init_setup
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def init_setup():
data = Dataset(root='/tmp/', name=args.dataset, setting='nettack')
injecting_nodes(data)
adj, features, labels = data.adj, data.features, data.labels
StaticGraph.graph = nx.from_scipy_sparse_matrix(adj)
dict_of_lists = nx.to_dict_of_lists(StaticGraph.graph)
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
device = torch.device('cuda') if args.ctx == 'gpu' else 'cpu'
# gray box setting
adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
# Setup victim model
victim_model = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
nhid=16, dropout=0.5, weight_decay=5e-4, device=device)
victim_model = victim_model.to(device)
victim_model.fit(features, adj, labels, idx_train, idx_val)
setattr(victim_model, 'norm_tool', GraphNormTool(normalize=True, gm='gcn', device=device))
output = victim_model.predict(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return features, labels, idx_train, idx_val, idx_test, victim_model, dict_of_lists, adj
Developer: DSE-MSU, Project: DeepRobust, Lines of code: 32
Example 16: mat_to_nxG
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def mat_to_nxG(mat):
g = nx.from_scipy_sparse_matrix(mat)
return g
Developer: xchadesi, Project: GPF, Lines of code: 5
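Usage sketch: any square SciPy sparse matrix converts directly; an identity matrix yields one self-loop per node:

import networkx as nx
import scipy.sparse as sp
g = mat_to_nxG(sp.identity(3, format='csr'))
print(g.number_of_nodes(), nx.number_of_selfloops(g))  # 3 3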
Example 17: summarize
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def summarize(self, text, length=5, weighting='frequency', norm=None):
"""
Implements the TextRank summarization algorithm, which follows closely to the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words are normalized
# by the length of their associated sentences (such that each vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)
# Build the similarity graph by calculating the number of overlapping words between all
# combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(
((score, ndx) for ndx, score in scores.items()), reverse=True
)
top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences]
Developer: jaijuneja, Project: PyTLDR, Lines of code: 43
Example 18: reduce_graph
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def reduce_graph(self, adj):
# g = nx.from_scipy_sparse_matrix(adj)
n_nodes, n_edges, e_froms, e_tos = self.__CtypeAdj(adj)
reduced_node = (ctypes.c_int * (n_nodes))()
new_n_nodes = ctypes.c_int()
new_n_edges = ctypes.c_int()
reduced_xadj = (ctypes.c_int * (n_nodes+1))()
reduced_adjncy = (ctypes.c_int * (2*n_edges))()
mapping = (ctypes.c_int * (n_nodes))()
reverse_mapping = (ctypes.c_int * (n_nodes))()
crt_is_size = self.lib.Reduce(n_nodes, n_edges, e_froms, e_tos, reduced_node,
ctypes.byref(new_n_nodes), ctypes.byref(new_n_edges),
reduced_xadj, reduced_adjncy, mapping, reverse_mapping)
# crt_is_size = self.lib.Reduce(n_nodes, n_edges, e_froms, e_tos, reduced_node)
new_n_nodes = new_n_nodes.value
new_n_edges = new_n_edges.value
reduced_node = np.asarray(reduced_node[:])
reduced_xadj = np.asarray(reduced_xadj[:])
reduced_xadj = reduced_xadj[:new_n_nodes+1]
reduced_adjncy = np.asarray(reduced_adjncy[:])
reduced_adjncy = reduced_adjncy[:new_n_edges]
mapping = np.asarray(mapping[:])
reverse_mapping = np.asarray(reverse_mapping[:])
reverse_mapping = reverse_mapping[:new_n_nodes]
reduced_adj = sp.csr_matrix((np.ones(new_n_edges), reduced_adjncy, reduced_xadj), shape=[new_n_nodes, new_n_nodes])
return reduced_node, reduced_adj, mapping, reverse_mapping, crt_is_size
# return reduced_node[:], crt_is_size
Developer: intel-isl, Project: NPHard, Lines of code: 29
Example 19: reddit_to_networkx
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def reddit_to_networkx(dirpath):
print("Loading graph data")
coo_adj = scipy.sparse.load_npz(os.path.join(dirpath, edge_file_name))
G = nx.from_scipy_sparse_matrix(coo_adj)
print("Loading node feature and label")
# node feature, edge label
reddit_data = numpy.load(os.path.join(dirpath, feat_file_name))
G.graph['x'] = reddit_data['feature'].astype(numpy.float32)
G.graph['y'] = reddit_data['label'].astype(numpy.int32)
G.graph['label_num'] = 41
# G = nx.convert_node_labels_to_integers(G)
print("Finish loading graph: {}".format(dirpath))
return G
Developer: chainer, Project: chainer-chemistry, Lines of code: 17
Example 20: louvain
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def louvain(adjacency_matrix):
"""
Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
# Convert to networkx undirected graph.
adjacency_matrix = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.Graph())
# Call LOUVAIN algorithm to calculate a hierarchy of communities.
tree = community.generate_dendogram(adjacency_matrix, part_init=None)
# Embed communities
row = list()
col = list()
append_row = row.append
append_col = col.append
community_counter = 0
for i in range(len(tree)):
partition = community.partition_at_level(tree, i)
for n, c in partition.items():
append_row(n)
append_col(community_counter + c)
community_counter += max(partition.values()) + 1
row = np.array(row)
col = np.array(col)
data = np.ones(row.size, dtype=np.float64)
louvain_features = sparse.coo_matrix((data, (row, col)), shape=(len(partition.keys()), community_counter),
dtype=np.float64)
return louvain_features
Developer: MKLab-ITI, Project: reveal-graph-embedding, Lines of code: 43
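A hedged usage sketch on the karate club graph (assumes a python-louvain version importable as community with the generate_dendogram API the function above calls):

import networkx as nx
adjacency = nx.to_scipy_sparse_matrix(nx.karate_club_graph(), format='coo')
features = louvain(adjacency)
print(features.shape)  # (34, total number of communities across all levels)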
Example 21: get_stationary_distribution_directed
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def get_stationary_distribution_directed(adjacency_matrix, rho):
graph_nx = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.DiGraph())
stationary_distribution = pagerank_scipy(graph_nx,
alpha=1-rho,
personalization=None,
max_iter=200,
tol=1.0e-7,
weight="weight",
dangling=None)
stationary_distribution = np.array([stationary_distribution[k] for k in sorted(stationary_distribution.keys())])
return stationary_distribution
Developer: MKLab-ITI, Project: reveal-graph-embedding, Lines of code: 17
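A hedged sketch on a directed 3-cycle, whose stationary distribution is uniform regardless of the restart probability rho (assumes pagerank_scipy is networkx's implementation, imported elsewhere in the module):

import numpy as np
import scipy.sparse as sp
A = sp.csr_matrix(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
print(get_stationary_distribution_directed(A, rho=0.1))  # ~[1/3, 1/3, 1/3]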
Example 22: adamic_adar
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def adamic_adar(self):
"""Computes adamic adar scores."""
graph = nx.from_scipy_sparse_matrix(self.adj_matrix)
scores = nx.adamic_adar_index(graph)
return scores
Developer: google, Project: gcnn-survey-paper, Lines of code: 7
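Note that nx.adamic_adar_index returns a lazy generator of (u, v, score) triples over non-adjacent node pairs; a standalone sketch on a 3-node path:

import networkx as nx
import scipy.sparse as sp
adj = sp.csr_matrix(([1, 1], ([0, 1], [1, 2])), shape=(3, 3))  # path 0-1-2
for u, v, score in nx.adamic_adar_index(nx.from_scipy_sparse_matrix(adj)):
    print(u, v, score)  # (0, 2): 1 / ln(deg(1)) = 1 / ln(2) ≈ 1.44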
Example 23: jaccard_coeff
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def jaccard_coeff(self):
"""Computes Jaccard coefficients."""
graph = nx.from_scipy_sparse_matrix(self.adj_matrix)
coeffs = nx.jaccard_coefficient(graph)
return coeffs
Developer: google, Project: gcnn-survey-paper, Lines of code: 7
Example 24: identity_conversion
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def identity_conversion(self, G, A, create_using):
GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = create_using.__class__(A)
self.assert_equal(G, GI)
ACSR = A.tocsr()
GI = create_using.__class__(ACSR)
self.assert_equal(G, GI)
ACOO = A.tocoo()
GI = create_using.__class__(ACOO)
self.assert_equal(G, GI)
ACSC = A.tocsc()
GI = create_using.__class__(ACSC)
self.assert_equal(G, GI)
AD = A.todense()
GI = create_using.__class__(AD)
self.assert_equal(G, GI)
AA = A.toarray()
GI = create_using.__class__(AA)
self.assert_equal(G, GI)
Developer: SpaceGroupUCL, Project: qgisSpaceSyntaxToolkit, Lines of code: 31
Example 25: test_shape
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_shape(self):
"Conversion from non-square sparse array."
A = sp.sparse.lil_matrix([[1,2,3],[4,5,6]])
assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)
Developer: SpaceGroupUCL, Project: qgisSpaceSyntaxToolkit, Lines of code: 6
Example 26: test_from_scipy_sparse_matrix_parallel_edges
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_from_scipy_sparse_matrix_parallel_edges(self):
"""Tests that the :func:`networkx.from_scipy_sparse_matrix` function
interprets integer weights as the number of parallel edges when
creating a multigraph.
"""
A = sparse.csr_matrix([[1, 1], [1, 2]])
# First, with a simple graph, each integer entry in the adjacency
# matrix is interpreted as the weight of a single edge in the graph.
expected = nx.DiGraph()
edges = [(0, 0), (0, 1), (1, 0)]
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
expected.add_edge(1, 1, weight=2)
actual = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
create_using=nx.DiGraph())
assert_graphs_equal(actual, expected)
actual = nx.from_scipy_sparse_matrix(A, parallel_edges=False,
create_using=nx.DiGraph())
assert_graphs_equal(actual, expected)
# Now each integer entry in the adjacency matrix is interpreted as the
# number of parallel edges in the graph if the appropriate keyword
# argument is specified.
edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
expected = nx.MultiDiGraph()
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
actual = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
create_using=nx.MultiDiGraph())
assert_graphs_equal(actual, expected)
expected = nx.MultiDiGraph()
expected.add_edges_from(set(edges), weight=1)
# The sole self-loop (edge 0) on vertex 1 should have weight 2.
expected[1][1][0]['weight'] = 2
actual = nx.from_scipy_sparse_matrix(A, parallel_edges=False,
create_using=nx.MultiDiGraph())
assert_graphs_equal(actual, expected)
Developer: SpaceGroupUCL, Project: qgisSpaceSyntaxToolkit, Lines of code: 37
Example 27: test_symmetric
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_symmetric(self):
"""Tests that a symmetric matrix has edges added only once to an
undirected multigraph when using
:func:`networkx.from_scipy_sparse_matrix`.
"""
A = sparse.csr_matrix([[0, 1], [1, 0]])
G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
expected = nx.MultiGraph()
expected.add_edge(0, 1, weight=1)
assert_graphs_equal(G, expected)
Developer: SpaceGroupUCL, Project: qgisSpaceSyntaxToolkit, Lines of code: 13
Example 28: identity_conversion
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def identity_conversion(self, G, A, create_using):
GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
self.assert_isomorphic(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_isomorphic(G, GW)
GI = nx.empty_graph(0, create_using).__class__(A)
self.assert_isomorphic(G, GI)
ACSR = A.tocsr()
GI = nx.empty_graph(0, create_using).__class__(ACSR)
self.assert_isomorphic(G, GI)
ACOO = A.tocoo()
GI = nx.empty_graph(0, create_using).__class__(ACOO)
self.assert_isomorphic(G, GI)
ACSC = A.tocsc()
GI = nx.empty_graph(0, create_using).__class__(ACSC)
self.assert_isomorphic(G, GI)
AD = A.todense()
GI = nx.empty_graph(0, create_using).__class__(AD)
self.assert_isomorphic(G, GI)
AA = A.toarray()
GI = nx.empty_graph(0, create_using).__class__(AA)
self.assert_isomorphic(G, GI)
Developer: holzschu, Project: Carnets, Lines of code: 31
Example 29: test_shape
Votes: 5
# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_shape(self):
"Conversion from non-square sparse array."
A = sp.sparse.lil_matrix([[1, 2, 3], [4, 5, 6]])
assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)
Developer: holzschu, Project: Carnets, Lines of code: 6
Note: the networkx.from_scipy_sparse_matrix examples above were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright remains with the original authors. When reusing or redistributing the code, please consult the corresponding project's license; do not reproduce without permission.