Initial commit
13
extensions/networkx/linalg/__init__.py
Normal file
@@ -0,0 +1,13 @@
from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
from networkx.linalg.algebraicconnectivity import *
from networkx.linalg.modularitymatrix import *
import networkx.linalg.modularitymatrix
from networkx.linalg.bethehessianmatrix import *
import networkx.linalg.bethehessianmatrix
592
extensions/networkx/linalg/algebraicconnectivity.py
Normal file
@@ -0,0 +1,592 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
#
|
||||
# Author: ysitu <ysitu@users.noreply.github.com>
|
||||
"""
|
||||
Algebraic connectivity and Fiedler vectors of undirected graphs.
|
||||
"""
|
||||
from functools import partial
|
||||
import networkx as nx
|
||||
from networkx.utils import not_implemented_for
|
||||
from networkx.utils import reverse_cuthill_mckee_ordering
|
||||
from networkx.utils import random_state
|
||||
|
||||
try:
|
||||
from numpy import array, asmatrix, asarray, dot, ndarray, ones, sqrt, zeros
|
||||
from numpy.linalg import norm, qr
|
||||
from numpy.random import normal
|
||||
from scipy.linalg import eigh, inv
|
||||
from scipy.sparse import csc_matrix, spdiags
|
||||
from scipy.sparse.linalg import eigsh, lobpcg
|
||||
__all__ = ['algebraic_connectivity', 'fiedler_vector', 'spectral_ordering']
|
||||
except ImportError:
|
||||
__all__ = []
|
||||
|
||||
try:
|
||||
from scipy.linalg.blas import dasum, daxpy, ddot
|
||||
except ImportError:
|
||||
if __all__:
|
||||
# Make sure the imports succeeded.
|
||||
# Use minimal replacements if BLAS is unavailable from SciPy.
|
||||
dasum = partial(norm, ord=1)
|
||||
ddot = dot
|
||||
|
||||
def daxpy(x, y, a):
|
||||
y += a * x
|
||||
return y
|
||||
|
||||
|
||||
class _PCGSolver(object):
|
||||
"""Preconditioned conjugate gradient method.
|
||||
|
||||
To solve Ax = b:
|
||||
M = A.diagonal() # or some other preconditioner
|
||||
solver = _PCGSolver(lambda x: A * x, lambda x: M * x)
|
||||
x = solver.solve(b)
|
||||
|
||||
The inputs A and M are functions which compute
|
||||
matrix multiplication on the argument.
|
||||
A - multiply by the matrix A in Ax=b
|
||||
M - multiply by M, the preconditioner surrogate for A
|
||||
|
||||
Warning: There is no limit on number of iterations.
|
||||
"""
|
||||
|
||||
def __init__(self, A, M):
|
||||
self._A = A
|
||||
self._M = M or (lambda x: x.copy())
|
||||
|
||||
def solve(self, B, tol):
|
||||
B = asarray(B)
|
||||
X = ndarray(B.shape, order='F')
|
||||
for j in range(B.shape[1]):
|
||||
X[:, j] = self._solve(B[:, j], tol)
|
||||
return X
|
||||
|
||||
def _solve(self, b, tol):
|
||||
A = self._A
|
||||
M = self._M
|
||||
tol *= dasum(b)
|
||||
# Initialize.
|
||||
x = zeros(b.shape)
|
||||
r = b.copy()
|
||||
z = M(r)
|
||||
rz = ddot(r, z)
|
||||
p = z.copy()
|
||||
# Iterate.
|
||||
while True:
|
||||
Ap = A(p)
|
||||
alpha = rz / ddot(p, Ap)
|
||||
x = daxpy(p, x, a=alpha)
|
||||
r = daxpy(Ap, r, a=-alpha)
|
||||
if dasum(r) < tol:
|
||||
return x
|
||||
z = M(r)
|
||||
beta = ddot(r, z)
|
||||
beta, rz = beta / rz, beta
|
||||
p = daxpy(p, z, a=beta)
|
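# --- Editor's illustrative sketch (not part of the original commit) ---
# A minimal, hedged example of exercising the _PCGSolver above on a small
# symmetric positive-definite system. It assumes NumPy is installed; the helper
# name `_example_pcg_solve` is hypothetical and exists only for this sketch.
def _example_pcg_solve():
    import numpy as np
    A = np.array([[4., 1.], [1., 3.]])           # SPD matrix
    M = 1. / A.diagonal()                        # Jacobi (diagonal) preconditioner
    solver = _PCGSolver(lambda x: A @ x, lambda x: M * x)
    b = np.array([[1.], [2.]])                   # solve() expects a 2-D right-hand side
    x = solver.solve(b, tol=1e-10)
    return np.allclose(A @ x[:, 0], b[:, 0])     # True once the residual is small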
||||
|
||||
|
||||
class _CholeskySolver(object):
|
||||
"""Cholesky factorization.
|
||||
|
||||
To solve Ax = b:
|
||||
solver = _CholeskySolver(A)
|
||||
x = solver.solve(b)
|
||||
|
||||
optional argument `tol` on solve method is ignored but included
|
||||
to match _PCGsolver API.
|
||||
"""
|
||||
|
||||
def __init__(self, A):
|
||||
if not self._cholesky:
|
||||
raise nx.NetworkXError('Cholesky solver unavailable.')
|
||||
self._chol = self._cholesky(A)
|
||||
|
||||
def solve(self, B, tol=None):
|
||||
return self._chol(B)
|
||||
|
||||
try:
|
||||
from scikits.sparse.cholmod import cholesky
|
||||
_cholesky = cholesky
|
||||
except ImportError:
|
||||
_cholesky = None
|
||||
|
||||
|
||||
class _LUSolver(object):
|
||||
"""LU factorization.
|
||||
|
||||
To solve Ax = b:
|
||||
solver = _LUSolver(A)
|
||||
x = solver.solve(b)
|
||||
|
||||
optional argument `tol` on solve method is ignored but included
|
||||
to match _PCGsolver API.
|
||||
"""
|
||||
|
||||
def __init__(self, A):
|
||||
if not self._splu:
|
||||
raise nx.NetworkXError('LU solver unavailable.')
|
||||
self._LU = self._splu(A)
|
||||
|
||||
def solve(self, B, tol=None):
|
||||
B = asarray(B)
|
||||
X = ndarray(B.shape, order='F')
|
||||
for j in range(B.shape[1]):
|
||||
X[:, j] = self._LU.solve(B[:, j])
|
||||
return X
|
||||
|
||||
try:
|
||||
from scipy.sparse.linalg import splu
|
||||
_splu = partial(splu, permc_spec='MMD_AT_PLUS_A', diag_pivot_thresh=0.,
|
||||
options={'Equil': True, 'SymmetricMode': True})
|
||||
except ImportError:
|
||||
_splu = None
|
||||
|
||||
|
||||
def _preprocess_graph(G, weight):
|
||||
"""Compute edge weights and eliminate zero-weight edges.
|
||||
"""
|
||||
if G.is_directed():
|
||||
H = nx.MultiGraph()
|
||||
H.add_nodes_from(G)
|
||||
H.add_weighted_edges_from(((u, v, e.get(weight, 1.))
|
||||
for u, v, e in G.edges(data=True)
|
||||
if u != v), weight=weight)
|
||||
G = H
|
||||
if not G.is_multigraph():
|
||||
edges = ((u, v, abs(e.get(weight, 1.)))
|
||||
for u, v, e in G.edges(data=True) if u != v)
|
||||
else:
|
||||
edges = ((u, v, sum(abs(e.get(weight, 1.)) for e in G[u][v].values()))
|
||||
for u, v in G.edges() if u != v)
|
||||
H = nx.Graph()
|
||||
H.add_nodes_from(G)
|
||||
H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)
|
||||
return H
|
||||
|
||||
|
||||
def _rcm_estimate(G, nodelist):
|
||||
"""Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.
|
||||
"""
|
||||
G = G.subgraph(nodelist)
|
||||
order = reverse_cuthill_mckee_ordering(G)
|
||||
n = len(nodelist)
|
||||
index = dict(zip(nodelist, range(n)))
|
||||
x = ndarray(n, dtype=float)
|
||||
for i, u in enumerate(order):
|
||||
x[index[u]] = i
|
||||
x -= (n - 1) / 2.
|
||||
return x
|
||||
|
||||
|
||||
def _tracemin_fiedler(L, X, normalized, tol, method):
|
||||
"""Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
|
||||
|
||||
The Fiedler vector of a connected undirected graph is the eigenvector
|
||||
corresponding to the second smallest eigenvalue of the Laplacian matrix of
|
||||
the graph. This function starts with the Laplacian L, not the Graph.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
L : Laplacian of a possibly weighted or normalized, but undirected graph
|
||||
|
||||
X : Initial guess for a solution. Usually a matrix of random numbers.
|
||||
This function allows more than one column in X to identify more than
|
||||
one eigenvector if desired.
|
||||
|
||||
normalized : bool
|
||||
Whether the normalized Laplacian matrix is used.
|
||||
|
||||
tol : float
|
||||
Tolerance of relative residual in eigenvalue computation.
|
||||
Warning: There is no limit on number of iterations.
|
||||
|
||||
method : string
|
||||
Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'.
|
||||
Otherwise exception is raised.
|
||||
|
||||
Returns
|
||||
-------
|
||||
sigma, X : Two NumPy arrays of floats.
|
||||
The lowest eigenvalues and corresponding eigenvectors of L.
|
||||
The size of input X determines the size of these outputs.
|
||||
As this is for Fiedler vectors, the zero eigenvalue (and
|
||||
constant eigenvector) are avoided.
|
||||
"""
|
||||
n = X.shape[0]
|
||||
|
||||
if normalized:
|
||||
# Form the normalized Laplacian matrix and determine the eigenvector of
|
||||
# its nullspace.
|
||||
e = sqrt(L.diagonal())
|
||||
D = spdiags(1. / e, [0], n, n, format='csr')
|
||||
L = D * L * D
|
||||
e *= 1. / norm(e, 2)
|
||||
|
||||
if normalized:
|
||||
def project(X):
|
||||
"""Make X orthogonal to the nullspace of L.
|
||||
"""
|
||||
X = asarray(X)
|
||||
for j in range(X.shape[1]):
|
||||
X[:, j] -= dot(X[:, j], e) * e
|
||||
else:
|
||||
def project(X):
|
||||
"""Make X orthogonal to the nullspace of L.
|
||||
"""
|
||||
X = asarray(X)
|
||||
for j in range(X.shape[1]):
|
||||
X[:, j] -= X[:, j].sum() / n
|
||||
|
||||
if method == 'tracemin_pcg':
|
||||
D = L.diagonal().astype(float)
|
||||
solver = _PCGSolver(lambda x: L * x, lambda x: D * x)
|
||||
elif method == 'tracemin_chol' or method == 'tracemin_lu':
|
||||
# Convert A to CSC to suppress SparseEfficiencyWarning.
|
||||
A = csc_matrix(L, dtype=float, copy=True)
|
||||
# Force A to be nonsingular. Since A is the Laplacian matrix of a
|
||||
# connected graph, its rank deficiency is one, and thus one diagonal
|
||||
# element needs to modified. Changing to infinity forces a zero in the
|
||||
# corresponding element in the solution.
|
||||
i = (A.indptr[1:] - A.indptr[:-1]).argmax()
|
||||
A[i, i] = float('inf')
|
||||
if method == 'tracemin_chol':
|
||||
solver = _CholeskySolver(A)
|
||||
else:
|
||||
solver = _LUSolver(A)
|
||||
else:
|
||||
raise nx.NetworkXError('Unknown linear system solver: ' + method)
|
||||
|
||||
# Initialize.
|
||||
Lnorm = abs(L).sum(axis=1).flatten().max()
|
||||
project(X)
|
||||
W = asmatrix(ndarray(X.shape, order='F'))
|
||||
|
||||
while True:
|
||||
# Orthonormalize X.
|
||||
X = qr(X)[0]
|
||||
# Compute iteration matrix H.
|
||||
W[:, :] = L * X
|
||||
H = X.T * W
|
||||
sigma, Y = eigh(H, overwrite_a=True)
|
||||
# Compute the Ritz vectors.
|
||||
X *= Y
|
||||
# Test for convergence exploiting the fact that L * X == W * Y.
|
||||
res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
|
||||
if res < tol:
|
||||
break
|
||||
# Compute X = L \ X / (X' * (L \ X)).
|
||||
# L \ X can have an arbitrary projection on the nullspace of L,
|
||||
# which will be eliminated.
|
||||
W[:, :] = solver.solve(X, tol)
|
||||
X = (inv(W.T * X) * W.T).T # Preserves Fortran storage order.
|
||||
project(X)
|
||||
|
||||
return sigma, asarray(X)
|
||||
|
||||
|
||||
def _get_fiedler_func(method):
|
||||
"""Returns a function that solves the Fiedler eigenvalue problem.
|
||||
"""
|
||||
if method == "tracemin": # old style keyword <v2.1
|
||||
method = "tracemin_pcg"
|
||||
if method in ("tracemin_pcg", "tracemin_chol", "tracemin_lu"):
|
||||
def find_fiedler(L, x, normalized, tol, seed):
|
||||
q = 1 if method == 'tracemin_pcg' else min(4, L.shape[0] - 1)
|
||||
X = asmatrix(seed.normal(size=(q, L.shape[0]))).T
|
||||
sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
|
||||
return sigma[0], X[:, 0]
|
||||
elif method == 'lanczos' or method == 'lobpcg':
|
||||
def find_fiedler(L, x, normalized, tol, seed):
|
||||
L = csc_matrix(L, dtype=float)
|
||||
n = L.shape[0]
|
||||
if normalized:
|
||||
D = spdiags(1. / sqrt(L.diagonal()), [0], n, n, format='csc')
|
||||
L = D * L * D
|
||||
if method == 'lanczos' or n < 10:
|
||||
# Avoid LOBPCG when n < 10 due to
|
||||
# https://github.com/scipy/scipy/issues/3592
|
||||
# https://github.com/scipy/scipy/pull/3594
|
||||
sigma, X = eigsh(L, 2, which='SM', tol=tol,
|
||||
return_eigenvectors=True)
|
||||
return sigma[1], X[:, 1]
|
||||
else:
|
||||
X = asarray(asmatrix(x).T)
|
||||
M = spdiags(1. / L.diagonal(), [0], n, n)
|
||||
Y = ones(n)
|
||||
if normalized:
|
||||
Y /= D.diagonal()
|
||||
sigma, X = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol,
|
||||
maxiter=n, largest=False)
|
||||
return sigma[0], X[:, 0]
|
||||
else:
|
||||
raise nx.NetworkXError("unknown method '%s'." % method)
|
||||
|
||||
return find_fiedler
|
||||
|
||||
|
||||
@random_state(5)
|
||||
@not_implemented_for('directed')
|
||||
def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-8,
|
||||
method='tracemin_pcg', seed=None):
|
||||
"""Returns the algebraic connectivity of an undirected graph.
|
||||
|
||||
The algebraic connectivity of a connected undirected graph is the second
|
||||
smallest eigenvalue of its Laplacian matrix.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : NetworkX graph
|
||||
An undirected graph.
|
||||
|
||||
weight : object, optional (default: 'weight')
|
||||
The data key used to determine the weight of each edge. If None, then
|
||||
each edge has unit weight.
|
||||
|
||||
normalized : bool, optional (default: False)
|
||||
Whether the normalized Laplacian matrix is used.
|
||||
|
||||
tol : float, optional (default: 1e-8)
|
||||
Tolerance of relative residual in eigenvalue computation.
|
||||
|
||||
method : string, optional (default: 'tracemin_pcg')
|
||||
Method of eigenvalue computation. It must be one of the tracemin
|
||||
options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
|
||||
or 'lobpcg' (LOBPCG).
|
||||
|
||||
The TraceMIN algorithm uses a linear system solver. The following
|
||||
values allow specifying the solver to be used.
|
||||
|
||||
=============== ========================================
|
||||
Value Solver
|
||||
=============== ========================================
|
||||
'tracemin_pcg' Preconditioned conjugate gradient method
|
||||
'tracemin_chol' Cholesky factorization
|
||||
'tracemin_lu' LU factorization
|
||||
=============== ========================================
|
||||
|
||||
seed : integer, random_state, or None (default)
|
||||
Indicator of random number generation state.
|
||||
See :ref:`Randomness<randomness>`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
algebraic_connectivity : float
|
||||
Algebraic connectivity.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXNotImplemented
|
||||
If G is directed.
|
||||
|
||||
NetworkXError
|
||||
If G has less than two nodes.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Edge weights are interpreted by their absolute values. For MultiGraph's,
|
||||
weights of parallel edges are summed. Zero-weighted edges are ignored.
|
||||
|
||||
To use Cholesky factorization in the TraceMIN algorithm, the
|
||||
:samp:`scikits.sparse` package must be installed.
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
"""
|
||||
if len(G) < 2:
|
||||
raise nx.NetworkXError('graph has less than two nodes.')
|
||||
G = _preprocess_graph(G, weight)
|
||||
if not nx.is_connected(G):
|
||||
return 0.
|
||||
|
||||
L = nx.laplacian_matrix(G)
|
||||
if L.shape[0] == 2:
|
||||
return 2. * L[0, 0] if not normalized else 2.
|
||||
|
||||
find_fiedler = _get_fiedler_func(method)
|
||||
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
|
||||
sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
|
||||
return sigma
|
||||
|
||||
|
||||
@random_state(5)
|
||||
@not_implemented_for('directed')
|
||||
def fiedler_vector(G, weight='weight', normalized=False, tol=1e-8,
|
||||
method='tracemin_pcg', seed=None):
|
||||
"""Returns the Fiedler vector of a connected undirected graph.
|
||||
|
||||
The Fiedler vector of a connected undirected graph is the eigenvector
|
||||
corresponding to the second smallest eigenvalue of the Laplacian matrix of
|
||||
the graph.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : NetworkX graph
|
||||
An undirected graph.
|
||||
|
||||
weight : object, optional (default: 'weight')
|
||||
The data key used to determine the weight of each edge. If None, then
|
||||
each edge has unit weight.
|
||||
|
||||
normalized : bool, optional (default: False)
|
||||
Whether the normalized Laplacian matrix is used.
|
||||
|
||||
tol : float, optional (default: 1e-8)
|
||||
Tolerance of relative residual in eigenvalue computation.
|
||||
|
||||
method : string, optional (default: 'tracemin_pcg')
|
||||
Method of eigenvalue computation. It must be one of the tracemin
|
||||
options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
|
||||
or 'lobpcg' (LOBPCG).
|
||||
|
||||
The TraceMIN algorithm uses a linear system solver. The following
|
||||
values allow specifying the solver to be used.
|
||||
|
||||
=============== ========================================
|
||||
Value Solver
|
||||
=============== ========================================
|
||||
'tracemin_pcg' Preconditioned conjugate gradient method
|
||||
'tracemin_chol' Cholesky factorization
|
||||
'tracemin_lu' LU factorization
|
||||
=============== ========================================
|
||||
|
||||
seed : integer, random_state, or None (default)
|
||||
Indicator of random number generation state.
|
||||
See :ref:`Randomness<randomness>`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
fiedler_vector : NumPy array of floats.
|
||||
Fiedler vector.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXNotImplemented
|
||||
If G is directed.
|
||||
|
||||
NetworkXError
|
||||
If G has less than two nodes or is not connected.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Edge weights are interpreted by their absolute values. For MultiGraph's,
|
||||
weights of parallel edges are summed. Zero-weighted edges are ignored.
|
||||
|
||||
To use Cholesky factorization in the TraceMIN algorithm, the
|
||||
:samp:`scikits.sparse` package must be installed.
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
"""
|
||||
if len(G) < 2:
|
||||
raise nx.NetworkXError('graph has less than two nodes.')
|
||||
G = _preprocess_graph(G, weight)
|
||||
if not nx.is_connected(G):
|
||||
raise nx.NetworkXError('graph is not connected.')
|
||||
|
||||
if len(G) == 2:
|
||||
return array([1., -1.])
|
||||
|
||||
find_fiedler = _get_fiedler_func(method)
|
||||
L = nx.laplacian_matrix(G)
|
||||
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
|
||||
sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
|
||||
return fiedler
|
||||
|
||||
|
||||
@random_state(5)
|
||||
def spectral_ordering(G, weight='weight', normalized=False, tol=1e-8,
|
||||
method='tracemin_pcg', seed=None):
|
||||
"""Compute the spectral_ordering of a graph.
|
||||
|
||||
The spectral ordering of a graph is an ordering of its nodes where nodes
|
||||
in the same weakly connected components appear contiguous and ordered by
|
||||
their corresponding elements in the Fiedler vector of the component.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : NetworkX graph
|
||||
A graph.
|
||||
|
||||
weight : object, optional (default: 'weight')
|
||||
The data key used to determine the weight of each edge. If None, then
|
||||
each edge has unit weight.
|
||||
|
||||
normalized : bool, optional (default: False)
|
||||
Whether the normalized Laplacian matrix is used.
|
||||
|
||||
tol : float, optional (default: 1e-8)
|
||||
Tolerance of relative residual in eigenvalue computation.
|
||||
|
||||
method : string, optional (default: 'tracemin_pcg')
|
||||
Method of eigenvalue computation. It must be one of the tracemin
|
||||
options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
|
||||
or 'lobpcg' (LOBPCG).
|
||||
|
||||
The TraceMIN algorithm uses a linear system solver. The following
|
||||
values allow specifying the solver to be used.
|
||||
|
||||
=============== ========================================
|
||||
Value Solver
|
||||
=============== ========================================
|
||||
'tracemin_pcg' Preconditioned conjugate gradient method
|
||||
'tracemin_chol' Cholesky factorization
|
||||
'tracemin_lu' LU factorization
|
||||
=============== ========================================
|
||||
|
||||
seed : integer, random_state, or None (default)
|
||||
Indicator of random number generation state.
|
||||
See :ref:`Randomness<randomness>`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
spectral_ordering : NumPy array of floats.
|
||||
Spectral ordering of nodes.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXError
|
||||
If G is empty.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Edge weights are interpreted by their absolute values. For MultiGraph's,
|
||||
weights of parallel edges are summed. Zero-weighted edges are ignored.
|
||||
|
||||
To use Cholesky factorization in the TraceMIN algorithm, the
|
||||
:samp:`scikits.sparse` package must be installed.
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
"""
|
||||
if len(G) == 0:
|
||||
raise nx.NetworkXError('graph is empty.')
|
||||
G = _preprocess_graph(G, weight)
|
||||
|
||||
find_fiedler = _get_fiedler_func(method)
|
||||
order = []
|
||||
for component in nx.connected_components(G):
|
||||
size = len(component)
|
||||
if size > 2:
|
||||
L = nx.laplacian_matrix(G, component)
|
||||
x = None if method != 'lobpcg' else _rcm_estimate(G, component)
|
||||
sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
|
||||
sort_info = zip(fiedler, range(size), component)
|
||||
order.extend(u for x, c, u in sorted(sort_info))
|
||||
else:
|
||||
order.extend(component)
|
||||
|
||||
return order
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
pytest.importorskip('scipy.sparse')
|
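A minimal usage sketch for the public API added in this file, assuming NetworkX is
installed together with NumPy/SciPy (the path graph below is only an example input):

    import networkx as nx

    G = nx.path_graph(5)
    nx.algebraic_connectivity(G)             # second-smallest Laplacian eigenvalue
    nx.fiedler_vector(G, method='lanczos')   # the corresponding eigenvector
    nx.spectral_ordering(G)                  # nodes sorted by their Fiedler-vector entries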
455
extensions/networkx/linalg/attrmatrix.py
Normal file
@@ -0,0 +1,455 @@
|
||||
"""
|
||||
Functions for constructing matrix-like objects from graph attributes.
|
||||
"""
|
||||
|
||||
__all__ = ['attr_matrix', 'attr_sparse_matrix']
|
||||
|
||||
import networkx as nx
|
||||
|
||||
|
||||
def _node_value(G, node_attr):
|
||||
"""Returns a function that returns a value from G.nodes[u].
|
||||
|
||||
We return a function expecting a node as its sole argument. Then, in the
|
||||
simplest scenario, the returned function will return G.nodes[u][node_attr].
|
||||
However, we also handle the case when `node_attr` is None or when it is a
|
||||
function itself.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
node_attr : {None, str, callable}
|
||||
Specification of how the value of the node attribute should be obtained
|
||||
from the node attribute dictionary.
|
||||
|
||||
Returns
|
||||
-------
|
||||
value : function
|
||||
A function expecting a node as its sole argument. The function will
|
||||
return a value from G.nodes[u] that depends on `node_attr`.
|
||||
|
||||
"""
|
||||
if node_attr is None:
|
||||
def value(u): return u
|
||||
elif not hasattr(node_attr, '__call__'):
|
||||
# assume it is a key for the node attribute dictionary
|
||||
def value(u): return G.nodes[u][node_attr]
|
||||
else:
|
||||
# Advanced: Allow users to specify something else.
|
||||
#
|
||||
# For example,
|
||||
# node_attr = lambda u: G.nodes[u].get('size', .5) * 3
|
||||
#
|
||||
value = node_attr
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def _edge_value(G, edge_attr):
|
||||
"""Returns a function that returns a value from G[u][v].
|
||||
|
||||
Suppose there exists an edge between u and v. Then we return a function
|
||||
expecting u and v as arguments. For Graph and DiGraph, G[u][v] is
|
||||
the edge attribute dictionary, and the function (essentially) returns
|
||||
G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None
|
||||
and when it is a function itself. For MultiGraph and MultiDiGraph, G[u][v]
|
||||
is a dictionary of all edges between u and v. In this case, the returned
|
||||
function sums the value of `edge_attr` for every edge between u and v.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
edge_attr : {None, str, callable}
|
||||
Specification of how the value of the edge attribute should be obtained
|
||||
from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v]
|
||||
is a dictionary of all the edges between u and v. This allows for
|
||||
special treatment of multiedges.
|
||||
|
||||
Returns
|
||||
-------
|
||||
value : function
|
||||
A function expecting two nodes as parameters. The nodes should
|
||||
represent the from- and to- node of an edge. The function will
|
||||
return a value from G[u][v] that depends on `edge_attr`.
|
||||
|
||||
"""
|
||||
|
||||
if edge_attr is None:
|
||||
# topological count of edges
|
||||
|
||||
if G.is_multigraph():
|
||||
def value(u, v): return len(G[u][v])
|
||||
else:
|
||||
def value(u, v): return 1
|
||||
|
||||
elif not hasattr(edge_attr, '__call__'):
|
||||
# assume it is a key for the edge attribute dictionary
|
||||
|
||||
if edge_attr == 'weight':
|
||||
# provide a default value
|
||||
if G.is_multigraph():
|
||||
def value(u, v): return sum([d.get(edge_attr, 1) for d in G[u][v].values()])
|
||||
else:
|
||||
def value(u, v): return G[u][v].get(edge_attr, 1)
|
||||
else:
|
||||
# otherwise, the edge attribute MUST exist for each edge
|
||||
if G.is_multigraph():
|
||||
def value(u, v): return sum([d[edge_attr] for d in G[u][v].values()])
|
||||
else:
|
||||
def value(u, v): return G[u][v][edge_attr]
|
||||
|
||||
else:
|
||||
# Advanced: Allow users to specify something else.
|
||||
#
|
||||
# Alternative default value:
|
||||
# edge_attr = lambda u,v: G[u][v].get('thickness', .5)
|
||||
#
|
||||
# Function on an attribute:
|
||||
# edge_attr = lambda u,v: abs(G[u][v]['weight'])
|
||||
#
|
||||
# Handle Multi(Di)Graphs differently:
|
||||
# edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()])
|
||||
#
|
||||
# Ignore multiple edges
|
||||
# edge_attr = lambda u,v: 1 if len(G[u][v]) else 0
|
||||
#
|
||||
value = edge_attr
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def attr_matrix(G, edge_attr=None, node_attr=None, normalized=False,
|
||||
rc_order=None, dtype=None, order=None):
|
||||
"""Returns a NumPy matrix using attributes from G.
|
||||
|
||||
If only `G` is passed in, then the adjacency matrix is constructed.
|
||||
|
||||
Let A be a discrete set of values for the node attribute `node_attr`. Then
|
||||
the elements of A represent the rows and columns of the constructed matrix.
|
||||
Now, iterate through every edge e=(u,v) in `G` and consider the value
|
||||
of the edge attribute `edge_attr`. If ua and va are the values of the
|
||||
node attribute `node_attr` for u and v, respectively, then the value of
|
||||
the edge attribute is added to the matrix element at (ua, va).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
The NetworkX graph used to construct the NumPy matrix.
|
||||
|
||||
edge_attr : str, optional
|
||||
Each element of the matrix represents a running total of the
|
||||
specified edge attribute for edges whose node attributes correspond
|
||||
to the rows/cols of the matrix. The attribute must be present for
|
||||
all edges in the graph. If no attribute is specified, then we
|
||||
just count the number of edges whose node attributes correspond
|
||||
to the matrix element.
|
||||
|
||||
node_attr : str, optional
|
||||
Each row and column in the matrix represents a particular value
|
||||
of the node attribute. The attribute must be present for all nodes
|
||||
in the graph. Note, the values of this attribute should be reliably
|
||||
hashable. So, float values are not recommended. If no attribute is
|
||||
specified, then the rows and columns will be the nodes of the graph.
|
||||
|
||||
normalized : bool, optional
|
||||
If True, then each row is normalized by the summation of its values.
|
||||
|
||||
rc_order : list, optional
|
||||
A list of the node attribute values. This list specifies the ordering
|
||||
of rows and columns of the array. If no ordering is provided, then
|
||||
the ordering will be random (and also, a return value).
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
dtype : NumPy data-type, optional
|
||||
A valid NumPy dtype used to initialize the array. Keep in mind certain
|
||||
dtypes can yield unexpected results if the array is to be normalized.
|
||||
The parameter is passed to numpy.zeros(). If unspecified, the NumPy
|
||||
default is used.
|
||||
|
||||
order : {'C', 'F'}, optional
|
||||
Whether to store multidimensional data in C- or Fortran-contiguous
|
||||
(row- or column-wise) order in memory. This parameter is passed to
|
||||
numpy.zeros(). If unspecified, the NumPy default is used.
|
||||
|
||||
Returns
|
||||
-------
|
||||
M : NumPy matrix
|
||||
The attribute matrix.
|
||||
|
||||
ordering : list
|
||||
If `rc_order` was specified, then only the matrix is returned.
|
||||
However, if `rc_order` was None, then the ordering used to construct
|
||||
the matrix is returned as well.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Construct an adjacency matrix:
|
||||
|
||||
>>> G = nx.Graph()
|
||||
>>> G.add_edge(0, 1, thickness=1, weight=3)
|
||||
>>> G.add_edge(0, 2, thickness=2)
|
||||
>>> G.add_edge(1, 2, thickness=3)
|
||||
>>> nx.attr_matrix(G, rc_order=[0, 1, 2])
|
||||
matrix([[0., 1., 1.],
|
||||
[1., 0., 1.],
|
||||
[1., 1., 0.]])
|
||||
|
||||
Alternatively, we can obtain the matrix describing edge thickness.
|
||||
|
||||
>>> nx.attr_matrix(G, edge_attr='thickness', rc_order=[0, 1, 2])
|
||||
matrix([[0., 1., 2.],
|
||||
[1., 0., 3.],
|
||||
[2., 3., 0.]])
|
||||
|
||||
We can also color the nodes and ask for the probability distribution over
|
||||
all edges (u,v) describing:
|
||||
|
||||
Pr(v has color Y | u has color X)
|
||||
|
||||
>>> G.nodes[0]['color'] = 'red'
|
||||
>>> G.nodes[1]['color'] = 'red'
|
||||
>>> G.nodes[2]['color'] = 'blue'
|
||||
>>> rc = ['red', 'blue']
|
||||
>>> nx.attr_matrix(G, node_attr='color', normalized=True, rc_order=rc)
|
||||
matrix([[0.33333333, 0.66666667],
|
||||
[1. , 0. ]])
|
||||
|
||||
For example, the above tells us that for all edges (u,v):
|
||||
|
||||
Pr( v is red | u is red) = 1/3
|
||||
Pr( v is blue | u is red) = 2/3
|
||||
|
||||
Pr( v is red | u is blue) = 1
|
||||
Pr( v is blue | u is blue) = 0
|
||||
|
||||
Finally, we can obtain the total weights listed by the node colors.
|
||||
|
||||
>>> nx.attr_matrix(G, edge_attr='weight', node_attr='color', rc_order=rc)
|
||||
matrix([[3., 2.],
|
||||
[2., 0.]])
|
||||
|
||||
Thus, the total weight over all edges (u,v) with u and v having colors:
|
||||
|
||||
(red, red) is 3 # the sole contribution is from edge (0,1)
|
||||
(red, blue) is 2 # contributions from edges (0,2) and (1,2)
|
||||
(blue, red) is 2 # same as (red, blue) since graph is undirected
|
||||
(blue, blue) is 0 # there are no edges with blue endpoints
|
||||
|
||||
"""
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"attr_matrix() requires numpy: http://scipy.org/ ")
|
||||
|
||||
edge_value = _edge_value(G, edge_attr)
|
||||
node_value = _node_value(G, node_attr)
|
||||
|
||||
if rc_order is None:
|
||||
ordering = list(set([node_value(n) for n in G]))
|
||||
else:
|
||||
ordering = rc_order
|
||||
|
||||
N = len(ordering)
|
||||
undirected = not G.is_directed()
|
||||
index = dict(zip(ordering, range(N)))
|
||||
M = np.zeros((N, N), dtype=dtype, order=order)
|
||||
|
||||
seen = set([])
|
||||
for u, nbrdict in G.adjacency():
|
||||
for v in nbrdict:
|
||||
# Obtain the node attribute values.
|
||||
i, j = index[node_value(u)], index[node_value(v)]
|
||||
if v not in seen:
|
||||
M[i, j] += edge_value(u, v)
|
||||
if undirected:
|
||||
M[j, i] = M[i, j]
|
||||
|
||||
if undirected:
|
||||
seen.add(u)
|
||||
|
||||
if normalized:
|
||||
M /= M.sum(axis=1).reshape((N, 1))
|
||||
|
||||
M = np.asmatrix(M)
|
||||
|
||||
if rc_order is None:
|
||||
return M, ordering
|
||||
else:
|
||||
return M
|
||||
|
||||
|
||||
def attr_sparse_matrix(G, edge_attr=None, node_attr=None,
|
||||
normalized=False, rc_order=None, dtype=None):
|
||||
"""Returns a SciPy sparse matrix using attributes from G.
|
||||
|
||||
If only `G` is passed in, then the adjacency matrix is constructed.
|
||||
|
||||
Let A be a discrete set of values for the node attribute `node_attr`. Then
|
||||
the elements of A represent the rows and columns of the constructed matrix.
|
||||
Now, iterate through every edge e=(u,v) in `G` and consider the value
|
||||
of the edge attribute `edge_attr`. If ua and va are the values of the
|
||||
node attribute `node_attr` for u and v, respectively, then the value of
|
||||
the edge attribute is added to the matrix element at (ua, va).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
The NetworkX graph used to construct the NumPy matrix.
|
||||
|
||||
edge_attr : str, optional
|
||||
Each element of the matrix represents a running total of the
|
||||
specified edge attribute for edges whose node attributes correspond
|
||||
to the rows/cols of the matrix. The attribute must be present for
|
||||
all edges in the graph. If no attribute is specified, then we
|
||||
just count the number of edges whose node attributes correspond
|
||||
to the matrix element.
|
||||
|
||||
node_attr : str, optional
|
||||
Each row and column in the matrix represents a particular value
|
||||
of the node attribute. The attribute must be present for all nodes
|
||||
in the graph. Note, the values of this attribute should be reliably
|
||||
hashable. So, float values are not recommended. If no attribute is
|
||||
specified, then the rows and columns will be the nodes of the graph.
|
||||
|
||||
normalized : bool, optional
|
||||
If True, then each row is normalized by the summation of its values.
|
||||
|
||||
rc_order : list, optional
|
||||
A list of the node attribute values. This list specifies the ordering
|
||||
of rows and columns of the array. If no ordering is provided, then
|
||||
the ordering will be random (and also, a return value).
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
dtype : NumPy data-type, optional
|
||||
A valid NumPy dtype used to initialize the array. Keep in mind certain
|
||||
dtypes can yield unexpected results if the array is to be normalized.
|
||||
The parameter is passed to numpy.zeros(). If unspecified, the NumPy
|
||||
default is used.
|
||||
|
||||
Returns
|
||||
-------
|
||||
M : SciPy sparse matrix
|
||||
The attribute matrix.
|
||||
|
||||
ordering : list
|
||||
If `rc_order` was specified, then only the matrix is returned.
|
||||
However, if `rc_order` was None, then the ordering used to construct
|
||||
the matrix is returned as well.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Construct an adjacency matrix:
|
||||
|
||||
>>> G = nx.Graph()
|
||||
>>> G.add_edge(0,1,thickness=1,weight=3)
|
||||
>>> G.add_edge(0,2,thickness=2)
|
||||
>>> G.add_edge(1,2,thickness=3)
|
||||
>>> M = nx.attr_sparse_matrix(G, rc_order=[0,1,2])
|
||||
>>> M.todense()
|
||||
matrix([[0., 1., 1.],
|
||||
[1., 0., 1.],
|
||||
[1., 1., 0.]])
|
||||
|
||||
Alternatively, we can obtain the matrix describing edge thickness.
|
||||
|
||||
>>> M = nx.attr_sparse_matrix(G, edge_attr='thickness', rc_order=[0,1,2])
|
||||
>>> M.todense()
|
||||
matrix([[0., 1., 2.],
|
||||
[1., 0., 3.],
|
||||
[2., 3., 0.]])
|
||||
|
||||
We can also color the nodes and ask for the probability distribution over
|
||||
all edges (u,v) describing:
|
||||
|
||||
Pr(v has color Y | u has color X)
|
||||
|
||||
>>> G.nodes[0]['color'] = 'red'
|
||||
>>> G.nodes[1]['color'] = 'red'
|
||||
>>> G.nodes[2]['color'] = 'blue'
|
||||
>>> rc = ['red', 'blue']
|
||||
>>> M = nx.attr_sparse_matrix(G, node_attr='color', \
|
||||
normalized=True, rc_order=rc)
|
||||
>>> M.todense()
|
||||
matrix([[0.33333333, 0.66666667],
|
||||
[1. , 0. ]])
|
||||
|
||||
For example, the above tells us that for all edges (u,v):
|
||||
|
||||
Pr( v is red | u is red) = 1/3
|
||||
Pr( v is blue | u is red) = 2/3
|
||||
|
||||
Pr( v is red | u is blue) = 1
|
||||
Pr( v is blue | u is blue) = 0
|
||||
|
||||
Finally, we can obtain the total weights listed by the node colors.
|
||||
|
||||
>>> M = nx.attr_sparse_matrix(G, edge_attr='weight',\
|
||||
node_attr='color', rc_order=rc)
|
||||
>>> M.todense()
|
||||
matrix([[3., 2.],
|
||||
[2., 0.]])
|
||||
|
||||
Thus, the total weight over all edges (u,v) with u and v having colors:
|
||||
|
||||
(red, red) is 3 # the sole contribution is from edge (0,1)
|
||||
(red, blue) is 2 # contributions from edges (0,2) and (1,2)
|
||||
(blue, red) is 2 # same as (red, blue) since graph is undirected
|
||||
(blue, blue) is 0 # there are no edges with blue endpoints
|
||||
|
||||
"""
|
||||
try:
|
||||
import numpy as np
|
||||
from scipy import sparse
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"attr_sparse_matrix() requires scipy: http://scipy.org/ ")
|
||||
|
||||
edge_value = _edge_value(G, edge_attr)
|
||||
node_value = _node_value(G, node_attr)
|
||||
|
||||
if rc_order is None:
|
||||
ordering = list(set([node_value(n) for n in G]))
|
||||
else:
|
||||
ordering = rc_order
|
||||
|
||||
N = len(ordering)
|
||||
undirected = not G.is_directed()
|
||||
index = dict(zip(ordering, range(N)))
|
||||
M = sparse.lil_matrix((N, N), dtype=dtype)
|
||||
|
||||
seen = set([])
|
||||
for u, nbrdict in G.adjacency():
|
||||
for v in nbrdict:
|
||||
# Obtain the node attribute values.
|
||||
i, j = index[node_value(u)], index[node_value(v)]
|
||||
if v not in seen:
|
||||
M[i, j] += edge_value(u, v)
|
||||
if undirected:
|
||||
M[j, i] = M[i, j]
|
||||
|
||||
if undirected:
|
||||
seen.add(u)
|
||||
|
||||
if normalized:
|
||||
norms = np.asarray(M.sum(axis=1)).ravel()
|
||||
for i, norm in enumerate(norms):
|
||||
M[i, :] /= norm
|
||||
|
||||
if rc_order is None:
|
||||
return M, ordering
|
||||
else:
|
||||
return M
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
scipy = pytest.importorskip('scipy')
|
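Beyond the doctests above, one behavior worth illustrating is the return value when
`rc_order` is omitted: the ordering actually used is returned alongside the matrix.
A small sketch, assuming NetworkX with NumPy installed:

    import networkx as nx

    G = nx.Graph()
    G.add_edge('a', 'b', weight=2)
    M, ordering = nx.attr_matrix(G, edge_attr='weight')   # rc_order=None -> matrix plus ordering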
92
extensions/networkx/linalg/bethehessianmatrix.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2004-2019 by
|
||||
# Aric Hagberg <hagberg@lanl.gov>
|
||||
# Dan Schult <dschult@colgate.edu>
|
||||
# Pieter Swart <swart@lanl.gov>
|
||||
# Jean-Gabriel Young <jeangabriel.young@gmail.com>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
#
|
||||
# Authors: Jean-Gabriel Young (jeangabriel.young@gmail.com)
|
||||
"""Bethe Hessian or deformed Laplacian matrix of graphs."""
|
||||
import networkx as nx
|
||||
from networkx.utils import not_implemented_for
|
||||
|
||||
__all__ = ['bethe_hessian_matrix']
|
||||
|
||||
|
||||
@not_implemented_for('directed')
|
||||
@not_implemented_for('multigraph')
|
||||
def bethe_hessian_matrix(G, r=None, nodelist=None):
|
||||
r"""Returns the Bethe Hessian matrix of G.
|
||||
|
||||
The Bethe Hessian is a family of matrices parametrized by r, defined as
|
||||
H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the
|
||||
diagonal matrix of node degrees, and I is the identity matrix. It is equal
to the graph Laplacian when the regularizer r = 1.
|
||||
|
||||
The default choice of regularizer should be the ratio [2]
|
||||
|
||||
.. math::
|
||||
r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : Graph
|
||||
A NetworkX graph
|
||||
|
||||
r : float
|
||||
Regularizer parameter
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
|
||||
Returns
|
||||
-------
|
||||
H : NumPy matrix
    The Bethe Hessian matrix of G, with parameter r.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import networkx as nx
|
||||
>>> k = [3, 2, 2, 1, 0]
>>> G = nx.havel_hakimi_graph(k)
>>> H = nx.bethe_hessian_matrix(G)
|
||||
|
||||
|
||||
See Also
|
||||
--------
|
||||
bethe_hessian_spectrum
|
||||
to_numpy_matrix
|
||||
adjacency_matrix
|
||||
laplacian_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] A. Saade, F. Krzakala and L. Zdeborová
|
||||
"Spectral clustering of graphs with the bethe hessian",
|
||||
Advances in Neural Information Processing Systems. 2014.
|
||||
.. [2] C. M. Lee, E. Levina
|
||||
"Estimating the number of communities in networks by spectral methods"
|
||||
arXiv:1507.00827, 2015.
|
||||
"""
|
||||
import scipy.sparse
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
if r is None:
|
||||
r = sum([d ** 2 for v, d in nx.degree(G)]) /\
|
||||
sum([d for v, d in nx.degree(G)]) - 1
|
||||
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, format='csr')
|
||||
n, m = A.shape
|
||||
diags = A.sum(axis=1)
|
||||
D = scipy.sparse.spdiags(diags.flatten(), [0], m, n, format='csr')
|
||||
I = scipy.sparse.eye(m, n, format='csr')
|
||||
return (r ** 2 - 1) * I - r * A + D
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
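A short usage sketch, assuming NetworkX with SciPy installed. Setting the regularizer
to 1 recovers the combinatorial Laplacian D - A, which makes a quick sanity check:

    import networkx as nx

    G = nx.karate_club_graph()
    H = nx.bethe_hessian_matrix(G)            # uses the default regularizer r_m
    L = nx.bethe_hessian_matrix(G, r=1.0)     # (1 - 1) I - A + D, i.e. the graph Laplacian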
172
extensions/networkx/linalg/graphmatrix.py
Normal file
@@ -0,0 +1,172 @@
|
||||
"""
|
||||
Adjacency matrix and incidence matrix of graphs.
|
||||
"""
|
||||
# Copyright (C) 2004-2019 by
|
||||
# Aric Hagberg <hagberg@lanl.gov>
|
||||
# Dan Schult <dschult@colgate.edu>
|
||||
# Pieter Swart <swart@lanl.gov>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
import networkx as nx
|
||||
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
|
||||
'Pieter Swart (swart@lanl.gov)',
|
||||
'Dan Schult(dschult@colgate.edu)'])
|
||||
|
||||
__all__ = ['incidence_matrix',
|
||||
'adj_matrix', 'adjacency_matrix',
|
||||
]
|
||||
|
||||
|
||||
def incidence_matrix(G, nodelist=None, edgelist=None,
|
||||
oriented=False, weight=None):
|
||||
"""Returns incidence matrix of G.
|
||||
|
||||
The incidence matrix assigns each row to a node and each column to an edge.
|
||||
For a standard incidence matrix a 1 appears wherever a row's node is
|
||||
incident on the column's edge. For an oriented incidence matrix each
|
||||
edge is assigned an orientation (arbitrarily for undirected and aligning to
|
||||
direction for directed). A -1 appears for the tail of an edge and 1
|
||||
for the head of the edge. The elements are zero otherwise.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional (default= all nodes in G)
|
||||
The rows are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
edgelist : list, optional (default= all edges in G)
|
||||
The columns are ordered according to the edges in edgelist.
|
||||
If edgelist is None, then the ordering is produced by G.edges().
|
||||
|
||||
oriented: bool, optional (default=False)
|
||||
If True, matrix elements are +1 or -1 for the head or tail node
|
||||
respectively of each edge. If False, +1 occurs at both nodes.
|
||||
|
||||
weight : string or None, optional (default=None)
|
||||
The edge data key used to provide each value in the matrix.
|
||||
If None, then each edge has weight 1. Edge weights, if used,
|
||||
should be positive so that the orientation can provide the sign.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : SciPy sparse matrix
|
||||
The incidence matrix of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edges in edgelist should be
|
||||
(u,v,key) 3-tuples.
|
||||
|
||||
"Networks are the best discrete model for so many problems in
|
||||
applied mathematics" [1]_.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Gil Strang, Network applications: A = incidence matrix,
|
||||
http://academicearth.org/lectures/network-applications-incidence-matrix
|
||||
"""
|
||||
import scipy.sparse
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
if edgelist is None:
|
||||
if G.is_multigraph():
|
||||
edgelist = list(G.edges(keys=True))
|
||||
else:
|
||||
edgelist = list(G.edges())
|
||||
A = scipy.sparse.lil_matrix((len(nodelist), len(edgelist)))
|
||||
node_index = dict((node, i) for i, node in enumerate(nodelist))
|
||||
for ei, e in enumerate(edgelist):
|
||||
(u, v) = e[:2]
|
||||
if u == v:
|
||||
continue # self loops give zero column
|
||||
try:
|
||||
ui = node_index[u]
|
||||
vi = node_index[v]
|
||||
except KeyError:
|
||||
raise nx.NetworkXError('node %s or %s in edgelist '
|
||||
'but not in nodelist' % (u, v))
|
||||
if weight is None:
|
||||
wt = 1
|
||||
else:
|
||||
if G.is_multigraph():
|
||||
ekey = e[2]
|
||||
wt = G[u][v][ekey].get(weight, 1)
|
||||
else:
|
||||
wt = G[u][v].get(weight, 1)
|
||||
if oriented:
|
||||
A[ui, ei] = -wt
|
||||
A[vi, ei] = wt
|
||||
else:
|
||||
A[ui, ei] = wt
|
||||
A[vi, ei] = wt
|
||||
return A.asformat('csc')
|
||||
|
||||
|
||||
def adjacency_matrix(G, nodelist=None, weight='weight'):
|
||||
"""Returns adjacency matrix of G.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to provide each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : SciPy sparse matrix
|
||||
Adjacency matrix representation of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
For directed graphs, entry i,j corresponds to an edge from i to j.
|
||||
|
||||
If you want a pure Python adjacency matrix representation try
|
||||
networkx.convert.to_dict_of_dicts which will return a
|
||||
dictionary-of-dictionaries format that can be addressed as a
|
||||
sparse matrix.
|
||||
|
||||
For MultiGraph/MultiDiGraph with parallel edges the weights are summed.
|
||||
See to_numpy_matrix for other options.
|
||||
|
||||
The convention used for self-loop edges in graphs is to assign the
|
||||
diagonal matrix entry value to the edge weight attribute
|
||||
(or the number 1 if the edge has no weight attribute). If the
|
||||
alternate convention of doubling the edge weight is desired the
|
||||
resulting Scipy sparse matrix can be modified as follows:
|
||||
|
||||
>>> import scipy as sp
|
||||
>>> G = nx.Graph([(1,1)])
|
||||
>>> A = nx.adjacency_matrix(G)
|
||||
>>> print(A.todense())
|
||||
[[1]]
|
||||
>>> A.setdiag(A.diagonal()*2)
|
||||
>>> print(A.todense())
|
||||
[[2]]
|
||||
|
||||
See Also
|
||||
--------
|
||||
to_numpy_matrix
|
||||
to_scipy_sparse_matrix
|
||||
to_dict_of_dicts
|
||||
adjacency_spectrum
|
||||
"""
|
||||
return nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight)
|
||||
|
||||
|
||||
adj_matrix = adjacency_matrix
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
scipy = pytest.importorskip('scipy')
|
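A brief usage sketch for the two matrix constructors in this file, assuming NetworkX
with SciPy installed:

    import networkx as nx

    G = nx.path_graph(3)
    A = nx.adjacency_matrix(G)                   # SciPy sparse; rows follow G.nodes()
    B = nx.incidence_matrix(G, oriented=True)    # -1 at each edge's tail, +1 at its head
    print(A.todense())
    print(B.todense())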
375
extensions/networkx/linalg/laplacianmatrix.py
Normal file
@@ -0,0 +1,375 @@
|
||||
"""Laplacian matrix of graphs.
|
||||
"""
|
||||
# Copyright (C) 2004-2019 by
|
||||
# Aric Hagberg <hagberg@lanl.gov>
|
||||
# Dan Schult <dschult@colgate.edu>
|
||||
# Pieter Swart <swart@lanl.gov>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
import networkx as nx
|
||||
from networkx.utils import not_implemented_for
|
||||
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
|
||||
'Pieter Swart (swart@lanl.gov)',
|
||||
'Dan Schult (dschult@colgate.edu)',
|
||||
'Alejandro Weinstein <alejandro.weinstein@gmail.com>'])
|
||||
__all__ = ['laplacian_matrix',
|
||||
'normalized_laplacian_matrix',
|
||||
'directed_laplacian_matrix',
|
||||
'directed_combinatorial_laplacian_matrix']
|
||||
|
||||
|
||||
@not_implemented_for('directed')
|
||||
def laplacian_matrix(G, nodelist=None, weight='weight'):
|
||||
"""Returns the Laplacian matrix of G.
|
||||
|
||||
The graph Laplacian is the matrix L = D - A, where
|
||||
A is the adjacency matrix and D is the diagonal matrix of node degrees.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
L : SciPy sparse matrix
|
||||
The Laplacian matrix of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edges weights are summed.
|
||||
|
||||
See Also
|
||||
--------
|
||||
to_numpy_matrix
|
||||
normalized_laplacian_matrix
|
||||
laplacian_spectrum
|
||||
"""
|
||||
import scipy.sparse
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
|
||||
format='csr')
|
||||
n, m = A.shape
|
||||
diags = A.sum(axis=1)
|
||||
D = scipy.sparse.spdiags(diags.flatten(), [0], m, n, format='csr')
|
||||
return D - A
|
||||
|
||||
|
||||
@not_implemented_for('directed')
|
||||
def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
|
||||
r"""Returns the normalized Laplacian matrix of G.
|
||||
|
||||
The normalized graph Laplacian is the matrix
|
||||
|
||||
.. math::
|
||||
|
||||
N = D^{-1/2} L D^{-1/2}
|
||||
|
||||
where `L` is the graph Laplacian and `D` is the diagonal matrix of
|
||||
node degrees.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
N : NumPy matrix
|
||||
The normalized Laplacian matrix of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edges weights are summed.
|
||||
See to_numpy_matrix for other options.
|
||||
|
||||
If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is
|
||||
the adjacency matrix [2]_.
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
normalized_laplacian_spectrum
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Fan Chung-Graham, Spectral Graph Theory,
|
||||
CBMS Regional Conference Series in Mathematics, Number 92, 1997.
|
||||
.. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized
|
||||
Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
|
||||
March 2007.
|
||||
"""
|
||||
import scipy
|
||||
import scipy.sparse
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
|
||||
format='csr')
|
||||
n, m = A.shape
|
||||
diags = A.sum(axis=1).flatten()
|
||||
D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
|
||||
L = D - A
|
||||
with scipy.errstate(divide='ignore'):
|
||||
diags_sqrt = 1.0 / scipy.sqrt(diags)
|
||||
diags_sqrt[scipy.isinf(diags_sqrt)] = 0
|
||||
DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
|
||||
return DH.dot(L.dot(DH))
|
||||
|
||||
###############################################################################
|
||||
# Code based on
|
||||
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
|
||||
|
||||
|
||||
@not_implemented_for('undirected')
|
||||
@not_implemented_for('multigraph')
|
||||
def directed_laplacian_matrix(G, nodelist=None, weight='weight',
|
||||
walk_type=None, alpha=0.95):
|
||||
r"""Returns the directed Laplacian matrix of G.
|
||||
|
||||
The graph directed Laplacian is the matrix
|
||||
|
||||
.. math::
|
||||
|
||||
L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2
|
||||
|
||||
where `I` is the identity matrix, `P` is the transition matrix of the
|
||||
graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
|
||||
zeros elsewhere.
|
||||
|
||||
Depending on the value of walk_type, `P` can be the transition matrix
|
||||
induced by a random walk, a lazy random walk, or a random walk with
|
||||
teleportation (PageRank).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : DiGraph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
walk_type : string or None, optional (default=None)
|
||||
If None, `P` is selected depending on the properties of the
|
||||
graph. Otherwise is one of 'random', 'lazy', or 'pagerank'
|
||||
|
||||
alpha : real
|
||||
(1 - alpha) is the teleportation probability used with pagerank
|
||||
|
||||
Returns
|
||||
-------
|
||||
L : NumPy array
|
||||
Normalized Laplacian of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Only implemented for DiGraphs
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Fan Chung (2005).
|
||||
Laplacians and the Cheeger inequality for directed graphs.
|
||||
Annals of Combinatorics, 9(1), 2005
|
||||
"""
|
||||
import scipy as sp
|
||||
from scipy.sparse import spdiags, linalg
|
||||
|
||||
P = _transition_matrix(G, nodelist=nodelist, weight=weight,
|
||||
walk_type=walk_type, alpha=alpha)
|
||||
|
||||
n, m = P.shape
|
||||
|
||||
evals, evecs = linalg.eigs(P.T, k=1)
|
||||
v = evecs.flatten().real
|
||||
p = v / v.sum()
|
||||
sqrtp = sp.sqrt(p)
|
||||
Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0 / sqrtp, [0], n, n)
|
||||
I = sp.identity(len(G))
|
||||
|
||||
return I - (Q + Q.T) / 2.0
|
||||
|
||||
|
||||
@not_implemented_for('undirected')
|
||||
@not_implemented_for('multigraph')
|
||||
def directed_combinatorial_laplacian_matrix(G, nodelist=None, weight='weight',
|
||||
walk_type=None, alpha=0.95):
|
||||
r"""Return the directed combinatorial Laplacian matrix of G.
|
||||
|
||||
The graph directed combinatorial Laplacian is the matrix
|
||||
|
||||
.. math::
|
||||
|
||||
L = \Phi - (\Phi P + P^T \Phi) / 2
|
||||
|
||||
where `P` is the transition matrix of the graph and `\Phi` is a matrix
|
||||
with the Perron vector of `P` in the diagonal and zeros elsewhere.
|
||||
|
||||
Depending on the value of walk_type, `P` can be the transition matrix
|
||||
induced by a random walk, a lazy random walk, or a random walk with
|
||||
teleportation (PageRank).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : DiGraph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
walk_type : string or None, optional (default=None)
|
||||
If None, `P` is selected depending on the properties of the
|
||||
graph. Otherwise is one of 'random', 'lazy', or 'pagerank'
|
||||
|
||||
alpha : real
|
||||
(1 - alpha) is the teleportation probability used with pagerank
|
||||
|
||||
Returns
|
||||
-------
|
||||
L : NumPy array
|
||||
Combinatorial Laplacian of G.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Only implemented for DiGraphs
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Fan Chung (2005).
|
||||
Laplacians and the Cheeger inequality for directed graphs.
|
||||
Annals of Combinatorics, 9(1), 2005
|
||||
"""
|
||||
from scipy.sparse import spdiags, linalg
|
||||
|
||||
P = _transition_matrix(G, nodelist=nodelist, weight=weight,
|
||||
walk_type=walk_type, alpha=alpha)
|
||||
|
||||
n, m = P.shape
|
||||
|
||||
evals, evecs = linalg.eigs(P.T, k=1)
|
||||
v = evecs.flatten().real
|
||||
p = v / v.sum()
|
||||
Phi = spdiags(p, [0], n, n)
|
||||
|
||||
Phi = Phi.todense()
|
||||
|
||||
return Phi - (Phi*P + P.T*Phi) / 2.0
|
||||
|
||||
|
||||
def _transition_matrix(G, nodelist=None, weight='weight',
|
||||
walk_type=None, alpha=0.95):
|
||||
"""Returns the transition matrix of G.
|
||||
|
||||
This is a row-stochastic matrix giving the transition probabilities while
|
||||
performing a random walk on the graph. Depending on the value of walk_type,
|
||||
P can be the transition matrix induced by a random walk, a lazy random walk,
|
||||
or a random walk with teleportation (PageRank).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : DiGraph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
walk_type : string or None, optional (default=None)
|
||||
If None, `P` is selected depending on the properties of the
|
||||
graph. Otherwise it must be one of 'random', 'lazy', or 'pagerank'.
|
||||
|
||||
alpha : real
|
||||
(1 - alpha) is the teleportation probability used with pagerank
|
||||
|
||||
Returns
|
||||
-------
|
||||
P : NumPy array
|
||||
transition matrix of G.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXError
|
||||
If walk_type is not one of 'random', 'lazy', or 'pagerank', or alpha is not in (0, 1)
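
Notes
-----
A minimal usage sketch: for a strongly connected, aperiodic digraph the
plain random walk is selected automatically, e.g.
``P = _transition_matrix(nx.DiGraph([(1, 2), (2, 1), (1, 1)]))``.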
|
||||
"""
|
||||
|
||||
import scipy as sp
|
||||
from scipy.sparse import identity, spdiags
|
||||
if walk_type is None:
|
||||
if nx.is_strongly_connected(G):
|
||||
if nx.is_aperiodic(G):
|
||||
walk_type = "random"
|
||||
else:
|
||||
walk_type = "lazy"
|
||||
else:
|
||||
walk_type = "pagerank"
|
||||
|
||||
M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
|
||||
dtype=float)
|
||||
n, m = M.shape
|
||||
if walk_type in ["random", "lazy"]:
|
||||
DI = spdiags(1.0 / sp.array(M.sum(axis=1).flat), [0], n, n)
|
||||
if walk_type == "random":
|
||||
P = DI * M
|
||||
else:
|
||||
I = identity(n)
|
||||
P = (I + DI * M) / 2.0
|
||||
|
||||
elif walk_type == "pagerank":
|
||||
if not (0 < alpha < 1):
|
||||
raise nx.NetworkXError('alpha must be between 0 and 1')
|
||||
# this is using a dense representation
|
||||
M = M.todense()
|
||||
# add constant to dangling nodes' row
|
||||
dangling = sp.where(M.sum(axis=1) == 0)
|
||||
for d in dangling[0]:
|
||||
M[d] = 1.0 / n
|
||||
# normalize
|
||||
M = M / M.sum(axis=1)
|
||||
P = alpha * M + (1 - alpha) / n
|
||||
else:
|
||||
raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")
|
||||
|
||||
return P
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
scipy = pytest.importorskip('scipy')
|
166
extensions/networkx/linalg/modularitymatrix.py
Normal file
@ -0,0 +1,166 @@
|
||||
"""Modularity matrix of graphs.
|
||||
"""
|
||||
# Copyright (C) 2004-2019 by
|
||||
# Aric Hagberg <hagberg@lanl.gov>
|
||||
# Dan Schult <dschult@colgate.edu>
|
||||
# Pieter Swart <swart@lanl.gov>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
import networkx as nx
|
||||
from networkx.utils import not_implemented_for
|
||||
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
|
||||
'Pieter Swart (swart@lanl.gov)',
|
||||
'Dan Schult (dschult@colgate.edu)',
|
||||
'Jean-Gabriel Young (Jean.gabriel.young@gmail.com)'])
|
||||
__all__ = ['modularity_matrix', 'directed_modularity_matrix']
|
||||
|
||||
|
||||
@not_implemented_for('directed')
|
||||
@not_implemented_for('multigraph')
|
||||
def modularity_matrix(G, nodelist=None, weight=None):
|
||||
r"""Returns the modularity matrix of G.
|
||||
|
||||
The modularity matrix is the matrix B = A - <A>, where A is the adjacency
|
||||
matrix and <A> is the average adjacency matrix, assuming that the graph
|
||||
is described by the configuration model.
|
||||
|
||||
More specifically, the element B_ij of B is defined as
|
||||
|
||||
.. math::
|
||||
A_{ij} - {k_i k_j \over 2 m}
|
||||
|
||||
where k_i is the degree of node i, and where m is the number of edges
|
||||
in the graph. When weight is set to the name of an edge attribute, A_ij, k_i,
|
||||
k_j and m are computed using its value.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : Graph
|
||||
A NetworkX graph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default=None)
|
||||
The edge attribute that holds the numerical value used for
|
||||
the edge weight. If None then all edge weights are 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
B : Numpy matrix
|
||||
The modularity matrix of G.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import networkx as nx
|
||||
>>> k = [3, 2, 2, 1, 0]
|
||||
>>> G = nx.havel_hakimi_graph(k)
|
||||
>>> B = nx.modularity_matrix(G)
|
||||
|
||||
|
||||
See Also
|
||||
--------
|
||||
to_numpy_matrix
|
||||
modularity_spectrum
|
||||
adjacency_matrix
|
||||
directed_modularity_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] M. E. J. Newman, "Modularity and community structure in networks",
|
||||
Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
|
||||
"""
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
|
||||
format='csr')
|
||||
k = A.sum(axis=1)
|
||||
m = k.sum() * 0.5
|
||||
# Expected adjacency matrix
|
||||
X = k * k.transpose() / (2 * m)
|
||||
return A - X
|
||||
|
||||
|
||||
@not_implemented_for('undirected')
|
||||
@not_implemented_for('multigraph')
|
||||
def directed_modularity_matrix(G, nodelist=None, weight=None):
|
||||
"""Returns the directed modularity matrix of G.
|
||||
|
||||
The modularity matrix is the matrix B = A - <A>, where A is the adjacency
|
||||
matrix and <A> is the expected adjacency matrix, assuming that the graph
|
||||
is described by the configuration model.
|
||||
|
||||
More specifically, the element B_ij of B is defined as
|
||||
|
||||
.. math::
|
||||
B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m
|
||||
|
||||
where :math:`k_i^{out}` is the out degree of node i, and :math:`k_j^{in}` is the in degree
|
||||
of node j, with m the number of edges in the graph. When weight is set
|
||||
to the name of an edge attribute, A_ij, k_i, k_j and m are computed using
|
||||
its value.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : DiGraph
|
||||
A NetworkX DiGraph
|
||||
|
||||
nodelist : list, optional
|
||||
The rows and columns are ordered according to the nodes in nodelist.
|
||||
If nodelist is None, then the ordering is produced by G.nodes().
|
||||
|
||||
weight : string or None, optional (default=None)
|
||||
The edge attribute that holds the numerical value used for
|
||||
the edge weight. If None then all edge weights are 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
B : Numpy matrix
|
||||
The modularity matrix of G.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import networkx as nx
|
||||
>>> G = nx.DiGraph()
|
||||
>>> G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
|
||||
... (5,4), (5,6), (6,4)))
|
||||
>>> B = nx.directed_modularity_matrix(G)
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
NetworkX defines the element A_ij of the adjacency matrix as 1 if there
|
||||
is a link going from node i to node j. Leicht and Newman use the opposite
|
||||
definition. This explains the different expression for B_ij.
|
||||
|
||||
See Also
|
||||
--------
|
||||
to_numpy_matrix
|
||||
modularity_spectrum
|
||||
adjacency_matrix
|
||||
modularity_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] E. A. Leicht, M. E. J. Newman,
|
||||
"Community structure in directed networks",
|
||||
Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.
|
||||
"""
|
||||
if nodelist is None:
|
||||
nodelist = list(G)
|
||||
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
|
||||
format='csr')
|
||||
k_in = A.sum(axis=0)
|
||||
k_out = A.sum(axis=1)
|
||||
m = k_in.sum()
|
||||
# Expected adjacency matrix
|
||||
X = k_out * k_in / m
|
||||
return A - X
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
scipy = pytest.importorskip('scipy')
|
172
extensions/networkx/linalg/spectrum.py
Normal file
@ -0,0 +1,172 @@
|
||||
"""
|
||||
Eigenvalue spectrum of graphs.
|
||||
"""
|
||||
# Copyright (C) 2004-2019 by
|
||||
# Aric Hagberg <hagberg@lanl.gov>
|
||||
# Dan Schult <dschult@colgate.edu>
|
||||
# Pieter Swart <swart@lanl.gov>
|
||||
# All rights reserved.
|
||||
# BSD license.
|
||||
import networkx as nx
|
||||
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
|
||||
'Pieter Swart (swart@lanl.gov)',
|
||||
'Dan Schult (dschult@colgate.edu)',
|
||||
'Jean-Gabriel Young (jean.gabriel.young@gmail.com)'])
|
||||
|
||||
__all__ = ['laplacian_spectrum', 'adjacency_spectrum', 'modularity_spectrum',
|
||||
'normalized_laplacian_spectrum', 'bethe_hessian_spectrum']
|
||||
|
||||
|
||||
def laplacian_spectrum(G, weight='weight'):
|
||||
"""Returns eigenvalues of the Laplacian of G
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
evals : NumPy array
|
||||
Eigenvalues
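
Examples
--------
A minimal usage sketch (the test suite sorts the returned eigenvalues
before comparing them):

>>> import networkx as nx
>>> evals = sorted(nx.laplacian_spectrum(nx.path_graph(3)))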
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edge weights are summed.
|
||||
See to_numpy_matrix for other options.
|
||||
|
||||
See Also
|
||||
--------
|
||||
laplacian_matrix
|
||||
"""
|
||||
from scipy.linalg import eigvalsh
|
||||
return eigvalsh(nx.laplacian_matrix(G, weight=weight).todense())
|
||||
|
||||
|
||||
def normalized_laplacian_spectrum(G, weight='weight'):
|
||||
"""Return eigenvalues of the normalized Laplacian of G
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
evals : NumPy array
|
||||
Eigenvalues
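
Examples
--------
A minimal usage sketch (eigenvalues of the normalized Laplacian lie in
the interval [0, 2]):

>>> import networkx as nx
>>> evals = nx.normalized_laplacian_spectrum(nx.path_graph(3))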
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edge weights are summed.
|
||||
See to_numpy_matrix for other options.
|
||||
|
||||
See Also
|
||||
--------
|
||||
normalized_laplacian_matrix
|
||||
"""
|
||||
from scipy.linalg import eigvalsh
|
||||
return eigvalsh(nx.normalized_laplacian_matrix(G, weight=weight).todense())
|
||||
|
||||
|
||||
def adjacency_spectrum(G, weight='weight'):
|
||||
"""Returns eigenvalues of the adjacency matrix of G.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
weight : string or None, optional (default='weight')
|
||||
The edge data key used to compute each value in the matrix.
|
||||
If None, then each edge has weight 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
evals : NumPy array
|
||||
Eigenvalues
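
Examples
--------
A minimal usage sketch; for the path graph on three nodes the spectrum
is {-sqrt(2), 0, sqrt(2)}, as exercised in the test suite:

>>> import networkx as nx
>>> evals = nx.adjacency_spectrum(nx.path_graph(3))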
|
||||
|
||||
Notes
|
||||
-----
|
||||
For MultiGraph/MultiDiGraph, the edge weights are summed.
|
||||
See to_numpy_matrix for other options.
|
||||
|
||||
See Also
|
||||
--------
|
||||
adjacency_matrix
|
||||
"""
|
||||
from scipy.linalg import eigvals
|
||||
return eigvals(nx.adjacency_matrix(G, weight=weight).todense())
|
||||
|
||||
|
||||
def modularity_spectrum(G):
|
||||
"""Returns eigenvalues of the modularity matrix of G.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : Graph
|
||||
A NetworkX Graph or DiGraph
|
||||
|
||||
Returns
|
||||
-------
|
||||
evals : NumPy array
|
||||
Eigenvalues
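
Examples
--------
A minimal usage sketch; directed graphs are dispatched to
directed_modularity_matrix automatically:

>>> import networkx as nx
>>> evals = nx.modularity_spectrum(nx.path_graph(3))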
|
||||
|
||||
See Also
|
||||
--------
|
||||
modularity_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] M. E. J. Newman, "Modularity and community structure in networks",
|
||||
Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
|
||||
"""
|
||||
from scipy.linalg import eigvals
|
||||
if G.is_directed():
|
||||
return eigvals(nx.directed_modularity_matrix(G))
|
||||
else:
|
||||
return eigvals(nx.modularity_matrix(G))
|
||||
|
||||
|
||||
def bethe_hessian_spectrum(G, r=None):
|
||||
"""Returns eigenvalues of the Bethe Hessian matrix of G.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : Graph
|
||||
A NetworkX Graph or DiGraph
|
||||
|
||||
r : float
|
||||
Regularizer parameter
|
||||
|
||||
Returns
|
||||
-------
|
||||
evals : NumPy array
|
||||
Eigenvalues
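
Examples
--------
A minimal usage sketch with an explicit regularizer (with r=1 the
spectrum coincides with the Laplacian spectrum, as the test suite
checks):

>>> import networkx as nx
>>> evals = nx.bethe_hessian_spectrum(nx.path_graph(3), r=2)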
|
||||
|
||||
See Also
|
||||
--------
|
||||
bethe_hessian_matrix
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] A. Saade, F. Krzakala and L. Zdeborová
|
||||
"Spectral clustering of graphs with the bethe hessian",
|
||||
Advances in Neural Information Processing Systems. 2014.
|
||||
"""
|
||||
from scipy.linalg import eigvalsh
|
||||
return eigvalsh(nx.bethe_hessian_matrix(G, r).todense())
|
||||
|
||||
|
||||
# fixture for pytest
|
||||
def setup_module(module):
|
||||
import pytest
|
||||
scipy = pytest.importorskip('scipy')
scipy.linalg = pytest.importorskip('scipy.linalg')
|
0
extensions/networkx/linalg/tests/__init__.py
Normal file
284
extensions/networkx/linalg/tests/test_algebraic_connectivity.py
Normal file
@ -0,0 +1,284 @@
|
||||
from math import sqrt
|
||||
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
numpy.linalg = pytest.importorskip('numpy.linalg')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
scipy.sparse = pytest.importorskip('scipy.sparse')
|
||||
|
||||
|
||||
|
||||
import networkx as nx
|
||||
from networkx.testing import almost_equal
|
||||
|
||||
try:
|
||||
from scikits.sparse.cholmod import cholesky
|
||||
_cholesky = cholesky
|
||||
except ImportError:
|
||||
_cholesky = None
|
||||
|
||||
if _cholesky is None:
|
||||
methods = ('tracemin_pcg', 'tracemin_lu', 'lanczos', 'lobpcg')
|
||||
else:
|
||||
methods = ('tracemin_pcg', 'tracemin_chol', 'tracemin_lu', 'lanczos', 'lobpcg')
|
||||
|
||||
|
||||
def check_eigenvector(A, l, x):
|
||||
nx = numpy.linalg.norm(x)
|
||||
# Check zeroness.
|
||||
assert not almost_equal(nx, 0)
|
||||
y = A * x
|
||||
ny = numpy.linalg.norm(y)
|
||||
# Check collinearity.
|
||||
assert almost_equal(numpy.dot(x, y), nx * ny)
|
||||
# Check eigenvalue.
|
||||
assert almost_equal(ny, l * nx)
|
||||
|
||||
|
||||
class TestAlgebraicConnectivity(object):
|
||||
|
||||
def test_directed(self):
|
||||
G = nx.DiGraph()
|
||||
for method in self._methods:
|
||||
pytest.raises(nx.NetworkXNotImplemented, nx.algebraic_connectivity,
|
||||
G, method=method)
|
||||
pytest.raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G,
|
||||
method=method)
|
||||
|
||||
def test_null_and_singleton(self):
|
||||
G = nx.Graph()
|
||||
for method in self._methods:
|
||||
pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G,
|
||||
method=method)
|
||||
pytest.raises(nx.NetworkXError, nx.fiedler_vector, G,
|
||||
method=method)
|
||||
G.add_edge(0, 0)
|
||||
for method in self._methods:
|
||||
pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G,
|
||||
method=method)
|
||||
pytest.raises(nx.NetworkXError, nx.fiedler_vector, G,
|
||||
method=method)
|
||||
|
||||
def test_disconnected(self):
|
||||
G = nx.Graph()
|
||||
G.add_nodes_from(range(2))
|
||||
for method in self._methods:
|
||||
assert nx.algebraic_connectivity(G) == 0
|
||||
pytest.raises(nx.NetworkXError, nx.fiedler_vector, G,
|
||||
method=method)
|
||||
G.add_edge(0, 1, weight=0)
|
||||
for method in self._methods:
|
||||
assert nx.algebraic_connectivity(G) == 0
|
||||
pytest.raises(nx.NetworkXError, nx.fiedler_vector, G,
|
||||
method=method)
|
||||
|
||||
def test_unrecognized_method(self):
|
||||
G = nx.path_graph(4)
|
||||
pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G,
|
||||
method='unknown')
|
||||
pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method='unknown')
|
||||
|
||||
def test_two_nodes(self):
|
||||
G = nx.Graph()
|
||||
G.add_edge(0, 1, weight=1)
|
||||
A = nx.laplacian_matrix(G)
|
||||
for method in self._methods:
|
||||
assert almost_equal(nx.algebraic_connectivity(
|
||||
G, tol=1e-12, method=method), 2)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method=method)
|
||||
check_eigenvector(A, 2, x)
|
||||
G = nx.MultiGraph()
|
||||
G.add_edge(0, 0, spam=1e8)
|
||||
G.add_edge(0, 1, spam=1)
|
||||
G.add_edge(0, 1, spam=-2)
|
||||
A = -3 * nx.laplacian_matrix(G, weight='spam')
|
||||
for method in self._methods:
|
||||
assert almost_equal(nx.algebraic_connectivity(
|
||||
G, weight='spam', tol=1e-12, method=method), 6)
|
||||
x = nx.fiedler_vector(G, weight='spam', tol=1e-12, method=method)
|
||||
check_eigenvector(A, 6, x)
|
||||
|
||||
def test_abbreviation_of_method(self):
|
||||
G = nx.path_graph(8)
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 2 - sqrt(2 + sqrt(2))
|
||||
ac = nx.algebraic_connectivity(G, tol=1e-12, method='tracemin')
|
||||
assert almost_equal(ac, sigma)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method='tracemin')
|
||||
check_eigenvector(A, sigma, x)
|
||||
|
||||
def test_path(self):
|
||||
G = nx.path_graph(8)
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 2 - sqrt(2 + sqrt(2))
|
||||
for method in self._methods:
|
||||
ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
|
||||
assert almost_equal(ac, sigma)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method=method)
|
||||
check_eigenvector(A, sigma, x)
|
||||
|
||||
def test_problematic_graph_issue_2381(self):
|
||||
G = nx.path_graph(4)
|
||||
G.add_edges_from([(4, 2), (5, 1)])
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 0.438447187191
|
||||
for method in self._methods:
|
||||
ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
|
||||
assert almost_equal(ac, sigma)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method=method)
|
||||
check_eigenvector(A, sigma, x)
|
||||
|
||||
def test_cycle(self):
|
||||
G = nx.cycle_graph(8)
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 2 - sqrt(2)
|
||||
for method in self._methods:
|
||||
ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
|
||||
assert almost_equal(ac, sigma)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method=method)
|
||||
check_eigenvector(A, sigma, x)
|
||||
|
||||
def test_seed_argument(self):
|
||||
G = nx.cycle_graph(8)
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 2 - sqrt(2)
|
||||
for method in self._methods:
|
||||
ac = nx.algebraic_connectivity(G, tol=1e-12, method=method, seed=1)
|
||||
assert almost_equal(ac, sigma)
|
||||
x = nx.fiedler_vector(G, tol=1e-12, method=method, seed=1)
|
||||
check_eigenvector(A, sigma, x)
|
||||
|
||||
def test_buckminsterfullerene(self):
|
||||
G = nx.Graph(
|
||||
[(1, 10), (1, 41), (1, 59), (2, 12), (2, 42), (2, 60), (3, 6),
|
||||
(3, 43), (3, 57), (4, 8), (4, 44), (4, 58), (5, 13), (5, 56),
|
||||
(5, 57), (6, 10), (6, 31), (7, 14), (7, 56), (7, 58), (8, 12),
|
||||
(8, 32), (9, 23), (9, 53), (9, 59), (10, 15), (11, 24), (11, 53),
|
||||
(11, 60), (12, 16), (13, 14), (13, 25), (14, 26), (15, 27),
|
||||
(15, 49), (16, 28), (16, 50), (17, 18), (17, 19), (17, 54),
|
||||
(18, 20), (18, 55), (19, 23), (19, 41), (20, 24), (20, 42),
|
||||
(21, 31), (21, 33), (21, 57), (22, 32), (22, 34), (22, 58),
|
||||
(23, 24), (25, 35), (25, 43), (26, 36), (26, 44), (27, 51),
|
||||
(27, 59), (28, 52), (28, 60), (29, 33), (29, 34), (29, 56),
|
||||
(30, 51), (30, 52), (30, 53), (31, 47), (32, 48), (33, 45),
|
||||
(34, 46), (35, 36), (35, 37), (36, 38), (37, 39), (37, 49),
|
||||
(38, 40), (38, 50), (39, 40), (39, 51), (40, 52), (41, 47),
|
||||
(42, 48), (43, 49), (44, 50), (45, 46), (45, 54), (46, 55),
|
||||
(47, 54), (48, 55)])
|
||||
for normalized in (False, True):
|
||||
if not normalized:
|
||||
A = nx.laplacian_matrix(G)
|
||||
sigma = 0.2434017461399311
|
||||
else:
|
||||
A = nx.normalized_laplacian_matrix(G)
|
||||
sigma = 0.08113391537997749
|
||||
for method in methods:
|
||||
try:
|
||||
assert almost_equal(nx.algebraic_connectivity(
|
||||
G, normalized=normalized, tol=1e-12, method=method),
|
||||
sigma)
|
||||
x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12,
|
||||
method=method)
|
||||
check_eigenvector(A, sigma, x)
|
||||
except nx.NetworkXError as e:
|
||||
if e.args not in (('Cholesky solver unavailable.',),
|
||||
('LU solver unavailable.',)):
|
||||
raise
|
||||
|
||||
_methods = methods
|
||||
|
||||
|
||||
class TestSpectralOrdering(object):
|
||||
|
||||
def test_nullgraph(self):
|
||||
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
|
||||
G = graph()
|
||||
pytest.raises(nx.NetworkXError, nx.spectral_ordering, G)
|
||||
|
||||
def test_singleton(self):
|
||||
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
|
||||
G = graph()
|
||||
G.add_node('x')
|
||||
assert nx.spectral_ordering(G) == ['x']
|
||||
G.add_edge('x', 'x', weight=33)
|
||||
G.add_edge('x', 'x', weight=33)
|
||||
assert nx.spectral_ordering(G) == ['x']
|
||||
|
||||
def test_unrecognized_method(self):
|
||||
G = nx.path_graph(4)
|
||||
pytest.raises(nx.NetworkXError, nx.spectral_ordering, G,
|
||||
method='unknown')
|
||||
|
||||
def test_three_nodes(self):
|
||||
G = nx.Graph()
|
||||
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)],
|
||||
weight='spam')
|
||||
for method in self._methods:
|
||||
order = nx.spectral_ordering(G, weight='spam', method=method)
|
||||
assert set(order) == set(G)
|
||||
assert set([1, 3]) in (set(order[:-1]), set(order[1:]))
|
||||
G = nx.MultiDiGraph()
|
||||
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)])
|
||||
for method in self._methods:
|
||||
order = nx.spectral_ordering(G, method=method)
|
||||
assert set(order) == set(G)
|
||||
assert set([2, 3]) in (set(order[:-1]), set(order[1:]))
|
||||
|
||||
def test_path(self):
|
||||
# numpy is required via importorskip at module level, so it is available here
|
||||
from numpy.random import shuffle
|
||||
path = list(range(10))
|
||||
shuffle(path)
|
||||
G = nx.Graph()
|
||||
nx.add_path(G, path)
|
||||
for method in self._methods:
|
||||
order = nx.spectral_ordering(G, method=method)
|
||||
assert order in [path, list(reversed(path))]
|
||||
|
||||
def test_seed_argument(self):
|
||||
# numpy is required via importorskip at module level, so it is available here
|
||||
from numpy.random import shuffle
|
||||
path = list(range(10))
|
||||
shuffle(path)
|
||||
G = nx.Graph()
|
||||
nx.add_path(G, path)
|
||||
for method in self._methods:
|
||||
order = nx.spectral_ordering(G, method=method, seed=1)
|
||||
assert order in [path, list(reversed(path))]
|
||||
|
||||
def test_disconnected(self):
|
||||
G = nx.Graph()
|
||||
nx.add_path(G, range(0, 10, 2))
|
||||
nx.add_path(G, range(1, 10, 2))
|
||||
for method in self._methods:
|
||||
order = nx.spectral_ordering(G, method=method)
|
||||
assert set(order) == set(G)
|
||||
seqs = [list(range(0, 10, 2)), list(range(8, -1, -2)),
|
||||
list(range(1, 10, 2)), list(range(9, -1, -2))]
|
||||
assert order[:5] in seqs
|
||||
assert order[5:] in seqs
|
||||
|
||||
def test_cycle(self):
|
||||
path = list(range(10))
|
||||
G = nx.Graph()
|
||||
nx.add_path(G, path, weight=5)
|
||||
G.add_edge(path[-1], path[0], weight=1)
|
||||
A = nx.laplacian_matrix(G).todense()
|
||||
for normalized in (False, True):
|
||||
for method in methods:
|
||||
try:
|
||||
order = nx.spectral_ordering(G, normalized=normalized,
|
||||
method=method)
|
||||
except nx.NetworkXError as e:
|
||||
if e.args not in (('Cholesky solver unavailable.',),
|
||||
('LU solver unavailable.',)):
|
||||
raise
|
||||
else:
|
||||
if not normalized:
|
||||
assert order in [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8],
|
||||
[8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]
|
||||
else:
|
||||
assert order in [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8],
|
||||
[8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]
|
||||
|
||||
_methods = methods
|
34
extensions/networkx/linalg/tests/test_bethehessian.py
Normal file
@ -0,0 +1,34 @@
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
npt = pytest.importorskip('numpy.testing')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
|
||||
import networkx as nx
|
||||
from networkx.generators.degree_seq import havel_hakimi_graph
|
||||
|
||||
|
||||
class TestBetheHessian(object):
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
deg = [3, 2, 2, 1, 0]
|
||||
cls.G = havel_hakimi_graph(deg)
|
||||
cls.P = nx.path_graph(3)
|
||||
|
||||
def test_bethe_hessian(self):
|
||||
"Bethe Hessian matrix"
|
||||
H = numpy.array([[ 4, -2, 0],
|
||||
[-2, 5, -2],
|
||||
[ 0, -2, 4]])
|
||||
permutation = [2, 0, 1]
|
||||
# Bethe Hessian gives expected form
|
||||
npt.assert_equal(nx.bethe_hessian_matrix(self.P, r=2).todense(), H)
|
||||
# nodelist is correctly implemented
|
||||
npt.assert_equal(nx.bethe_hessian_matrix(self.P, r=2, nodelist=permutation).todense(),
|
||||
H[numpy.ix_(permutation, permutation)])
|
||||
# Equal to Laplacian matrix when r=1
|
||||
npt.assert_equal(nx.bethe_hessian_matrix(self.G, r=1).todense(),
|
||||
nx.laplacian_matrix(self.G).todense())
|
||||
# Correct default for the regularizer r
|
||||
npt.assert_equal(nx.bethe_hessian_matrix(self.G).todense(),
|
||||
nx.bethe_hessian_matrix(self.G, r=1.25).todense())
|
163
extensions/networkx/linalg/tests/test_graphmatrix.py
Normal file
@ -0,0 +1,163 @@
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
npt = pytest.importorskip('numpy.testing')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
|
||||
import networkx as nx
|
||||
from networkx.generators.degree_seq import havel_hakimi_graph
|
||||
|
||||
|
||||
class TestGraphMatrix(object):
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
deg = [3, 2, 2, 1, 0]
|
||||
cls.G = havel_hakimi_graph(deg)
|
||||
cls.OI = numpy.array([[-1, -1, -1, 0],
|
||||
[1, 0, 0, -1],
|
||||
[0, 1, 0, 1],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 0]])
|
||||
cls.A = numpy.array([[0, 1, 1, 1, 0],
|
||||
[1, 0, 1, 0, 0],
|
||||
[1, 1, 0, 0, 0],
|
||||
[1, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
cls.WG = havel_hakimi_graph(deg)
|
||||
cls.WG.add_edges_from((u, v, {'weight': 0.5, 'other': 0.3})
|
||||
for (u, v) in cls.G.edges())
|
||||
cls.WA = numpy.array([[0, 0.5, 0.5, 0.5, 0],
|
||||
[0.5, 0, 0.5, 0, 0],
|
||||
[0.5, 0.5, 0, 0, 0],
|
||||
[0.5, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
cls.MG = nx.MultiGraph(cls.G)
|
||||
cls.MG2 = cls.MG.copy()
|
||||
cls.MG2.add_edge(0, 1)
|
||||
cls.MG2A = numpy.array([[0, 2, 1, 1, 0],
|
||||
[2, 0, 1, 0, 0],
|
||||
[1, 1, 0, 0, 0],
|
||||
[1, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
cls.MGOI = numpy.array([[-1, -1, -1, -1, 0],
|
||||
[1, 1, 0, 0, -1],
|
||||
[0, 0, 1, 0, 1],
|
||||
[0, 0, 0, 1, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
cls.no_edges_G = nx.Graph([(1, 2), (3, 2, {'weight': 8})])
|
||||
cls.no_edges_A = numpy.array([[0, 0], [0, 0]])
|
||||
|
||||
def test_incidence_matrix(self):
|
||||
"Conversion to incidence matrix"
|
||||
I = nx.incidence_matrix(self.G,
|
||||
nodelist=sorted(self.G),
|
||||
edgelist=sorted(self.G.edges()),
|
||||
oriented=True).todense().astype(int)
|
||||
npt.assert_equal(I, self.OI)
|
||||
I = nx.incidence_matrix(self.G,
|
||||
nodelist=sorted(self.G),
|
||||
edgelist=sorted(self.G.edges()),
|
||||
oriented=False).todense().astype(int)
|
||||
npt.assert_equal(I, numpy.abs(self.OI))
|
||||
|
||||
I = nx.incidence_matrix(self.MG,
|
||||
nodelist=sorted(self.MG),
|
||||
edgelist=sorted(self.MG.edges()),
|
||||
oriented=True).todense().astype(int)
|
||||
npt.assert_equal(I, self.OI)
|
||||
I = nx.incidence_matrix(self.MG,
|
||||
nodelist=sorted(self.MG),
|
||||
edgelist=sorted(self.MG.edges()),
|
||||
oriented=False).todense().astype(int)
|
||||
npt.assert_equal(I, numpy.abs(self.OI))
|
||||
|
||||
I = nx.incidence_matrix(self.MG2,
|
||||
nodelist=sorted(self.MG2),
|
||||
edgelist=sorted(self.MG2.edges()),
|
||||
oriented=True).todense().astype(int)
|
||||
npt.assert_equal(I, self.MGOI)
|
||||
I = nx.incidence_matrix(self.MG2,
|
||||
nodelist=sorted(self.MG),
|
||||
edgelist=sorted(self.MG2.edges()),
|
||||
oriented=False).todense().astype(int)
|
||||
npt.assert_equal(I, numpy.abs(self.MGOI))
|
||||
|
||||
def test_weighted_incidence_matrix(self):
|
||||
I = nx.incidence_matrix(self.WG,
|
||||
nodelist=sorted(self.WG),
|
||||
edgelist=sorted(self.WG.edges()),
|
||||
oriented=True).todense().astype(int)
|
||||
npt.assert_equal(I, self.OI)
|
||||
I = nx.incidence_matrix(self.WG,
|
||||
nodelist=sorted(self.WG),
|
||||
edgelist=sorted(self.WG.edges()),
|
||||
oriented=False).todense().astype(int)
|
||||
npt.assert_equal(I, numpy.abs(self.OI))
|
||||
|
||||
# npt.assert_equal(nx.incidence_matrix(self.WG,oriented=True,
|
||||
# weight='weight').todense(),0.5*self.OI)
|
||||
# npt.assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(),
|
||||
# numpy.abs(0.5*self.OI))
|
||||
# npt.assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(),
|
||||
# 0.3*self.OI)
|
||||
|
||||
I = nx.incidence_matrix(self.WG,
|
||||
nodelist=sorted(self.WG),
|
||||
edgelist=sorted(self.WG.edges()),
|
||||
oriented=True,
|
||||
weight='weight').todense()
|
||||
npt.assert_equal(I, 0.5 * self.OI)
|
||||
I = nx.incidence_matrix(self.WG,
|
||||
nodelist=sorted(self.WG),
|
||||
edgelist=sorted(self.WG.edges()),
|
||||
oriented=False,
|
||||
weight='weight').todense()
|
||||
npt.assert_equal(I, numpy.abs(0.5 * self.OI))
|
||||
I = nx.incidence_matrix(self.WG,
|
||||
nodelist=sorted(self.WG),
|
||||
edgelist=sorted(self.WG.edges()),
|
||||
oriented=True,
|
||||
weight='other').todense()
|
||||
npt.assert_equal(I, 0.3 * self.OI)
|
||||
|
||||
# WMG=nx.MultiGraph(self.WG)
|
||||
# WMG.add_edge(0,1,weight=0.5,other=0.3)
|
||||
# npt.assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(),
|
||||
# numpy.abs(0.5*self.MGOI))
|
||||
# npt.assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(),
|
||||
# 0.5*self.MGOI)
|
||||
# npt.assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(),
|
||||
# 0.3*self.MGOI)
|
||||
|
||||
WMG = nx.MultiGraph(self.WG)
|
||||
WMG.add_edge(0, 1, weight=0.5, other=0.3)
|
||||
I = nx.incidence_matrix(WMG,
|
||||
nodelist=sorted(WMG),
|
||||
edgelist=sorted(WMG.edges(keys=True)),
|
||||
oriented=True,
|
||||
weight='weight').todense()
|
||||
npt.assert_equal(I, 0.5 * self.MGOI)
|
||||
I = nx.incidence_matrix(WMG,
|
||||
nodelist=sorted(WMG),
|
||||
edgelist=sorted(WMG.edges(keys=True)),
|
||||
oriented=False,
|
||||
weight='weight').todense()
|
||||
npt.assert_equal(I, numpy.abs(0.5 * self.MGOI))
|
||||
I = nx.incidence_matrix(WMG,
|
||||
nodelist=sorted(WMG),
|
||||
edgelist=sorted(WMG.edges(keys=True)),
|
||||
oriented=True,
|
||||
weight='other').todense()
|
||||
npt.assert_equal(I, 0.3 * self.MGOI)
|
||||
|
||||
def test_adjacency_matrix(self):
|
||||
"Conversion to adjacency matrix"
|
||||
npt.assert_equal(nx.adj_matrix(self.G).todense(), self.A)
|
||||
npt.assert_equal(nx.adj_matrix(self.MG).todense(), self.A)
|
||||
npt.assert_equal(nx.adj_matrix(self.MG2).todense(), self.MG2A)
|
||||
npt.assert_equal(nx.adj_matrix(self.G, nodelist=[0, 1]).todense(), self.A[:2, :2])
|
||||
npt.assert_equal(nx.adj_matrix(self.WG).todense(), self.WA)
|
||||
npt.assert_equal(nx.adj_matrix(self.WG, weight=None).todense(), self.A)
|
||||
npt.assert_equal(nx.adj_matrix(self.MG2, weight=None).todense(), self.MG2A)
|
||||
npt.assert_equal(nx.adj_matrix(self.WG, weight='other').todense(), 0.6 * self.WA)
|
||||
npt.assert_equal(nx.adj_matrix(self.no_edges_G, nodelist=[1, 3]).todense(), self.no_edges_A)
|
149
extensions/networkx/linalg/tests/test_laplacian.py
Normal file
@ -0,0 +1,149 @@
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
npt = pytest.importorskip('numpy.testing')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
|
||||
import networkx as nx
|
||||
from networkx.generators.degree_seq import havel_hakimi_graph
|
||||
|
||||
|
||||
class TestLaplacian(object):
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
deg = [3, 2, 2, 1, 0]
|
||||
cls.G = havel_hakimi_graph(deg)
|
||||
cls.WG = nx.Graph((u, v, {'weight': 0.5, 'other': 0.3})
|
||||
for (u, v) in cls.G.edges())
|
||||
cls.WG.add_node(4)
|
||||
cls.MG = nx.MultiGraph(cls.G)
|
||||
|
||||
# Graph with selfloops
|
||||
cls.Gsl = cls.G.copy()
|
||||
for node in cls.Gsl.nodes():
|
||||
cls.Gsl.add_edge(node, node)
|
||||
|
||||
def test_laplacian(self):
|
||||
"Graph Laplacian"
|
||||
NL = numpy.array([[3, -1, -1, -1, 0],
|
||||
[-1, 2, -1, 0, 0],
|
||||
[-1, -1, 2, 0, 0],
|
||||
[-1, 0, 0, 1, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
WL = 0.5 * NL
|
||||
OL = 0.3 * NL
|
||||
npt.assert_equal(nx.laplacian_matrix(self.G).todense(), NL)
|
||||
npt.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL)
|
||||
npt.assert_equal(nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(),
|
||||
numpy.array([[1, -1], [-1, 1]]))
|
||||
npt.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL)
|
||||
npt.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL)
|
||||
npt.assert_equal(nx.laplacian_matrix(self.WG, weight='other').todense(), OL)
|
||||
|
||||
def test_normalized_laplacian(self):
|
||||
"Generalized Graph Laplacian"
|
||||
GL = numpy.array([[1.00, -0.408, -0.408, -0.577, 0.00],
|
||||
[-0.408, 1.00, -0.50, 0.00, 0.00],
|
||||
[-0.408, -0.50, 1.00, 0.00, 0.00],
|
||||
[-0.577, 0.00, 0.00, 1.00, 0.00],
|
||||
[0.00, 0.00, 0.00, 0.00, 0.00]])
|
||||
Lsl = numpy.array([[0.75, -0.2887, -0.2887, -0.3536, 0.],
|
||||
[-0.2887, 0.6667, -0.3333, 0., 0.],
|
||||
[-0.2887, -0.3333, 0.6667, 0., 0.],
|
||||
[-0.3536, 0., 0., 0.5, 0.],
|
||||
[0., 0., 0., 0., 0.]])
|
||||
|
||||
npt.assert_almost_equal(nx.normalized_laplacian_matrix(self.G).todense(),
|
||||
GL, decimal=3)
|
||||
npt.assert_almost_equal(nx.normalized_laplacian_matrix(self.MG).todense(),
|
||||
GL, decimal=3)
|
||||
npt.assert_almost_equal(nx.normalized_laplacian_matrix(self.WG).todense(),
|
||||
GL, decimal=3)
|
||||
npt.assert_almost_equal(nx.normalized_laplacian_matrix(self.WG, weight='other').todense(),
|
||||
GL, decimal=3)
|
||||
npt.assert_almost_equal(nx.normalized_laplacian_matrix(self.Gsl).todense(),
|
||||
Lsl, decimal=3)
|
||||
|
||||
def test_directed_laplacian(self):
|
||||
"Directed Laplacian"
|
||||
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
|
||||
# "Google's PageRank and Beyond". The graph contains dangling nodes, so
|
||||
# the pagerank random walk is selected by directed_laplacian
|
||||
G = nx.DiGraph()
|
||||
G.add_edges_from(((1, 2), (1, 3), (3, 1), (3, 2), (3, 5), (4, 5), (4, 6),
|
||||
(5, 4), (5, 6), (6, 4)))
|
||||
GL = numpy.array([[0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
|
||||
[-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
|
||||
[-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
|
||||
[-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
|
||||
[-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
|
||||
[-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
|
||||
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
||||
|
||||
# Make the graph strongly connected, so we can use a random and lazy walk
|
||||
G.add_edges_from((((2, 5), (6, 1))))
|
||||
GL = numpy.array([[1., -0.3062, -0.4714, 0., 0., -0.3227],
|
||||
[-0.3062, 1., -0.1443, 0., -0.3162, 0.],
|
||||
[-0.4714, -0.1443, 1., 0., -0.0913, 0.],
|
||||
[0., 0., 0., 1., -0.5, -0.5],
|
||||
[0., -0.3162, -0.0913, -0.5, 1., -0.25],
|
||||
[-0.3227, 0., 0., -0.5, -0.25, 1.]])
|
||||
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type='random')
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
||||
|
||||
GL = numpy.array([[0.5, -0.1531, -0.2357, 0., 0., -0.1614],
|
||||
[-0.1531, 0.5, -0.0722, 0., -0.1581, 0.],
|
||||
[-0.2357, -0.0722, 0.5, 0., -0.0456, 0.],
|
||||
[0., 0., 0., 0.5, -0.25, -0.25],
|
||||
[0., -0.1581, -0.0456, -0.25, 0.5, -0.125],
|
||||
[-0.1614, 0., 0., -0.25, -0.125, 0.5]])
|
||||
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type='lazy')
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
||||
|
||||
def test_directed_combinatorial_laplacian(self):
|
||||
"Directed combinatorial Laplacian"
|
||||
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
|
||||
# "Google's PageRank and Beyond". The graph contains dangling nodes, so
|
||||
# the pagerank random walk is selected by directed_laplacian
|
||||
G = nx.DiGraph()
|
||||
G.add_edges_from(((1, 2), (1, 3), (3, 1), (3, 2), (3, 5), (4, 5), (4, 6),
|
||||
(5, 4), (5, 6), (6, 4)))
|
||||
|
||||
GL = numpy.array([[0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027],
|
||||
[-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069],
|
||||
[-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027],
|
||||
[-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187],
|
||||
[-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505],
|
||||
[-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]])
|
||||
|
||||
L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9,
|
||||
nodelist=sorted(G))
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
||||
|
||||
# Make the graph strongly connected, so we can use a random and lazy walk
|
||||
G.add_edges_from((((2, 5), (6, 1))))
|
||||
|
||||
GL = numpy.array([[0.1395, -0.0349, -0.0465, 0, 0, -0.0581],
|
||||
[-0.0349, 0.0930, -0.0116, 0, -0.0465, 0],
|
||||
[-0.0465, -0.0116, 0.0698, 0, -0.0116, 0],
|
||||
[0, 0, 0, 0.2326, -0.1163, -0.1163],
|
||||
[0, -0.0465, -0.0116, -0.1163, 0.2326, -0.0581],
|
||||
[-0.0581, 0, 0, -0.1163, -0.0581, 0.2326]])
|
||||
|
||||
L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9,
|
||||
nodelist=sorted(G),
|
||||
walk_type='random')
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
||||
|
||||
GL = numpy.array([[0.0698, -0.0174, -0.0233, 0, 0, -0.0291],
|
||||
[-0.0174, 0.0465, -0.0058, 0, -0.0233, 0],
|
||||
[-0.0233, -0.0058, 0.0349, 0, -0.0058, 0],
|
||||
[0, 0, 0, 0.1163, -0.0581, -0.0581],
|
||||
[0, -0.0233, -0.0058, -0.0581, 0.1163, -0.0291],
|
||||
[-0.0291, 0, 0, -0.0581, -0.0291, 0.1163]])
|
||||
|
||||
L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9,
|
||||
nodelist=sorted(G),
|
||||
walk_type='lazy')
|
||||
npt.assert_almost_equal(L, GL, decimal=3)
|
65
extensions/networkx/linalg/tests/test_modularity.py
Normal file
@ -0,0 +1,65 @@
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
npt = pytest.importorskip('numpy.testing')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
|
||||
import networkx as nx
|
||||
from networkx.generators.degree_seq import havel_hakimi_graph
|
||||
|
||||
|
||||
class TestModularity(object):
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
deg = [3, 2, 2, 1, 0]
|
||||
cls.G = havel_hakimi_graph(deg)
|
||||
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
|
||||
# "Google's PageRank and Beyond". (Used for test_directed_laplacian)
|
||||
cls.DG = nx.DiGraph()
|
||||
cls.DG.add_edges_from(((1, 2), (1, 3), (3, 1), (3, 2), (3, 5), (4, 5), (4, 6),
|
||||
(5, 4), (5, 6), (6, 4)))
|
||||
|
||||
def test_modularity(self):
|
||||
"Modularity matrix"
|
||||
B = numpy.matrix([[-1.125, 0.25, 0.25, 0.625, 0.],
|
||||
[0.25, -0.5, 0.5, -0.25, 0.],
|
||||
[0.25, 0.5, -0.5, -0.25, 0.],
|
||||
[0.625, -0.25, -0.25, -0.125, 0.],
|
||||
[0., 0., 0., 0., 0.]])
|
||||
|
||||
permutation = [4, 0, 1, 2, 3]
|
||||
npt.assert_equal(nx.modularity_matrix(self.G), B)
|
||||
npt.assert_equal(nx.modularity_matrix(self.G, nodelist=permutation),
|
||||
B[numpy.ix_(permutation, permutation)])
|
||||
|
||||
def test_modularity_weight(self):
|
||||
"Modularity matrix with weights"
|
||||
B = numpy.matrix([[-1.125, 0.25, 0.25, 0.625, 0.],
|
||||
[0.25, -0.5, 0.5, -0.25, 0.],
|
||||
[0.25, 0.5, -0.5, -0.25, 0.],
|
||||
[0.625, -0.25, -0.25, -0.125, 0.],
|
||||
[0., 0., 0., 0., 0.]])
|
||||
|
||||
G_weighted = self.G.copy()
|
||||
for n1, n2 in G_weighted.edges():
|
||||
G_weighted.edges[n1, n2]["weight"] = 0.5
|
||||
# The following test would fail in networkx 1.1
|
||||
npt.assert_equal(nx.modularity_matrix(G_weighted), B)
|
||||
# The following tests that the modularity matrix gets rescaled accordingly
|
||||
npt.assert_equal(nx.modularity_matrix(G_weighted, weight="weight"), 0.5 * B)
|
||||
|
||||
def test_directed_modularity(self):
|
||||
"Directed Modularity matrix"
|
||||
B = numpy.matrix([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4],
|
||||
[0., 0., 0., 0., 0., 0.],
|
||||
[0.7, 0.4, -0.3, -0.6, 0.4, -0.6],
|
||||
[-0.2, -0.4, -0.2, -0.4, 0.6, 0.6],
|
||||
[-0.2, -0.4, -0.2, 0.6, -0.4, 0.6],
|
||||
[-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]])
|
||||
node_permutation = [5, 1, 2, 3, 4, 6]
|
||||
idx_permutation = [4, 0, 1, 2, 3, 5]
|
||||
mm = nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG))
|
||||
npt.assert_equal(mm, B)
|
||||
npt.assert_equal(nx.directed_modularity_matrix(self.DG,
|
||||
nodelist=node_permutation),
|
||||
B[numpy.ix_(idx_permutation, idx_permutation)])
|
73
extensions/networkx/linalg/tests/test_spectrum.py
Normal file
@ -0,0 +1,73 @@
|
||||
import pytest
|
||||
numpy = pytest.importorskip('numpy')
|
||||
npt = pytest.importorskip('numpy.testing')
|
||||
scipy = pytest.importorskip('scipy')
|
||||
|
||||
import networkx as nx
|
||||
from networkx.generators.degree_seq import havel_hakimi_graph
|
||||
|
||||
|
||||
class TestSpectrum(object):
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
deg = [3, 2, 2, 1, 0]
|
||||
cls.G = havel_hakimi_graph(deg)
|
||||
cls.P = nx.path_graph(3)
|
||||
cls.WG = nx.Graph((u, v, {'weight': 0.5, 'other': 0.3})
|
||||
for (u, v) in cls.G.edges())
|
||||
cls.WG.add_node(4)
|
||||
cls.DG = nx.DiGraph()
|
||||
nx.add_path(cls.DG, [0, 1, 2])
|
||||
|
||||
def test_laplacian_spectrum(self):
|
||||
"Laplacian eigenvalues"
|
||||
evals = numpy.array([0, 0, 1, 3, 4])
|
||||
e = sorted(nx.laplacian_spectrum(self.G))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
e = sorted(nx.laplacian_spectrum(self.WG, weight=None))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
e = sorted(nx.laplacian_spectrum(self.WG))
|
||||
npt.assert_almost_equal(e, 0.5 * evals)
|
||||
e = sorted(nx.laplacian_spectrum(self.WG, weight='other'))
|
||||
npt.assert_almost_equal(e, 0.3 * evals)
|
||||
|
||||
def test_normalized_laplacian_spectrum(self):
|
||||
"Normalized Laplacian eigenvalues"
|
||||
evals = numpy.array([0, 0, 0.7712864461218, 1.5, 1.7287135538781])
|
||||
e = sorted(nx.normalized_laplacian_spectrum(self.G))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight=None))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
e = sorted(nx.normalized_laplacian_spectrum(self.WG))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight='other'))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
|
||||
|
||||
def test_adjacency_spectrum(self):
|
||||
"Adjacency eigenvalues"
|
||||
evals = numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)])
|
||||
e = sorted(nx.adjacency_spectrum(self.P))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
|
||||
def test_modularity_spectrum(self):
|
||||
"Modularity eigenvalues"
|
||||
evals = numpy.array([-1.5, 0., 0.])
|
||||
e = sorted(nx.modularity_spectrum(self.P))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
# Directed modularity eigenvalues
|
||||
evals = numpy.array([-0.5, 0., 0.])
|
||||
e = sorted(nx.modularity_spectrum(self.DG))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
|
||||
def test_bethe_hessian_spectrum(self):
|
||||
"Bethe Hessian eigenvalues"
|
||||
evals = numpy.array([0.5 * (9 - numpy.sqrt(33)), 4,
|
||||
0.5 * (9 + numpy.sqrt(33))])
|
||||
e = sorted(nx.bethe_hessian_spectrum(self.P, r=2))
|
||||
npt.assert_almost_equal(e, evals)
|
||||
# Collapses back to Laplacian:
|
||||
e1 = sorted(nx.bethe_hessian_spectrum(self.P, r=1))
|
||||
e2 = sorted(nx.laplacian_spectrum(self.P))
|
||||
npt.assert_almost_equal(e1, e2)
|