In this article, we walk through example source code for the Python numpy linalg module in detail and answer common questions about python numpy.linalg. We also cover: the performance gap between np.linalg.solve and scipy.linalg.cho_solve, and example source code for the numpy.linalg cholesky(), cond(), and det() functions.
Contents:
- Python numpy module - linalg() example source code (python numpy.linalg)
- The performance gap between np.linalg.solve and scipy.linalg.cho_solve
- Python numpy.linalg module - cholesky() example source code
- Python numpy.linalg module - cond() example source code
- Python numpy.linalg module - det() example source code
Python numpy module - linalg() example source code (python numpy.linalg)
We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.linalg.
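Before diving into the collected snippets, here is a minimal self-contained sketch (our own illustration, not one of the 50 collected examples) of the numpy.linalg calls that recur throughout them:

```python
import numpy as np

A = np.array([[4.0, 2.0], [2.0, 3.0]])
b = np.array([1.0, 2.0])

x = np.linalg.solve(A, b)           # solve the linear system A x = b
w, V = np.linalg.eigh(A)            # eigendecomposition of a symmetric matrix
print(np.linalg.det(A))             # determinant: 8.0
print(np.linalg.norm(A @ x - b))    # residual of the solve, ~0.0
```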
- def logdet(C, eps=1e-6, safe=0):
- '''
- Logarithm of the determinant of a matrix
- Works only with real-valued positive definite matrices
- '''
- try:
- return 2.0*np.sum(np.log(np.diag(chol(C))))
- except numpy.linalg.linalg.LinAlgError:
- if safe: C = check_covmat(C,eps=eps)
- w = np.linalg.eigh(C)[0]
- w = np.real(w)
- w[w<eps]=eps
- det = np.sum(np.log(w))
- return det
- def _get_skew(corners, board):
- """
- Get skew for given checkerboard detection.
- Scaled to [0,1], where 0 = no skew, 1 = high skew
- Skew is proportional to the divergence of three outside corners from 90 degrees.
- """
- # TODO: using three nearby interior corners might be more robust, outside corners occasionally
- # get mis-detected
- up_left, up_right, down_right, _ = _get_outside_corners(corners, board)
- def angle(a, b, c):
- """
- Return angle between lines ab, bc
- """
- ab = a - b
- cb = c - b
- return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb)))
- skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right)))
- return skew
- def get_ground_state(sparse_operator):
- """Compute lowest eigenvalue and eigenstate.
- Returns:
- eigenvalue: The lowest eigenvalue,a float.
- eigenstate: The lowest eigenstate in scipy.sparse csc format.
- """
- if not is_hermitian(sparse_operator):
- raise ValueError('sparse_operator must be hermitian.')
- values, vectors = scipy.sparse.linalg.eigsh(
- sparse_operator, 2, which='SA', maxiter=1e7)
- eigenstate = scipy.sparse.csc_matrix(vectors[:, 0])
- eigenvalue = values[0]
- return eigenvalue, eigenstate.getH()
- def __init__(self, mesh, transform_out=None, transform_in=None):
- transform_out = numpy.matrix(transform_out) if transform_out is not None else None
- transform_in = numpy.matrix(transform_in) if transform_in is not None else None
- if transform_in is None and transform_out is None:
- transform_in = numpy.identity(3)
- transform_out = numpy.identity(3)
- elif transform_in is None:
- try:
- transform_in = numpy.linalg.inv(transform_out)
- except:
- transform_in = None
- elif transform_out is None:
- try:
- transform_out = numpy.linalg.inv(transform_in)
- except:
- transform_out = None
- self.transform_out, self.transform_in = transform_out, transform_in
- super().__init__(
- mesh,
- warp_in=lambda vertex: self.transform_in.dot(vertex).tolist()[0][:3] if self.transform_in else None,
- warp_out=lambda vertex: self.transform_out.dot(vertex).tolist()[0][:3] if self.transform_out else None,
- )
- def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
- """return likelihood of x for the normal density N(m,sigma**2 * Cinv**-1)"""
- # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)
- # for i in xrange(3): print mean([cma.likelihood(20*r-10,dim * [0],
- # None,3) for r in rand(10000,dim)]) * 20**dim
- if m is None:
- dx = x
- else:
- dx = x - m # array(x) - array(m)
- n = len(x)
- s2pi = (2 * np.pi)**(n / 2.)
- if Cinv is None:
- return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
- if detC is None:
- detC = 1. / np.linalg.linalg.det(Cinv)
- return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n
- def test_pseudoinverse_correctness():
- rng = numpy.random.RandomState(utt.fetch_seed())
- d1 = rng.randint(4) + 2
- d2 = rng.randint(4) + 2
- r = rng.randn(d1, d2).astype(theano.config.floatX)
- x = tensor.matrix()
- xi = pinv(x)
- ri = function([x], xi)(r)
- assert ri.shape[0] == r.shape[1]
- assert ri.shape[1] == r.shape[0]
- assert ri.dtype == r.dtype
- # Note that pseudoinverse can be quite imprecise so I prefer to compare
- # the result with what numpy.linalg returns
- assert _allclose(ri, numpy.linalg.pinv(r))
- def test_numpy_compare(self):
- rng = numpy.random.RandomState(utt.fetch_seed())
- M = tensor.matrix("A", dtype=theano.config.floatX)
- V = tensor.vector("V", dtype=theano.config.floatX)
- a = rng.rand(4, 4).astype(theano.config.floatX)
- b = rng.rand(4).astype(theano.config.floatX)
- A = ([None, 'fro', 'inf', '-inf', 1, -1,
- None, 'inf', '-inf', 0, 1, -1, 2, -2],
- [M, M, M, M, M, M, V, V, V, V, V, V, V, V],
- [a, a, a, a, a, a, b, b, b, b, b, b, b, b],
- [None, 'fro', inf, -inf, 1, -1, None, inf, -inf, 0, 1, -1, 2, -2])
- for i in range(0, 14):
- f = function([A[1][i]], norm(A[1][i], A[0][i]))
- t_n = f(A[2][i])
- n_n = numpy.linalg.norm(A[2][i], A[3][i])
- assert _allclose(n_n, t_n)
- def test_eval(self):
- A = self.A
- Ai = tensorinv(A)
- n_ainv = numpy.linalg.tensorinv(self.a)
- tf_a = function([A], [Ai])
- t_ainv = tf_a(self.a)
- assert _allclose(n_ainv, t_ainv)
- B = self.B
- Bi = tensorinv(B)
- Bi1 = tensorinv(B, ind=1)
- n_binv = numpy.linalg.tensorinv(self.b)
- n_binv1 = numpy.linalg.tensorinv(self.b1, ind=1)
- tf_b = function([B], [Bi])
- tf_b1 = function([B], [Bi1])
- t_binv = tf_b(self.b)
- t_binv1 = tf_b1(self.b1)
- assert _allclose(t_binv, n_binv)
- assert _allclose(t_binv1, n_binv1)
- def test_perform(self):
- if not imported_scipy:
- raise SkipTest('kron tests need the scipy package to be installed')
- for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
- x = tensor.tensor(dtype='floatX',
- broadcastable=(False,) * len(shp0))
- a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
- for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
- if len(shp0) + len(shp1) == 2:
- continue
- y = tensor.tensor(dtype='floatX',
- broadcastable=(False,) * len(shp1))
- f = function([x, y], kron(x, y))
- b = self.rng.rand(*shp1).astype(config.floatX)
- out = f(a, b)
- # Newer versions of scipy want 4 dimensions at least,
- # so we have to add a dimension to a and flatten the result.
- if len(shp0) + len(shp1) == 3:
- scipy_val = scipy.linalg.kron(
- a[numpy.newaxis, :], b).flatten()
- else:
- scipy_val = scipy.linalg.kron(a, b)
- utt.assert_allclose(out, scipy_val)
- def real_eig(M, eps=1e-9):
- '''
- This code expects a real hermitian matrix
- and should throw a ValueError if not.
- This is probably redundant to the scipy eigh function.
- Do not use.
- '''
- if not (type(M)==np.ndarray):
- raise ValueError("Expected array; type is %s"%type(M))
- if np.any(np.abs(np.imag(M))>eps):
- raise ValueError("Matrix has imaginary values >%0.2e; will not extract real eigenvalues"%eps)
- M = np.real(M)
- w,v = np.linalg.eig(M)
- if np.any(abs(np.imag(w))>eps):
- raise ValueError('Eigenvalues with imaginary part >%0.2e; matrix has complex eigenvalues'%eps)
- w = np.real(w)
- order = np.argsort(w)
- w = w[order]
- v = v[:,order]
- return w,v
- def _getAplus(A):
- '''
- Please see the documentation for nearPDHigham
- '''
- eigval, eigvec = np.linalg.eig(A)
- Q = np.matrix(eigvec)
- xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
- return Q*xdiag*Q.T
- def bench_on(runner, sym, Ns, trials, dtype=None):
- global args, kernel, out, mkl_layer
- prepare = globals().get("prepare_"+sym, prepare_default)
- kernel = globals().get("kernel_"+sym, None)
- if not kernel:
- kernel = getattr(np.linalg, sym)
- out_lvl = runner.__doc__.split('.')[0].strip()
- func_s = kernel.__doc__.split('.')[0].strip()
- log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
- args = [prepare(int(i)) for i in Ns]
- it = range(len(Ns))
- # pprint(Ns)
- out = np.empty(shape=(len(Ns), trials))
- b = body(trials)
- tic, toc = (0, 0)
- log.debug('Warming up %s (%s).. ' % (sym, func_s))
- runner(range(1000), empty_work)
- kernel(*args[0])
- runner(range(1000), empty_work)
- log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
- gc_old = gc.isenabled()
- # gc.disable()
- tic = time.time()
- runner(it, b)
- toc = time.time() - tic
- if gc_old:
- gc.enable()
- if 'reused_pool' in globals():
- del globals()['reused_pool']
- #calculate average time and min time and also keep track of outliers (max time in the loop)
- min_time = np.amin(out)
- max_time = np.amax(out)
- mean_time = np.mean(out)
- stdev_time = np.std(out)
- #print("Min = %.5f,Max = %.5f,Mean = %.5f,stdev = %.5f " % (min_time,max_time,mean_time,stdev_time))
- #final_times = [min_time,stdev_time]
- print(''## %s: Outter:%s,Inner:%s,Wall seconds:%f\\n'' % (sym, out_lvl, mkl_layer, float(toc)))
- return out
- def get_whitening_matrix(X, fudge=1E-18):
- from numpy.linalg import eigh
- Xcov = numpy.dot(X.T, X)/X.shape[0]
- d,V = eigh(Xcov)
- D = numpy.diag(1./numpy.sqrt(d+fudge))
- W = numpy.dot(numpy.dot(V,D), V.T)
- return W
- def get_precision(self):
- """Compute data precision matrix with the generative model.
- Equals the inverse of the covariance but computed with
- the matrix inversion lemma for efficiency.
- Returns
- -------
- precision : array,shape=(n_features,n_features)
- Estimated precision of data.
- """
- n_features = self.components_.shape[1]
- # handle corner cases first
- if self.n_components_ == 0:
- return np.eye(n_features) / self.noise_variance_
- if self.n_components_ == n_features:
- return linalg.inv(self.get_covariance())
- # Get precision using matrix inversion lemma
- components_ = self.components_
- exp_var = self.explained_variance_
- exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
- precision = np.dot(components_, components_.T) / self.noise_variance_
- precision.flat[::len(precision) + 1] += 1. / exp_var_diff
- precision = np.dot(components_.T,
- np.dot(linalg.inv(precision), components_))
- precision /= -(self.noise_variance_ ** 2)
- precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
- return precision
- def __init__(self, dimension,
- lazy_update_gap=0,
- constant_trace='''',
- randn=np.random.randn,
- eigenmethod=np.linalg.eigh):
- try:
- self.dimension = len(dimension)
- standard_deviations = np.asarray(dimension)
- except TypeError:
- self.dimension = dimension
- standard_deviations = np.ones(dimension)
- assert len(standard_deviations) == self.dimension
- # prevent equal eigenvals,a hack for np.linalg:
- self.C = np.diag(standard_deviations**2
- * np.exp((1e-4 / self.dimension) *
- np.arange(self.dimension)))
- "covariance matrix"
- self.lazy_update_gap = lazy_update_gap
- self.constant_trace = constant_trace
- self.randn = randn
- self.eigenmethod = eigenmethod
- self.B = np.eye(self.dimension)
- "columns,B.T[i] == B[:,i],are eigenvectors of C"
- self.D = np.diag(self.C)**0.5 # we assume that C is yet diagonal
- idx = self.D.argsort()
- self.D = self.D[idx]
- self.B = self.B[:, idx]
- "axis lengths,roots of eigenvalues,sorted"
- self._inverse_root_C = None # see transform_inv...
- self.last_update = 0
- self.count_tell = 0
- self.count_eigen = 0
- def rotation_from_matrix(matrix):
- """Return rotation angle and axis from rotation matrix.
- >>> angle = (random.random() - 0.5) * (2*math.pi)
- >>> direc = numpy.random.random(3) - 0.5
- >>> point = numpy.random.random(3) - 0.5
- >>> R0 = rotation_matrix(angle,direc,point)
- >>> angle, direc, point = rotation_from_matrix(R0)
- >>> R1 = rotation_matrix(angle, direc, point)
- >>> is_same_transform(R0,R1)
- True
- """
- R = numpy.array(matrix, dtype=numpy.float64, copy=False)
- R33 = R[:3, :3]
- # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
- w, W = numpy.linalg.eig(R33.T)
- i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
- if not len(i):
- raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
- direction = numpy.real(W[:, i[-1]]).squeeze()
- # point: unit eigenvector of R corresponding to eigenvalue of 1
- w, Q = numpy.linalg.eig(R)
- i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
- if not len(i):
- raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
- point = numpy.real(Q[:, i[-1]]).squeeze()
- point /= point[3]
- # rotation angle depending on direction
- cosa = (numpy.trace(R33) - 1.0) / 2.0
- if abs(direction[2]) > 1e-8:
- sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
- elif abs(direction[1]) > 1e-8:
- sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
- else:
- sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
- angle = math.atan2(sina, cosa)
- return angle, direction, point
- # Function to translate handshape coding to degrees of rotation, adduction, flexion
- def vector_norm(data, axis=None, out=None):
- """Return length,i.e. Euclidean norm,of ndarray along axis.
- >>> v = numpy.random.random(3)
- >>> n = vector_norm(v)
- >>> numpy.allclose(n,numpy.linalg.norm(v))
- True
- >>> v = numpy.random.rand(6, 5, 3)
- >>> n = vector_norm(v, axis=-1)
- >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
- True
- >>> n = vector_norm(v, axis=1)
- >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
- True
- >>> v = numpy.random.rand(5, 4, 3)
- >>> n = numpy.empty((5, 3))
- >>> vector_norm(v, axis=1, out=n)
- >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
- True
- >>> vector_norm([])
- 0.0
- >>> vector_norm([1])
- 1.0
- """
- data = numpy.array(data, copy=True)
- if out is None:
- if data.ndim == 1:
- return math.sqrt(numpy.dot(data, data))
- data *= data
- out = numpy.atleast_1d(numpy.sum(data, axis=axis))
- numpy.sqrt(out, out)
- return out
- else:
- data *= data
- numpy.sum(data, axis=axis, out=out)
- numpy.sqrt(out, out)
- def expms(A, eig=np.linalg.eigh):
- """matrix exponential for a symmetric matrix"""
- # Todo: check that this works reliably for low rank matrices
- # first: symmetrize A
- D, B = eig(A)
- return np.dot(B, (np.exp(D) * B).T)
- def rmsd(X, Y):
- """
- Calculate the root mean squared deviation (RMSD) using Kabsch's formula.
- @param X: (n,d) input vector
- @type X: numpy array
- @param Y: (n,d) input vector
- @type Y: numpy array
- @return: rmsd value between the input vectors
- @rtype: float
- """
- from numpy import sum, dot, sqrt, clip, average
- from numpy.linalg import svd, det
- X = X - X.mean(0)
- Y = Y - Y.mean(0)
- R_x = sum(X ** 2)
- R_y = sum(Y ** 2)
- V, L, U = svd(dot(Y.T, X))
- if det(dot(V, U)) < 0.:
- L[-1] *= -1
- return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X))
- def wrmsd(X, Y, w):
- """
- Calculate the weighted root mean squared deviation (wRMSD) using Kabsch's
- formula.
- @param X: (n,d) input vector
- @type X: numpy array
- @param Y: (n,d) input vector
- @type Y: numpy array
- @param w: input weights
- @type w: numpy array
- @return: rmsd value between the input vectors
- @rtype: float
- from numpy import sum, dot, sqrt, clip, average
- from numpy.linalg import svd
- ## normalize weights
- w = w / w.sum()
- X = X - dot(w, X)
- Y = Y - dot(w, Y)
- R_x = sum(X.T ** 2 * w)
- R_y = sum(Y.T ** 2 * w)
- L = svd(dot(Y.T * w, X))[1]
- return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300))
- def is_mirror_image(X, Y):
- """
- Check if two configurations X and Y are mirror images
- (i.e. their optimal superposition involves a reflection).
- @param X: n x 3 input vector
- @type X: numpy array
- @param Y: n x 3 input vector
- @type Y: numpy array
- @rtype: bool
- """
- from numpy.linalg import det, svd
- ## center configurations
- X = X - numpy.mean(X, 0)
- Y = Y - numpy.mean(Y, 0)
- ## SVD of correlation matrix
- V, L, U = svd(numpy.dot(numpy.transpose(X), Y)) #@UnusedVariable
- R = numpy.dot(V, U)
- return det(R) < 0
- def eig(a):
- u,v = np.linalg.eig(a)
- return u.T
- def sparse_eigenspectrum(sparse_operator):
- """Perform a dense diagonalization.
- Returns:
- eigenspectrum: The lowest eigenvalues in a numpy array.
- """
- dense_operator = sparse_operator.todense()
- if is_hermitian(sparse_operator):
- eigenspectrum = numpy.linalg.eigvalsh(dense_operator)
- else:
- eigenspectrum = numpy.linalg.eigvals(dense_operator)
- return numpy.sort(eigenspectrum)
- def get_gap(sparse_operator):
- """Compute gap between lowest eigenvalue and first excited state.
- Returns: A real float giving eigenvalue gap.
- """
- if not is_hermitian(sparse_operator):
- raise ValueError('sparse_operator must be hermitian.')
- values, _ = scipy.sparse.linalg.eigsh(
- sparse_operator, 2, which='SA', maxiter=1e7)
- gap = abs(values[1] - values[0])
- return gap
- def perpedndicular1(v):
- """calculate the perpendicular unit vector"""
- return numpy.array((-v[1], v[0])) / numpy.linalg.norm((-v[1], v[0]))
- def normalize(v):
- return v / numpy.linalg.norm(v)
- def test_qr_modes():
- rng = numpy.random.RandomState(utt.fetch_seed())
- A = tensor.matrix("A", dtype=theano.config.floatX)
- a = rng.rand(4, 4).astype(theano.config.floatX)
- f = function([A], qr(A))
- t_qr = f(a)
- n_qr = numpy.linalg.qr(a)
- assert _allclose(n_qr, t_qr)
- for mode in ["reduced", "r", "raw"]:
- f = function([A], qr(A, mode))
- t_qr = f(a)
- n_qr = numpy.linalg.qr(a, mode)
- if isinstance(n_qr, (list, tuple)):
- assert _allclose(n_qr[0], t_qr[0])
- assert _allclose(n_qr[1], t_qr[1])
- else:
- assert _allclose(n_qr, t_qr)
- try:
- n_qr = numpy.linalg.qr(a, "complete")
- f = function([A], qr(A, "complete"))
- t_qr = f(a)
- assert _allclose(n_qr, t_qr)
- except TypeError as e:
- assert "name 'complete' is not defined" in str(e)
- def test_svd():
- rng = numpy.random.RandomState(utt.fetch_seed())
- A = tensor.matrix("A", dtype=theano.config.floatX)
- U, V, T = svd(A)
- fn = function([A], [U, V, T])
- a = rng.rand(4, 4).astype(theano.config.floatX)
- n_u, n_v, n_t = numpy.linalg.svd(a)
- t_u, t_v, t_t = fn(a)
- assert _allclose(n_u, t_u)
- assert _allclose(n_v, t_v)
- assert _allclose(n_t, t_t)
- def test_inverse_singular():
- singular = numpy.array([[1, 0]] + [[0, 0]] * 2,
- dtype=theano.config.floatX)
- a = tensor.matrix()
- f = function([a], matrix_inverse(a))
- try:
- f(singular)
- except numpy.linalg.LinAlgError:
- return
- assert False
- def test_wrong_coefficient_matrix(self):
- x = tensor.vector()
- y = tensor.vector()
- z = tensor.scalar()
- b = theano.tensor.nlinalg.lstsq()(x, y, z)
- f = function([x, y, z], b)
- self.assertRaises(numpy.linalg.linalg.LinAlgError, f, [2, 1], [2, 1], 1)
- def test_wrong_rcond_dimension(self):
- x = tensor.vector()
- y = tensor.vector()
- z = tensor.vector()
- b = theano.tensor.nlinalg.lstsq()(x, y, z)
- f = function([x, y, z], b)
- self.assertRaises(numpy.linalg.LinAlgError, f, [2, 1], [2, 1], [2, 1])
- def test_numpy_compare(self):
- rng = numpy.random.RandomState(utt.fetch_seed())
- A = tensor.matrix("A", dtype=theano.config.floatX)
- Q = matrix_power(A, 3)
- fn = function([A], [Q])
- a = rng.rand(4, 4).astype(theano.config.floatX)
- n_p = numpy.linalg.matrix_power(a, 3)
- t_p = fn(a)
- assert numpy.allclose(n_p, t_p)
- def test_eigvalsh_grad():
- if not imported_scipy:
- raise SkipTest("Scipy needed for the geigvalsh op.")
- import scipy.linalg
- rng = numpy.random.RandomState(utt.fetch_seed())
- a = rng.randn(5, 5)
- a = a + a.T
- b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
- tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
- [a, b], rng=numpy.random)
- def test_solve_correctness(self):
- if not imported_scipy:
- raise SkipTest("Scipy needed for the Cholesky and Solve ops.")
- rng = numpy.random.RandomState(utt.fetch_seed())
- A = theano.tensor.matrix()
- b = theano.tensor.matrix()
- y = self.op(A, b)
- gen_solve_func = theano.function([A, b], y)
- cholesky_lower = Cholesky(lower=True)
- L = cholesky_lower(A)
- y_lower = self.op(L, b)
- lower_solve_func = theano.function([L, b], y_lower)
- cholesky_upper = Cholesky(lower=False)
- U = cholesky_upper(A)
- y_upper = self.op(U, b)
- upper_solve_func = theano.function([U, b], y_upper)
- b_val = numpy.asarray(rng.rand(5, 1), dtype=config.floatX)
- # 1-test general case
- A_val = numpy.asarray(rng.rand(5, 5), dtype=config.floatX)
- # positive definite matrix:
- A_val = numpy.dot(A_val.transpose(), A_val)
- assert numpy.allclose(scipy.linalg.solve(A_val, b_val),
- gen_solve_func(A_val, b_val))
- # 2-test lower triangular case
- L_val = scipy.linalg.cholesky(A_val, lower=True)
- assert numpy.allclose(scipy.linalg.solve_triangular(L_val, b_val, lower=True),
- lower_solve_func(L_val, b_val))
- # 3-test upper triangular case
- U_val = scipy.linalg.cholesky(A_val, lower=False)
- assert numpy.allclose(scipy.linalg.solve_triangular(U_val, b_val, lower=False),
- upper_solve_func(U_val, b_val))
- def test_expm():
- if not imported_scipy:
- raise SkipTest("Scipy needed for the expm op.")
- rng = numpy.random.RandomState(utt.fetch_seed())
- A = rng.randn(5, 5).astype(config.floatX)
- ref = scipy.linalg.expm(A)
- x = tensor.matrix()
- m = expm(x)
- expm_f = function([x], m)
- val = expm_f(A)
- numpy.testing.assert_array_almost_equal(val, ref)
- def rcond(x):
- '''
- Reciprocal condition number
- '''
- return 1./np.linalg.cond(x)
- def check_covmat_fast(C, N=None, eps=1e-6):
- '''
- Verify that matrix C is a size NxN precision or covariance matrix
- '''
- if not type(C)==np.ndarray:
- raise ValueError("Covariance matrix should be a 2D numpy array")
- if not len(C.shape)==2:
- raise ValueError("Covariance matrix should be a 2D numpy array")
- if N is None:
- N = C.shape[0]
- if not C.shape==(N,N):
- raise ValueError("Expected size %d x %d matrix"%(N,N))
- if np.any(~np.isreal(C)):
- raise ValueError("Covariance matrices should not contain complex numbers")
- C = np.real(C)
- if np.any(~np.isfinite(C)):
- raise ValueError("Covariance matrix contains NaN or ±inf!")
- if not np.all(np.abs(C-C.T)<eps):
- raise ValueError("Covariance matrix is not symmetric up to precision %0.1e"%eps)
- try:
- ch = chol(C)
- except numpy.linalg.linalg.LinAlgError:
- # Check smallest eigenvalue if cholesky fails
- mine = np.real(scipy.linalg.decomp.eigh(C,eigvals=(0,0))[0][0])
- if np.any(mine<-eps):
- raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e'%(mine,-eps))
- if (mine<eps):
- C = C + np.eye(N)*(eps-mine)
- C = 0.5*(C+C.T)
- return C
- def rsolve(a, b):
- '''
- Wraps solve, applied to the right-hand side:
- solves the system x A = B
- '''
- return scipy.linalg.solve(b.T,a.T).T
- def lwlr(testPoint, xMat, yMat, k=1.0):
- m = np.shape(xMat)[0]
- weights = np.matrix(np.eye(m)) # initialize weights to the identity
- for j in range(m):
- diffMat = testPoint-xMat[j, :]
- weights[j, j] = np.exp(diffMat*diffMat.T/(-2.0*k**2)) # Gaussian kernel weight
- print weights
- xTx = xMat.T*(weights*xMat)
- if np.linalg.det(xTx) == 0.0:
- print 'This matrix is singular, cannot do inverse'
- return
- ws = xTx.I*(xMat.T*(weights*yMat))
- return testPoint*ws
- def ridgeRegres(xMat, yMat, lam=0.2):
- xTx = xMat.T*xMat
- denom = xTx+np.eye(np.shape(xMat)[1])*lam
- if np.linalg.det(denom) == 0.0:
- print 'This matrix is singular, cannot do inverse'
- return
- ws = denom.I*(xMat.T*yMat)
- return ws
The performance gap between np.linalg.solve and scipy.linalg.cho_solve
How to address the performance gap between np.linalg.solve and scipy.linalg.cho_solve
I am trying to find alpha by solving two linear systems that involve a Cholesky decomposition. scipy has a special function that does exactly this, and there is a significant performance gap between the scipy and numpy versions. Is there any other way to get performance as good as scipy's using numpy alone (assuming I am not allowed to use scipy)?
import numpy as np
import scipy.linalg

def numpy_cho_solve(N, M):
    for seed in range(N):
        np.random.seed(seed)
        x = np.random.rand(M, 1)
        y = np.random.rand(M, 1)
        k = x @ x.T + np.eye(M)  # M*M
        L = np.linalg.cholesky(k)
        alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))

def scipy_cho_solve(N, M):
    for seed in range(N):
        np.random.seed(seed)
        x = np.random.rand(M, 1)
        y = np.random.rand(M, 1)
        k = x @ x.T + np.eye(M)  # M*M
        L = np.linalg.cholesky(k)
        alpha = scipy.linalg.cho_solve((L, True), y)

%timeit numpy_cho_solve(100, 100)
%timeit scipy_cho_solve(100, 100)
Output:
317 ms ± 12.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
76.9 ms ± 3.12 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
Solution

Given that you can only use numpy, np.linalg.solve is the best function for solving the linear system, since it gives accurate results. You could also use numpy's inv and transpose functions, but solve remains the accurate choice.
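A quick illustration of that point (our own sketch, not from the original answer): for a well-conditioned system, solve typically leaves a smaller residual than multiplying by the explicit inverse, and it avoids the extra work of forming the full inverse:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((200, 200)) + 200 * np.eye(200)   # diagonally dominant, well-conditioned
b = rng.random(200)

x_solve = np.linalg.solve(A, b)      # direct solve
x_inv = np.linalg.inv(A) @ b         # explicit inverse, then multiply
print(np.linalg.norm(A @ x_solve - b))  # residual is usually the smaller of the two
print(np.linalg.norm(A @ x_inv - b))
```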
The forward- and back-substitution steps of the Cholesky method are very fast but not vectorizable, so numpy cannot help there. You need a compiled function (such as the one scipy provides) - but if you cannot use scipy, you may be able to use numba (which is commonly used to produce compiled, C-speed functions from numpy code).
np.linalg.solve tackles the simple forward-substitution step by naively applying a full LU solve, so it takes far longer than a purpose-built routine (and does not exploit the Cholesky structure at all).
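To make the numba suggestion concrete, here is a minimal sketch (our own, not from the original answer; it assumes numba is installed and a 1-D right-hand side) of the forward- and back-substitution that cho_solve performs, compiled so the Python loop overhead disappears:

```python
import numpy as np
from numba import njit

@njit(cache=True)
def cho_solve_numba(L, y):
    """Solve A x = y given the lower Cholesky factor L (A = L @ L.T)."""
    n = L.shape[0]
    z = np.empty(n)
    x = np.empty(n)
    # forward substitution: L z = y
    for i in range(n):
        s = y[i]
        for j in range(i):
            s -= L[i, j] * z[j]
        z[i] = s / L[i, i]
    # back substitution: L.T x = z
    for i in range(n - 1, -1, -1):
        s = z[i]
        for j in range(i + 1, n):
            s -= L[j, i] * x[j]
        x[i] = s / L[i, i]
    return x
```

The first call pays a one-off JIT compilation cost; after that, the substitution loops run at compiled speed, which is the same reason the LAPACK-backed scipy routine wins in the benchmark above.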
Python numpy.linalg module - cholesky() example source code

We extracted the following 19 code examples from open-source Python projects to illustrate how to use numpy.linalg.cholesky().
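For orientation before the snippets (our own sketch, not one of the 19 collected examples): cholesky() factors a symmetric positive-definite matrix A into a lower-triangular L with L @ L.T == A, and raises LinAlgError otherwise:

```python
import numpy as np

A = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive definite
L = np.linalg.cholesky(A)                # lower-triangular factor
print(np.allclose(L @ L.T, A))           # True
```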
- def _udpate_item_features(self):
- # Gibbs sampling for item features
- for item_id in xrange(self.n_item):
- indices = self.ratings_csc_[:, item_id].indices
- features = self.user_features_[indices, :]
- rating = self.ratings_csc_[:, item_id].data - self.mean_rating_
- rating = np.reshape(rating, (rating.shape[0], 1))
- covar = inv(self.alpha_item +
- self.beta * np.dot(features.T, features))
- lam = cholesky(covar)
- temp = (self.beta * np.dot(features.T, rating) +
- np.dot(self.alpha_item, self.mu_item))
- mean = np.dot(covar, temp)
- temp_feature = mean + np.dot(
- lam, self.rand_state.randn(self.n_feature, 1))
- self.item_features_[item_id, :] = temp_feature.ravel()
- def _update_user_features(self):
- # Gibbs sampling for user features
- for user_id in xrange(self.n_user):
- indices = self.ratings_csr_[user_id, :].indices
- features = self.item_features_[indices, :]
- rating = self.ratings_csr_[user_id, :].data - self.mean_rating_
- rating = np.reshape(rating, (rating.shape[0], 1))
- covar = inv(
- self.alpha_user + self.beta * np.dot(features.T, features))
- lam = cholesky(covar)
- # aplha * sum(V_j * R_ij) + LAMBDA_U * mu_u
- temp = (self.beta * np.dot(features.T, rating) +
- np.dot(self.alpha_user, self.mu_user))
- # mu_i_star
- mean = np.dot(covar, temp)
- temp_feature = mean + np.dot(
- lam, self.rand_state.randn(self.n_feature, 1))
- self.user_features_[user_id, :] = temp_feature.ravel()
- def solve(A, y, delta, method):
- if method == 'ridge_reg_chol':
- R = cholesky(dot(A.T, A) + delta*np.identity(A.shape[1]))
- z = lstsq(R.T, dot(A.T, y))[0]
- x = lstsq(R, z)[0]
- elif method == 'ridge_reg_inv':
- x = dot(dot(inv(dot(A.T, A) + delta*np.identity(A.shape[1])), A.T), y)
- elif method == 'ls_mldivide':
- if delta > 0:
- print('ignoring lambda; no regularization used')
- x = lstsq(A, y)[0]
- loss = 0.5 * (dot(A, x) - y) **2
- return x.reshape(-1, 1)
- def test_lapack_endian(self):
- # For bug #1482
- a = array([[5.7998084, -2.1825367],
- [-2.1825367, 9.85910595]], dtype='>f8')
- b = array(a, dtype='<f8')
- ap = linalg.cholesky(a)
- bp = linalg.cholesky(b)
- assert_array_equal(ap, bp)
- def rand_k(self, k):
- """
- Return a random mean vector and covariance matrix from the posterior
- NIW distribution for component `k`.
- """
- k_N = self.prior.k_0 + self.counts[k]
- v_N = self.prior.v_0 + self.counts[k]
- m_N = self.m_N_numerators[k]/k_N
- S_N = self.S_N_partials[k] - k_N*np.outer(m_N, m_N)
- sigma = np.linalg.solve(cholesky(S_N).T, np.eye(self.D)) # don't understand this step
- sigma = wishart.iwishrnd(sigma, v_N, sigma)
- mu = np.random.multivariate_normal(m_N, sigma/k_N)
- return mu, sigma
- def _update_item_params(self):
- N = self.n_item
- X_bar = np.mean(self.item_features_, 0).reshape((self.n_feature, 1))
- # print 'X_bar', X_bar.shape
- S_bar = np.cov(self.item_features_.T)
- # print 'S_bar', S_bar.shape
- diff_X_bar = self.mu0_item - X_bar
- # W_{0}_star
- WI_post = inv(inv(self.WI_item) +
- N * S_bar +
- np.dot(diff_X_bar, diff_X_bar.T) *
- (N * self.beta_item) / (self.beta_item + N))
- # Note: WI_post and WI_post.T should be the same.
- # Just make sure it is symmertic here
- WI_post = (WI_post + WI_post.T) / 2.0
- # update alpha_item
- df_post = self.df_item + N
- self.alpha_item = wishart.rvs(df_post, WI_post, 1, self.rand_state)
- # update mu_item
- mu_mean = (self.beta_item * self.mu0_item + N * X_bar) / \
- (self.beta_item + N)
- mu_var = cholesky(inv(np.dot(self.beta_item + N, self.alpha_item)))
- # print 'lam', lam.shape
- self.mu_item = mu_mean + np.dot(
- mu_var, self.rand_state.randn(self.n_feature, 1))
- # print 'mu_item', self.mu_item.shape
- def _update_user_params(self):
- # same as _update_user_params
- N = self.n_user
- X_bar = np.mean(self.user_features_, 0).reshape((self.n_feature, 1))
- S_bar = np.cov(self.user_features_.T)
- # mu_{0} - U_bar
- diff_X_bar = self.mu0_user - X_bar
- # W_{0}_star
- WI_post = inv(inv(self.WI_user) +
- N * S_bar +
- np.dot(diff_X_bar, diff_X_bar.T) *
- (N * self.beta_user) / (self.beta_user + N))
- # Note: WI_post and WI_post.T should be the same.
- # Just make sure it is symmertic here
- WI_post = (WI_post + WI_post.T) / 2.0
- # update alpha_user
- df_post = self.df_user + N
- # LAMBDA_{U} ~ W(W{0}_star,df_post)
- self.alpha_user = wishart.rvs(df_post, WI_post, 1, self.rand_state)
- # update mu_user
- # mu_{0}_star = (beta_{0} * mu_{0} + N * U_bar) / (beta_{0} + N)
- mu_mean = (self.beta_user * self.mu0_user + N * X_bar) / \
- (self.beta_user + N)
- # decomposed inv(beta_{0}_star * LAMBDA_{U})
- mu_var = cholesky(inv(np.dot(self.beta_user + N, self.alpha_user)))
- # sample multivariate gaussian
- self.mu_user = mu_mean + np.dot(
- mu_var, self.rand_state.randn(self.n_feature, 1))
- def wishartrand(nu, phi):
- dim = phi.shape[0]
- chol = cholesky(phi)
- foo = np.zeros((dim, dim))
- for i in range(dim):
- for j in range(i + 1):
- if i == j:
- foo[i, j] = np.sqrt(chi2.rvs(nu - (i + 1) + 1))
- else:
- foo[i, j] = np.random.normal(0, 1)
- return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))
- def mv_normalrand(mu, sigma, size):
- lamb = cholesky(sigma)
- return mu + np.dot(lamb, np.random.randn(size))
- def isPD(B):
- """Returns true when input is positive-definite,via Cholesky"""
- try:
- _ = la.cholesky(B)
- return True
- except la.LinAlgError:
- return False
- def apply(self, f, mean, cov, pars):
- mean = mean[:, na]
- # form sigma-points from unit sigma-points
- x = mean + cholesky(cov).dot(self.unit_sp)
- # push sigma-points through non-linearity
- fx = np.apply_along_axis(f, 0, x, pars)
- # output mean
- mean_f = fx.dot(self.wm)
- # output covariance
- dfx = fx - mean_f[:, na]
- cov_f = dfx.dot(self.Wc).dot(dfx.T)
- # input-output covariance
- cov_fx = dfx.dot(self.Wc).dot((x - mean).T)
- return mean_f, cov_f, cov_fx
- def apply(self, f, mean, cov, pars):
- # method defined in terms of abstract private functions for computing mean, covariance and cross-covariance
- mean = mean[:, na]
- x = mean + cholesky(cov).dot(self.unit_sp)
- fx = self._fcn_eval(f, x, pars)
- mean_f = self._mean(self.wm, fx)
- cov_f = self._covariance(self.Wc, fx, mean_f)
- cov_fx = self._cross_covariance(self.Wcc, mean_f, mean)
- return mean_f, cov_f, cov_fx
Python numpy.linalg module - cond() example source code

We extracted the following 37 code examples from open-source Python projects to illustrate how to use numpy.linalg.cond().
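For orientation (our own sketch, not one of the 37 collected examples): cond() returns the condition number of a matrix, by default the ratio of its largest to smallest singular value, which measures how much a solve can amplify input error:

```python
import numpy as np

print(np.linalg.cond(np.eye(3)))              # 1.0: perfectly conditioned
print(np.linalg.cond(np.diag([1.0, 1e-8])))   # ~1e8: nearly singular
```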
- def aryule(c, k):
- """Solve Yule-Walker equation.
- Args:
- c (numpy array): Coefficients (i.e. autocorrelation)
- k (int): Assuming the AR(k) model
- Returns:
- numpy array: k model parameters
- Some formulations solve: C a = -c,
- but we actually solve C a = c.
- """
- a = np.zeros(k)
- # ignore a singular matrix
- C = toeplitz(c[:k])
- if not np.all(C == 0.0) and np.isfinite(ln.cond(C)):
- a = np.dot(ln.inv(C), c[1:])
- return a
- def do(self, a, b):
- c = asarray(a) # a might be a matrix
- s = linalg.svd(c, compute_uv=False)
- old_assert_almost_equal(
- s[..., 0] / s[..., -1], linalg.cond(a), decimal=5)
- def test_stacked_arrays_explicitly(self):
- A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
- assert_equal(linalg.cond(A), linalg.cond(A[None, ...])[0])
- def do(self, a, b):
- c = asarray(a) # a might be a matrix
- s = linalg.svd(c, compute_uv=False)
- old_assert_almost_equal(
- s[..., 0] / s[..., -1], linalg.cond(a, 2), decimal=5)
- def test_stacked_arrays_explicitly(self):
- A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
- assert_equal(linalg.cond(A, 2), linalg.cond(A[None, ...], 2)[0])
- def test(self):
- A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
- assert_almost_equal(linalg.cond(A, inf), 3.)
Python numpy.linalg module - det() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.linalg.det().
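For orientation (our own sketch, not one of the 50 collected examples): det() computes the determinant, and slogdet() returns its sign and log-magnitude, which is numerically safer when the determinant would overflow or underflow:

```python
import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.linalg.det(A))                 # -2.0 (up to floating-point error)
sign, logabsdet = np.linalg.slogdet(A)
print(sign * np.exp(logabsdet))         # -2.0 again, reconstructed safely
```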
- def __call__(self, params):
- print '???', params
- sd1 = params[0]
- sd2 = params[1]
- cor = params[2]
- if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
- return np.inf
- bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
- bandwidthdet = la.det(bandwidth)
- bandwidthinv = la.inv(bandwidth)
- diff = sample[self.__iidx] - sample[self.__jidx]
- temp = diff.dot(bandwidthinv.T)
- temp *= temp
- e = np.exp(np.sum(temp, axis=1))
- s = np.sum(e**(-.25) - 4 * e**(-.5))
- cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
- print '!!!', cost
- return cost / 10000.
- def do(self, a, b):
- d = linalg.det(a)
- (s, ld) = linalg.slogdet(a)
- if asarray(a).dtype.type in (single, double):
- ad = asarray(a).astype(double)
- else:
- ad = asarray(a).astype(cdouble)
- ev = linalg.eigvals(ad)
- assert_almost_equal(d, multiply.reduce(ev, axis=-1))
- assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
- s = np.atleast_1d(s)
- ld = np.atleast_1d(ld)
- m = (s != 0)
- assert_almost_equal(np.abs(s[m]), 1)
- assert_equal(ld[~m], -inf)
- def test_byteorder_check():
- # Byte order check should pass for native order
- if sys.byteorder == 'little':
- native = '<'
- else:
- native = '>'
- for dtt in (np.float32, np.float64):
- arr = np.eye(4, dtype=dtt)
- n_arr = arr.newbyteorder(native)
- sw_arr = arr.newbyteorder('S').byteswap()
- assert_equal(arr.dtype.byteorder, '=')
- for routine in (linalg.inv, linalg.det, linalg.pinv):
- # normal call
- res = routine(arr)
- # Native but not '='
- assert_array_equal(res, routine(n_arr))
- # Swapped
- assert_array_equal(res, routine(sw_arr))
- def F_value_multivariate(ER, EF, dfnum, dfden):
- """
- Returns an F-statistic given the following:
- ER = error associated with the null hypothesis (the Restricted model)
- EF = error associated with the alternate hypothesis (the Full model)
- dfR = degrees of freedom the Restricted model
- dfF = degrees of freedom associated with the Restricted model
- where ER and EF are matrices from a multivariate F calculation.
- """
- if type(ER) in [IntType, FloatType]:
- ER = N.array([[ER]])
- if type(EF) in [IntType, FloatType]:
- EF = N.array([[EF]])
- n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
- d_en = LA.det(EF) / float(dfden)
- return n_um / d_en
- #####################################
- ####### SUPPORT FUNCTIONS ########
- #####################################
- def _int_var_rbf(self, X, hyp, jitter=1e-8):
- """
- Posterior integral variance of the Gaussian Process quadrature.
- X - vector (1,2*xdim**2+xdim)
- hyp - kernel hyperparameters [s2,el_1,... el_d]
- """
- # reshape X to SP matrix
- X = np.reshape(X, (self.n, self.d))
- # set kernel hyper-parameters
- s2, el = hyp[0], hyp[1:]
- self.kern.param_array[0] = s2 # variance
- self.kern.param_array[1:] = el # lengthscale
- K = self.kern.K(X)
- L = np.diag(el ** 2)
- # posterior variance of the integral
- ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
- postvar = -ks.dot(solve(K + jitter * np.eye(self.n), ks.T))
- return postvar
- def _int_var_rbf_hyp(self, jitter=1e-8):
- """
- Posterior integral variance as a function of hyper-parameters
- :param hyp: RBF kernel hyper-parameters [s2, el_1, ..., el_d]
- :param X: sigma-points
- :param jitter: numerical jitter (for stabilizing computations)
- :return: posterior integral variance
- """
- # reshape X to SP matrix
- X = np.reshape(X, (self.n, self.d))
- # set kernel hyper-parameters
- s2, el = 1, hyp # sig_var hyper always set to 1
- self.kern.param_array[0] = s2 # variance
- self.kern.param_array[1:] = el # lengthscale
- K = self.kern.K(X)
- L = np.diag(el ** 2)
- # posterior variance of the integral
- ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
- postvar = s2 * np.sqrt(det(2 * inv(L) + np.eye(self.d))) ** -1 - ks.dot(
- solve(K + jitter * np.eye(self.n), ks.T))
- return postvar
- def genCovariace(size):
- MaxIter = 1e+6
- S = np.zeros((size,size))
- itn = 0
- while(alg.det(S) <= 1e-3 and itn < MaxIter):
- itn = itn + 1
- #print int(numpy.log2(size))*size
- G6 = GenRndGnm(PUNGraph, size, int((size*(size-1))*0.05))
- S = np.zeros((size,size))
- for EI in G6.Edges():
- S[EI.GetSrcNId(), EI.GetDstNId()] = 0.6
- S = S + S.T + S.max()*np.matrix(np.eye(size))
- if itn == MaxIter:
- print 'fail to find an invertible sparse inverse covariance matrix'
- S = np.asarray(S)
- return S
- def F(self, other):
- """
- Compute the fundamental matrix with respect to other camera
- http://www.robots.ox.ac.uk/~vgg/hzbook/code/vgg_multiview/vgg_F_from_P.m
- The computed fundamental matrix, given by the formula 17.3 (p. 412) in
- Hartley & Zisserman book (2nd ed.).
- Use as:
- F_10 = poses[0].F(poses[1])
- l_1 = F_10 * x_0
- """
- X1 = self.P[[1,2],:]
- X2 = self.P[[2,0],:]
- X3 = self.P[[0,1],:]
- Y1 = other.P[[1,2],:]
- Y2 = other.P[[2,0],:]
- Y3 = other.P[[0,1],:]
- F = np.float64([
- [det(np.vstack([X1, Y1])), det(np.vstack([X2, Y1])), det(np.vstack([X3, Y1]))],
- [det(np.vstack([X1, Y2])), det(np.vstack([X2, Y2])), det(np.vstack([X3, Y2]))],
- [det(np.vstack([X1, Y3])), det(np.vstack([X2, Y3])), det(np.vstack([X3, Y3]))]
- ])
- return F # / F[2,2]
- def __init__(self, density, bandwidth):
- self.__density = density
- self.__bandwidth = bandwidth
- self.__bandwidthinv = la.inv(bandwidth)
- self.__bandwidthdet = la.det(bandwidth)
- def test_zero(self):
- assert_equal(linalg.det([[0.0]]), 0.0)
- assert_equal(type(linalg.det([[0.0]])), double)
- assert_equal(linalg.det([[0.0j]]), 0.0)
- assert_equal(type(linalg.det([[0.0j]])), cdouble)
- assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
- assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
- assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
- assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
- assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
- assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- assert_equal(np.linalg.det(x).dtype, dtype)
- ph, s = np.linalg.slogdet(x)
- assert_equal(s.dtype, get_real_dtype(dtype))
- assert_equal(ph.dtype, dtype)
- for dtype in [single, double, csingle, cdouble]:
- yield check, dtype
- def is_left(x0, x1, x2):
- """Returns True if x0 is left of the line between x1 and x2,
- False otherwise. Ref: https://stackoverflow.com/questions/1560492"""
- assert x1.shape == x2.shape == (2,)
- matrix = array([x1-x0, x2-x0])
- if len(x0.shape) == 2:
- matrix = matrix.transpose((1, 2, 0))
- return det(matrix) > 0
- def log_prob(self, x):
- from numpy.linalg import det
- mu = self.mu
- S = self.sigma
- D = len(mu)
- q = self.__q(x)
- return -0.5 * (D * log(2 * pi) + log(abs(det(S)))) - 0.5 * q ** 2
- def rmsd(X, Y):
- """
- Calculate the root mean squared deviation (RMSD) using Kabsch's formula.
- @param X: (n,d) input vector
- @type X: numpy array
- @param Y: (n,d) input vector
- @type Y: numpy array
- @return: rmsd value between the input vectors
- @rtype: float
- """
- from numpy import sum, dot, sqrt, clip, average
- from numpy.linalg import svd, det
- X = X - X.mean(0)
- Y = Y - Y.mean(0)
- R_x = sum(X ** 2)
- R_y = sum(Y ** 2)
- V, L, U = svd(dot(Y.T, X))
- if det(dot(V, U)) < 0.:
- L[-1] *= -1
- return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X))
- def gauss2D(x, mean, cov):
- # x, mean, cov are numpy arrays
- z = -np.dot(np.dot((x-mean).T, inv(cov)), (x-mean))/2.0
- temp = pow(sqrt(2.0*pi), len(x))*sqrt(det(cov))
- return (1.0/temp)*exp(z)
- def emit_prob_updated(self, X, post_state): # M-step update of the Gaussian emission parameters
- for k in range(self.n_state):
- for j in range(self.x_size):
- self.emit_means[k][j] = np.sum(post_state[:,k] *X[:,j]) / np.sum(post_state[:,k])
- X_cov = np.dot((X-self.emit_means[k]).T, (post_state[:,k]*(X-self.emit_means[k]).T).T)
- self.emit_covars[k] = X_cov / np.sum(post_state[:,k])
- if det(self.emit_covars[k]) == 0: # guard against a singular covariance
- self.emit_covars[k] = self.emit_covars[k] + 0.01*np.eye(len(X[0]))
- def V(self):
- n1,n2,n3,n4 = self.getNodes()
- V = np.array([[1, n1.x, n1.y, n1.z],
- [1, n2.x, n2.y, n2.z],
- [1, n3.x, n3.y, n3.z],
- [1, n4.x, n4.y, n4.z]])
- return la.det(V)/6
- def get_gauss_pdf_value(x, mu, cov):
- p = len(mu)
- xs = x - mu
- covi = inv(cov)
- arg = -0.5 * (xs.T).dot(covi.dot(xs))
- # normalization constant
- C = (((2.0 * np.pi)**p)*det(cov))**(-0.5)
- prob = C * np.exp(arg)
- return prob
- def calc_log_Z(a, b, V_inv):
- # Equation 19.
- return gammaln(a) + log(sqrt(1./det(V_inv))) - a * np.log(b)
- def dispersion_relation_analytical(omega,beta,tau,Tpar_Tperp,kperp,kpar,gam,eta,nb,theta,k):
- k2=kperp**2+kpar**2
- b=0.5*kperp**2/Tpar_Tperp
- inv_kpar=1./kpar
- inv_kperp=1./kperp
- inv_b=1./b
- summand=get_sums_analytical(kperp,omega,nb)
- M = 1j*eta*k2*inv_kpar + omega - 0.5*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[6]-summand[5]) + 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kpar*(summand[8]-summand[7]) + 1j*eta*inv_kpar*(summand[2]-summand[3])
- N = 1j/beta*k2*inv_kperp + 1j*omega*inv_kperp*(summand[9]+inv_b*summand[10]-0.5*(summand[6]+3*summand[5])) - 1j*inv_kperp*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]-0.5*(summand[8]+3*summand[7]))
- O = 0.5j*gam/tau*(-inv_kperp*(summand[2]-summand[3]) + 0.5*kperp*inv_kpar*(summand[6]-summand[5]))
- P = 1j*kpar/beta - 1j*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[9]+inv_b*summand[10]+0.5*(summand[6]-summand[5])) + 1j*inv_kpar*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]+0.5*(summand[8]-summand[7])) + eta*inv_kpar*(summand[2]+summand[3])
- Q = -(1j*eta*k2+omega*kpar)*inv_kperp + 0.5*omega*inv_kperp*(summand[6]-summand[5]) - 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kperp*(summand[8]-summand[7])
- R = -0.5*gam/tau*(inv_kperp*(summand[2]+summand[3]) + kperp*inv_kpar*(summand[9] + inv_b*summand[10] + 0.5*(summand[6]-summand[5])))
- S = -(1j*eta*kperp**2*inv_kpar+omega)*1j*inv_kperp*inv_kpar*Tpar_Tperp*(summand[0]+summand[1]) + 1j*inv_kpar*inv_kperp*(Tpar_Tperp-1)*(summand[2]+summand[3]) + 2*eta*kperp*inv_kpar*summand[4]
- T = -Tpar_Tperp*omega*inv_kperp**2*(summand[0]-summand[1]) + inv_kperp**2*(Tpar_Tperp-1)*(summand[2]-summand[3])
- U = -1 - 0.5*gam/tau*(2*summand[4]+Tpar_Tperp*inv_kpar*(summand[0]+summand[1]))
- global mat
- mat=[[M,N,O],[P,Q,R],[S,T,U]]
- if det(mat)>1: print(summand[6],-summand[5],file=outfile)
- return det(mat)
- #wrapper for analytical or numerical dispersion relation
- def update (self):
- pass
- # def execute(self,refholder):
- # input_matrix = refholder.matrices[self.inputs[0].links[0].from_socket.matrix_ref]
- # answer_matrix = np.zeros(16)
- # if la.det(input_matrix) == 0:
- # print("Matrix has no inverse")
- # else:
- # answer_matrix = la.inv(input_matrix)
- # self.outputs[0].matrix_ref = refholder.getRefForMatrix(answer_matrix)
- def __init__(self, ctr, am):
- self.n = len(ctr) # dimension
- self.ctr = np.array(ctr) # center coordinates
- self.am = np.array(am) # precision matrix (inverse of covariance)
- # Volume of ellipsoid is the volume of an n-sphere divided
- # by the (determinant of the) Jacobian associated with the
- # transformation, which by definition is the precision matrix.
- self.vol = vol_prefactor(self.n) / np.sqrt(linalg.det(self.am))
- # The eigenvalues (l) of `a` are (a^-2,b^-2,...) where
- # (a,b,...) are the lengths of principle axes.
- # The eigenvectors (v) are the normalized principle axes.
- l, v = linalg.eigh(self.am)
- if np.all((l > 0.) & (np.isfinite(l))):
- self.axlens = 1. / np.sqrt(l)
- else:
- raise ValueError("The input precision matrix defining the "
- "ellipsoid {0} is apparently singular with "
- "l={1} and v={2}.".format(self.am, l, v))
- # Scaled eigenvectors are the axes,where `axes[:,i]` is the
- # i-th axis. Multiplying this matrix by a vector will transform a
- # point in the unit n-sphere to a point in the ellipsoid.
- self.axes = np.dot(v, np.diag(self.axlens))
- # Amount by which volume was increased after initialization (i.e.
- # cumulative factor from `scale_to_vol`).
- self.expand = 1.
- def correct(self, r):
- """Perform correction (measurement) update."""
- zhat, H = r.mfn(self.x)
- dz = r.z - zhat
- S = H @ self.P @ H.T + r.R
- SI = inv(S)
- K = self.P @ H.T @ SI
- self.x += K @ dz
- self.P -= K @ H @ self.P
- score = dz.T @ SI @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
- self._calc_bBox()
- return float(score)
- def nll(self, r):
- """Get the nll score of assigning a measurement to the filter."""
- zhat, H = r.mfn(self.x)
- dz = r.z - zhat
- S = H @ self.P @ H.T + r.R
- score = dz.T @ inv(S) @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
- return float(score)
- def _cov_and_inv(self, n, indices):
- """
- Calculate covariance around local support vector
- and also the inverse
- """
- cov = self._cov(indices, n)
- det = la.det(cov)
- while det <= 0:
- cov += sp.identity(cov.shape[0]) * self.EPS
- det = la.det(cov)
- inv_cov = la.inv(cov)
- return cov, inv_cov, det
- def weights_rbf(self, unit_sp, hypers):
- # BQ weights for RBF kernel with given hypers,computations adopted from the GP-ADF code [Deisenroth] with
- # the following assumptions:
- # (A1) the uncertain input is zero-mean with unit covariance
- # (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
- d, n = unit_sp.shape
- # GP kernel hyper-parameters
- alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var']
- assert len(el) == d
- # pre-allocation for convenience
- eye_d, eye_n = np.eye(d), np.eye(n)
- iLam1 = np.atleast_2d(np.diag(el ** -1)) # sqrt(Lambda^-1)
- iLam2 = np.atleast_2d(np.diag(el ** -2))
- inp = unit_sp.T.dot(iLam1) # sigmas / el[:,na] (x - m)^T*sqrt(Lambda^-1) # (numSP,xdim)
- K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
- iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
- B = iLam2 + eye_d # (D,D)
- c = alpha ** 2 / np.sqrt(det(B))
- t = inp.dot(inv(B)) # inn*(P + Lambda)^-1
- l = np.exp(-0.5 * np.sum(inp * t, 1)) # (N,1)
- zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
- inp = inp.dot(iLam1)
- R = 2 * iLam2 + eye_d
- t = 1 / np.sqrt(det(R))
- L = np.exp((zet[:, na] + zet[:, na].T) + maha(inp, -inp, V=0.5 * inv(R)))
- q = c * l # evaluations of the kernel mean map (from the viewpoint of RHKS methods)
- # mean weights
- wm_f = q.dot(iK)
- iKQ = iK.dot(t * L)
- # covariance weights
- wc_f = iKQ.dot(iK)
- # cross-covariance "weights"
- wc_fx = np.diag(q).dot(iK)
- # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
- self.D = inv(eye_d + np.diag(el ** 2)) # S(S+Lam)^-1; for S=I,(I+Lam)^-1
- # model variance; to be added to the covariance
- # this diagonal form assumes independent GP outputs (cov(f^a,f^b) = 0 for all a,b: a neq b)
- self.model_var = np.diag((alpha ** 2 - np.trace(iKQ)) * np.ones((d, 1)))
- return wm_f, wc_f, wc_fx
That concludes today's look at Python numpy module linalg() example source code and python numpy.linalg. Thank you for reading. For more on the performance gap between np.linalg.solve and scipy.linalg.cho_solve, or on the numpy.linalg cholesky(), cond(), and det() example source code, you can search this site.