using LinearAlgebra
function classical_gram_schmidt_alt(matrix)
    # Orthonormalise the columns of `matrix` with classical Gram-Schmidt.
    #
    # For each column, the projections onto all previously orthonormalised
    # columns are accumulated first and subtracted in one step — the
    # numerically weaker "classical" formulation (contrast with
    # `modified_gram_schmidt`).
    #
    # Returns a new matrix of the same size with orthonormal columns.
    # Assumes the columns of `matrix` are linearly independent; a zero
    # column after projection would produce NaNs in the normalisation.
    num_vectors = size(matrix, 2)
    orth_matrix = zeros(size(matrix))
    for vec_idx = 1:num_vectors
        orth_matrix[:, vec_idx] = matrix[:, vec_idx]
        # Accumulate the projection onto the span of the previous columns.
        # Named `projection` rather than `sum` to avoid shadowing Base.sum.
        projection = zeros(size(matrix, 1))
        for span_base_idx = 1:(vec_idx - 1)
            projection += dot(orth_matrix[:, span_base_idx], orth_matrix[:, vec_idx]) * orth_matrix[:, span_base_idx]
        end
        orth_matrix[:, vec_idx] -= projection
        # Normalise the now-orthogonal column.
        orth_matrix[:, vec_idx] /= norm(orth_matrix[:, vec_idx])
    end
    return orth_matrix
end
In Julia, modified Gram-Schmidt can be implemented as follows.
using LinearAlgebra
function modified_gram_schmidt(matrix)
    # Orthonormalise the columns of `matrix` with modified Gram-Schmidt.
    #
    # As soon as a column is normalised, its component is removed from all
    # *remaining* columns, which is numerically more stable than the
    # classical variant. Returns a new matrix with orthonormal columns.
    q = copy(matrix)
    ncols = size(q, 2)
    for k in 1:ncols
        # Normalise column k and keep it for the elimination step below.
        qk = q[:, k] / norm(q[:, k])
        q[:, k] = qk
        # Remove the qk-component from every column to the right.
        for j in (k + 1):ncols
            q[:, j] -= dot(q[:, j], qk) * qk
        end
    end
    return q
end
# Compare the loss of orthogonality of classical vs. modified Gram-Schmidt
# on increasingly large (regularised) Hilbert matrices, which are famously
# ill-conditioned.
# NOTE(review): as the document is ordered, `classical_gram_schmidt` is only
# defined further below — the definitions must precede this script when run.
error_cgs = Float64[]  # typed literals keep the globals concretely typed
error_mgs = Float64[]
for n = 1:10
    dim = 2^n
    H = [1.0/(i+j-1) for i=1:dim, j=1:dim] + 0.00001 * I # regularised Hilbert matrix
    eye = Matrix{Float64}(I, dim, dim) # identity matrix
    H_cgs = classical_gram_schmidt(H)
    H_mgs = modified_gram_schmidt(H)
    # norm(A, Inf) treats the matrix as a vector: max absolute entry of the
    # deviation of QᵀQ from the identity.
    push!(error_cgs, norm(eye - transpose(H_cgs)*H_cgs, Inf))
    push!(error_mgs, norm(eye - transpose(H_mgs)*H_mgs, Inf))
end
Please note that the error is expressed in multiples of machine epsilon here.
As we can see, the error is increasing much faster in classical Gram-Schmidt than in modified Gram-Schmidt.
It is interesting to note that the following formulation of Gram-Schmidt algorithm (which is really just a better implementation of classical Gram-Schmidt) is referred to as the modified Gram-Schmidt by Schwarz and Rutishauser in [3].
using LinearAlgebra
function classical_gram_schmidt(matrix)
    # Orthonormalise the columns of `matrix` (Schwarz–Rutishauser variant
    # of classical Gram-Schmidt): each previously computed basis vector is
    # projected out of the current column immediately, one at a time,
    # before the column is normalised.
    #
    # Returns a new matrix of the same size with orthonormal columns.
    q = copy(matrix)
    for col in axes(q, 2)
        # Subtract the component along every earlier orthonormal column.
        for prev in 1:(col - 1)
            coeff = dot(q[:, prev], q[:, col])
            q[:, col] -= coeff * q[:, prev]
        end
        # Normalise the remaining orthogonal part.
        q[:, col] /= norm(q[:, col])
    end
    return q
end
Notes
All the code was evaluated with Julia version 1.5.0 (2020-08-01) using the official Julia Docker image.
References
[1] N. J. Higham, Accuracy and Stability of Numerical Algorithms, Society for Industrial and Applied Mathematics, 2002
[2] G. H. Golub and C. F. Van Loan, Matrix Computations, The Johns Hopkins University Press, 1996.
[3] W. Gander, Algorithms for the QR-Decomposition, Eidgenössische Technische Hochschule, Research Report No. 80-02, 1980. Retyped 2003
[4] S. J. Leon, A. Björck, and W. Gander, “Gram-Schmidt orthogonalization: 100 years and more,” Numerical Linear Algebra with Applications, vol. 20, no. 3, Art. no. 3, 2012
[5] E. Schmidt, “Zur Theorie der linearen und nichtlinearen Integralgleichungen I. Teil: Entwicklung willkürlicher Funktionen nach Systemen vorgeschriebener,” Mathematische Annalen, vol. 63, pp. 433–476, 1907.
[6] J. P. Gram, “Ueber die Entwickelung reeller Functionen in Reihen mittels der Methode der kleinsten Quadrate,” Journal für die reine und angewandte Mathematik, vol. 94, pp. 41–73, 1883.
[7] Y. K. Wong, “An application of orthogonalization process to the theory of least squares,” Annals of Mathematical Statistics, vol. 6, pp. 53–75, 1935.
原文见博客
https://laurenthoeltgen.name/post/gram-schmidt/