import numpy as np
import matplotlib.pyplot as plt
def cg(A, b, init_x, max_num_iterations, use_altered_variant):
"""
:param A: The NxN, positive-definite matrix of the linear system Ax = b
:param b: The right-hand-side vector of the linear system Ax = b
:param init_x: The initial
:param max_num_iterations: The PCG iterates either until convergence or until it reaches max_num_iterations
:param use_altered_variant: Choose whether to use "r = b - Ax" (when use_altered_variant=False) or "r = b + Ax" (when use_altered_variant=True)
:return: An array of intermediate iteration steps `x`.
"""
    eps = 1e-6
    x = init_x.copy()
    solution_steps = [x.copy()]
    # Initial residual; the altered variant flips the sign of the Ax term
    if use_altered_variant:
        r = b + A @ x
    else:
        r = b - A @ x
    if np.linalg.norm(r) < eps:
        return np.array(solution_steps)
    p = r.copy()
    for iter_idx in range(max_num_iterations):
        # Step length along the current search direction
        alpha = r.dot(r) / p.dot(A @ p)
        x = x + alpha * p
        solution_steps.append(x.copy())
        old_r_dot_r = r.dot(r)
        # Update the residual recursively instead of recomputing it from scratch
        r = r - alpha * (A @ p)
        if np.linalg.norm(r) < eps:
            return np.array(solution_steps)
        # Update the conjugate search direction
        beta = r.dot(r) / old_r_dot_r
        p = r + beta * p
    return np.array(solution_steps)
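
# Minimal usage sketch (illustrative only; A_demo, b_demo and the expected behaviour are
# assumptions, not part of the experiment below). On a small hand-built SPD system, the
# standard variant (use_altered_variant=False) should converge within N iterations:
#   A_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
#   b_demo = np.array([1.0, 2.0])
#   demo_steps = cg(A_demo, b_demo, np.zeros(2), 10, use_altered_variant=False)
#   print(demo_steps[-1])  # expected to be close to np.linalg.solve(A_demo, b_demo)
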
if __name__ == '__main__':
    num_residuals = 50
    system_size = 2
    num_cg_iters = 10
    # Initialize the Jacobian matrix and the residual vector of a least-squares problem to random values
    J = np.random.rand(num_residuals, system_size) - np.random.rand(system_size)
    r = np.random.rand(num_residuals) - 0.5
    # Construct the normal equations and choose a random initial guess for the solution
    A = J.T @ J
    b = -J.T @ r
    x_init = np.random.rand(system_size)
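    # Reference solution via a direct solve (sketch for sanity-checking only; assumes A = J^T J
    # is non-singular, which is essentially guaranteed here since num_residuals >> system_size)
    x_ref = np.linalg.solve(A, b)
    print('x_ref = ', x_ref, ' (direct solve, for reference)')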
    # Run the algorithms
    x_og_steps = cg(A, b, x_init, num_cg_iters, use_altered_variant=False)  # Solution of the original linear system using the original CG
    x_al_steps = cg(A, b, x_init, num_cg_iters, use_altered_variant=True)  # Solution of the original linear system using the altered CG
    x_su_steps = cg(A, -b, x_init, num_cg_iters, use_altered_variant=False)  # Solution of the altered linear system using the original CG
    # Compare the solutions
    print('x_og = ', x_og_steps[-1], ', num steps: ', x_og_steps.shape[0] - 1)
    print('x_al = ', x_al_steps[-1], ', num steps: ', x_al_steps.shape[0] - 1)
    print('x_su = ', x_su_steps[-1], ', num steps: ', x_su_steps.shape[0] - 1)
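    # Extra numerical check (illustrative only): compare the final iterates of the three runs
    print('x_og close to x_al: ', np.allclose(x_og_steps[-1], x_al_steps[-1], atol=1e-4))
    print('x_al close to x_su: ', np.allclose(x_al_steps[-1], x_su_steps[-1], atol=1e-4))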
    # Plot the steps of each algorithm
    plt.plot(x_og_steps[:, 0], x_og_steps[:, 1], '--o', color='green', label='x_og')
    plt.plot(x_al_steps[:, 0], x_al_steps[:, 1], '--o', color='red', label='x_al')
    plt.plot(x_su_steps[:, 0], x_su_steps[:, 1], '--o', color='blue', label='x_su')
    plt.plot(x_init[0], x_init[1], '*', color='black', label='x_init', markersize=15)
    plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=5)
    plt.axis('square')
    plt.show()