Daily 31

Solve the unconstrained optimization problem: $$f(\mathbf{x}) = (x_1 + 10x_2)^2 + 5(x_3 - x_4)^2 + (x_2 - 2x_3)^4 + 10(x_1 - x_4)^4$$
(2) Use the termination criterion $$\|\nabla f(\mathbf{x}^{k})\| \leq 10^{-6}$$;
(3) Implement and debug the FR conjugate gradient method (the implementation below is written in Python);
(4) Use several of the same initial points as in Experiments 2 and 3. The Experiment 2 points are np.array([0, 0, 0, 0]) (initial point 1), np.array([1, -1, 2, -2]) (initial point 2), and np.array([-5, 3, 1, 4]) (initial point 3). Compare and analyze the results (optimal solution, optimal value, and convergence speed as measured by iteration count).
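For reference in the implementation below, differentiating f term by term gives the gradient used in gradient_function:

$$\nabla f(\mathbf{x}) = \begin{pmatrix} 2(x_1 + 10x_2) + 40(x_1 - x_4)^3 \\ 20(x_1 + 10x_2) + 4(x_2 - 2x_3)^3 \\ 10(x_3 - x_4) - 8(x_2 - 2x_3)^3 \\ -10(x_3 - x_4) - 40(x_1 - x_4)^3 \end{pmatrix}$$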

python:
import numpy as np

def target_function(x):
    """Objective function."""
    x1, x2, x3, x4 = x
    term1 = (x1 + 10*x2)**2
    term2 = 5*(x3 - x4)**2
    term3 = (x2 - 2*x3)**4
    term4 = 10*(x1 - x4)**4
    return term1 + term2 + term3 + term4

def gradient_function(x):
    """Gradient of the objective function."""
    x1, x2, x3, x4 = x
    grad1 = 2*(x1 + 10*x2) + 40*(x1 - x4)**3    # ∂f/∂x1
    grad2 = 20*(x1 + 10*x2) + 4*(x2 - 2*x3)**3  # ∂f/∂x2
    grad3 = 10*(x3 - x4) - 8*(x2 - 2*x3)**3     # ∂f/∂x3
    grad4 = -10*(x3 - x4) - 40*(x1 - x4)**3     # ∂f/∂x4
    return np.array([grad1, grad2, grad3, grad4])

def backtracking_line_search(f, grad_f, x, d, c=1e-4, rho=0.5, alpha0=1.0):
    """Backtracking line search (Armijo condition)."""
    alpha = alpha0
    fx = f(x)
    slope = np.dot(grad_f(x), d)  # directional derivative along d
    while f(x + alpha*d) > fx + c * alpha * slope:
        alpha *= rho
    return alpha

def fr_conjugate_gradient(f, grad_f, x0, epsilon=1e-6, max_iter=1000):
    """Fletcher-Reeves (FR) conjugate gradient method."""
    x = x0.copy()
    g_prev = grad_f(x)
    d = -g_prev  # initial search direction: steepest descent
    iter_count = 0
    history = [(x.copy(), f(x))]

    while iter_count < max_iter:
        norm_g = np.linalg.norm(g_prev)
        if norm_g < epsilon:
            break

        # Backtracking line search for the step size
        alpha = backtracking_line_search(f, grad_f, x, d)
        x_new = x + alpha * d
        g_new = grad_f(x_new)

        # FR formula for beta
        beta = np.dot(g_new, g_new) / (np.dot(g_prev, g_prev) + 1e-8)  # guard against a zero denominator
        d = -g_new + beta * d  # update the search direction

        x = x_new
        g_prev = g_new
        history.append((x.copy(), f(x)))
        iter_count += 1

    return x, f(x), iter_count, history
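The direction update implemented above is the standard Fletcher-Reeves rule (the small constant added to the denominator in the code is only a numerical safeguard):

$$\beta_k^{\mathrm{FR}} = \frac{\nabla f(\mathbf{x}^{k+1})^{\top} \nabla f(\mathbf{x}^{k+1})}{\nabla f(\mathbf{x}^{k})^{\top} \nabla f(\mathbf{x}^{k})}, \qquad \mathbf{d}^{k+1} = -\nabla f(\mathbf{x}^{k+1}) + \beta_k^{\mathrm{FR}} \mathbf{d}^{k}$$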

Initial points from Experiment 2:

initial_points = [
    np.array([0.0, 0.0, 0.0, 0.0]),    # initial point 1
    np.array([1.0, -1.0, 2.0, -2.0]),  # initial point 2
    np.array([-5.0, 3.0, 1.0, 4.0])    # initial point 3
]

if __name__ == "__main__":
    epsilon = 1e-6
    max_iter = 1000

    for i, x0 in enumerate(initial_points, 1):
        print(f"===== Initial point {i} =====")
        x_opt, f_opt, iter_num, _ = fr_conjugate_gradient(
            f=target_function,
            grad_f=gradient_function,
            x0=x0,
            epsilon=epsilon,
            max_iter=max_iter
        )
        print(f"Optimal solution: {np.round(x_opt, 6)}")
        print(f"Optimal value: {f_opt:.6e}")
        print(f"Iterations: {iter_num}")
        print("\n")