5.23 Programming Newton's Method

The MATLAB code below implements Newton's method with an Armijo backtracking line search and applies it to the four-variable test function f(x) = (x1 + 10*x2)^2 + 5*(x3 - x4)^2 + (x2 - 2*x3)^4 + 10*(x1 - x4)^4 (Powell's singular function), using the initial point and tolerance given in the driver script.
% newtons_method.m: Newton's method with a backtracking (Armijo) line search.
function [x_opt, f_opt, iterations] = newtons_method(f, gradient, hessian, x0, epsilon)
x = x0;
iterations = 0;
% Iterate until the gradient norm falls below the tolerance epsilon.
while norm(gradient(x)) > epsilon
    H = hessian(x);
    grad = gradient(x);
    % Newton direction: solve H*p = -grad (backslash avoids forming inv(H) explicitly).
    p = -H \ grad;
    % Choose the step length by backtracking line search.
    alpha = backtracking_line_search(f, gradient, x, p);
    x = x + alpha*p;
    iterations = iterations + 1;
end
x_opt = x;
f_opt = f(x);
end
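If the Hessian is not positive definite at some iterate, the Newton direction -H\grad need not be a descent direction, and the line search may then fail to find an acceptable step. A minimal safeguard is sketched below under that assumption; safeguarded_newton_direction is a hypothetical helper, not part of the original solution. It checks positive definiteness with a Cholesky factorization and falls back to steepest descent.

% Hypothetical helper (not in the original code): a safeguarded search direction.
function p = safeguarded_newton_direction(gradient, hessian, x)
H = hessian(x);
grad = gradient(x);
[R, flag] = chol(H);            % flag == 0 only when H is positive definite
if flag == 0
    p = -(R \ (R' \ grad));     % Newton step: solve H*p = -grad using H = R'*R
else
    p = -grad;                  % fall back to the steepest-descent direction
end
end

To try it, the line p = -H \ grad in newtons_method would be replaced by p = safeguarded_newton_direction(gradient, hessian, x).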
% Define the objective function f(x).
f = @(x) (x(1) + 10*x(2))^2 + 5*(x(3) - x(4))^2 + (x(2) - 2*x(3))^4 + 10*(x(1) - x(4))^4;
% Define the gradient vector gradient(x).
gradient = @(x) [2*(x(1) + 10*x(2)) + 40*(x(1) - x(4))^3;
                 20*(x(1) + 10*x(2)) + 4*(x(2) - 2*x(3))^3;
                 10*(x(3) - x(4)) - 8*(x(2) - 2*x(3))^3;
                 -10*(x(3) - x(4)) - 40*(x(1) - x(4))^3];
% Define the Hessian matrix hessian(x).
hessian = @(x) [2 + 120*(x(1) - x(4))^2, 20, 0, -120*(x(1) - x(4))^2;
                20, 200 + 12*(x(2) - 2*x(3))^2, -24*(x(2) - 2*x(3))^2, 0;
                0, -24*(x(2) - 2*x(3))^2, 10 + 48*(x(2) - 2*x(3))^2, -10;
                -120*(x(1) - x(4))^2, 0, -10, 10 + 120*(x(1) - x(4))^2];
% Set the initial point and the termination tolerance.
x0 = [1; 1; 1; 1];
epsilon = 1e-6;
% Call Newton's method to solve the optimization problem.
[x_opt, f_opt, iterations] = newtons_method(f, gradient, hessian, x0, epsilon);
disp('Optimal solution:');
disp(x_opt);
disp('Optimal value:');
disp(f_opt);
disp('Number of iterations:');
disp(iterations);
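As a sanity check on the hand-derived derivatives, the analytic gradient can be compared against a central finite-difference approximation at the starting point. This is an optional sketch, not part of the original solution; the step size h = 1e-6 is an assumed value.

% Optional check: compare the analytic gradient with central finite differences at x0.
h = 1e-6;
fd_grad = zeros(4, 1);
for i = 1:4
    e = zeros(4, 1);
    e(i) = h;
    fd_grad(i) = (f(x0 + e) - f(x0 - e)) / (2*h);   % central difference in coordinate i
end
disp('Max |analytic - finite-difference| gradient component at x0:');
disp(max(abs(gradient(x0) - fd_grad)));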
% backtracking_line_search.m: Armijo backtracking line search.
function alpha = backtracking_line_search(f, gradient, x, p, rho, c)
% Inputs:
%   f        - objective function, called as f(x)
%   gradient - gradient of the objective, called as gradient(x)
%   x        - current iterate
%   p        - search direction
%   rho      - backtracking factor in (0, 1), e.g. 0.5
%   c        - sufficient-decrease coefficient, a small positive number, e.g. 0.1
% Use default values when rho and c are not supplied (as in the call from newtons_method).
if nargin < 5, rho = 0.5; end
if nargin < 6, c = 0.1; end
% Start from the full Newton step.
alpha = 1;
fx = f(x);
slope = gradient(x)' * p;   % directional derivative of f at x along p
% Shrink alpha until the Armijo sufficient-decrease condition holds.
while f(x + alpha*p) > fx + c*alpha*slope
    alpha = alpha * rho;
end
end
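The loop above terminates only when the Armijo condition is eventually satisfied, which is guaranteed when p is a descent direction; otherwise alpha keeps shrinking. A defensive variant, sketched here with a hypothetical cap max_backtracks that is not in the original code, bounds the number of step reductions:

% Hypothetical variant: Armijo backtracking with a cap on the number of step reductions.
function alpha = backtracking_line_search_capped(f, gradient, x, p, rho, c, max_backtracks)
if nargin < 5, rho = 0.5; end
if nargin < 6, c = 0.1; end
if nargin < 7, max_backtracks = 50; end
alpha = 1;
fx = f(x);
slope = gradient(x)' * p;        % directional derivative along p
for k = 1:max_backtracks
    if f(x + alpha*p) <= fx + c*alpha*slope
        return;                  % sufficient decrease achieved
    end
    alpha = alpha * rho;         % shrink the step length
end
end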