# 数值分析之非线性方程 (Numerical analysis: nonlinear equations)
import math
# 定义函数 f(x)
def f(x):
return x ** 3 + 4 * x ** 2 - 10
# 定义函数 f 的导数
def f_prime(x):
return 3 * x ** 2 + 8 * x
# 二分法
def bisection_method(a, b, tol=1e-8):
print(f"二分法开始,初始区间 [{a}, {b}]")
while (b - a) / 2 > tol: # 当区间长度的一半大于容差时继续迭代
c = (a + b) / 2 # 计算中点
print(f"当前区间 [{a}, {b}],中点 c = {c}")
if f(c) == 0: # 如果中点是根
print(f"f(c) = 0, 找到根 c = {c}")
return c
elif f(a) * f(c) < 0: # 如果根在左半区间
b = c # 更新右边界
print(f"f(a) * f(c) < 0, 更新 b = {b}")
else: # 如果根在右半区间
a = c # 更新左边界
print(f"f(a) * f(c) >= 0, 更新 a = {a}")
root = (a + b) / 2 # 计算最终的根近似值
print(f"达到容差,根的近似值为 {root}")
return root
# 牛顿法
def newton_method(x0, tol=1e-8, max_iter=100):
print(f"牛顿法开始,初始值 x0 = {x0}")
x = x0
for i in range(max_iter): # 最大迭代次数
x_new = x - f(x) / f_prime(x) # 计算新的近似值
error = abs(x_new - x) # 计算误差
print(f"迭代 {i + 1}: x = {x}, f(x) = {f(x)}, f'(x) = {f_prime(x)}, x_new = {x_new}, 牛顿法误差error=|x_new - x|=|{x_new}-{x}|= {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x_new}")
return x_new
x = x_new # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x_new}")
return x_new
# 割线法
def secant_method(x0, x1, tol=1e-8, max_iter=100):
print(f"割线法开始,初始值 x0 = {x0}, x1 = {x1}")
for i in range(max_iter): # 最大迭代次数
x2 = x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0)) # 计算新的近似值
error = abs(x2 - x1) # 计算误差
print(f"迭代 {i + 1}: x0 = {x0}, x1 = {x1}, f(x0) = {f(x0)}, f(x1) = {f(x1)}, x2 = {x2}, 割线法误差error=|x2 - x1|=|{x2} - {x1})| = {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x2}")
return x2
x0 = x1 # 更新前一个值
x1 = x2 # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x2}")
return x2
# 抛物线法(Muller方法)
def muller_method(x0, x1, x2, tol=1e-8, max_iter=100):
print(f"抛物线法开始,初始值 x0 = {x0}, x1 = {x1}, x2 = {x2}")
for i in range(max_iter): # 最大迭代次数
h1 = x1 - x0 # 计算 h1
h2 = x2 - x1 # 计算 h2
d1 = (f(x1) - f(x0)) / h1 # 计算 d1
d2 = (f(x2) - f(x1)) / h2 # 计算 d2
a = (d2 - d1) / (h2 + h1) # 计算 a
b = a * h2 + d2 # 计算 b
c = f(x2) # 计算 c
discriminant = b ** 2 - 4 * a * c # 计算判别式
if discriminant < 0: # 如果判别式为负数
raise ValueError("Discriminant is negative, no real roots.")
sqrt_discriminant = math.sqrt(discriminant) # 计算判别式的平方根
if abs(b + sqrt_discriminant) > abs(b - sqrt_discriminant): # 选择较大的根
x_new = x2 - (2 * c) / (b + sqrt_discriminant)
else:
x_new = x2 - (2 * c) / (b - sqrt_discriminant)
error = abs(x_new - x2) # 计算误差
print(
f"迭代 {i + 1}: x0 = {x0}, x1 = {x1}, x2 = {x2}, f(x0) = {f(x0)}, f(x1) = {f(x1)}, f(x2) = {f(x2)}, x_new = {x_new}, 抛物线法误差error=|x_new - x2|=|{x_new} - {x2}| = {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x_new}")
return x_new
x0 = x1 # 更新前一个值
x1 = x2 # 更新当前值
x2 = x_new # 更新新的近似值
print(f"达到最大迭代次数,根的近似值为 {x2}")
return x2
# 迭代法(示例:简单迭代法)
def iteration_method(x0, tol=1e-8, max_iter=100):
print(f"迭代法开始,初始值 x0 = {x0}")
def g(x):
return math.sqrt((10 - x ** 3) / 4) # 定义迭代函数 g(x)
x = x0
for i in range(max_iter): # 最大迭代次数
x_new = g(x) # 计算新的近似值
error = abs(x_new - x) # 计算误差
print(f"迭代 {i + 1}: x = {x}, g(x) = {g(x)}, x_new = {x_new}, 迭代法误差error=|x_new - x|=|{x_new} - {x}| = {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x_new}")
return x_new
x = x_new # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x_new}")
return x_new
# Aitken 加速法
def aitken_acceleration(x0, x1, tol=1e-8, max_iter=100):
print(f"Aitken 加速法开始,初始值 x0 = {x0}, x1 = {x1}")
for i in range(max_iter): # 最大迭代次数
x2 = x1 - ((x1 - x0) ** 2 / (f(x1) - 2 * f(x0) + f(x1))) # 计算新的近似值
error = abs(x2 - x1) # 计算误差
print(f"迭代 {i + 1}: x0 = {x0}, x1 = {x1}, f(x0) = {f(x0)}, f(x1) = {f(x1)}, x2 = {x2}, Aitken加速法误差error=|x2 - x1|=|{x2} - {x1}| = {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x2}")
return x2
x0 = x1 # 更新前一个值
x1 = x2 # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x2}")
return x2
# Steffensen 加速法
def steffensen_acceleration(x0, tol=1e-8, max_iter=100):
print(f"Steffensen 加速法开始,初始值 x0 = {x0}")
for i in range(max_iter): # 最大迭代次数
y = f(x0) # 计算 y = f(x0)
z = f(y) # 计算 z = f(y)
x1 = x0 - ((y - x0) ** 2 / (z - 2 * y + x0)) # 计算新的近似值
error = abs(x1 - x0) # 计算误差
print(f"迭代 {i + 1}: x0 = {x0}, f(x0) = {f(x0)}, y = {y}, f(y) = {f(y)}, x1 = {x1}, Steffensen加速法误差error=|x1 - x0|=|{x1} - {x0}| = {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x1}")
return x1
x0 = x1 # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x1}")
return x1
# 外推法(示例:理查森外推法)
def richardson_extrapolation(x0, h, tol=1e-8, max_iter=100):
print(f"外推法开始,初始值 x0 = {x0}, h = {h}")
def T(n, h):
return (f(x0 + n * h) - f(x0 - n * h)) / (2 * n * h) # 定义 T(n, h)
for i in range(max_iter): # 最大迭代次数
T1 = T(1, h) # 计算 T1
T2 = T(2, h) # 计算 T2
x1 = x0 - (T2 - T1) / (T2 - 2 * T1) # 计算新的近似值
error = abs(x1 - x0) # 计算误差
print(f"迭代 {i + 1}: x0 = {x0}, h = {h}, T1 = {T1}, T2 = {T2}, x1 = {x1}, 理查森外推法误差error=|x1 - x0| =|{x1} - {x0}|= {error}")
if error < tol: # 如果误差小于容差
print(f"达到容差,根的近似值为 {x1}")
return x1
x0 = x1 # 更新当前值
print(f"达到最大迭代次数,根的近似值为 {x1}")
return x1
# 测试
print("二分法的根:", bisection_method(1, 2))
print("牛顿法的根:", newton_method(1.5))
print("割线法的根:", secant_method(1, 2))
print("抛物线法的根(Muller方法):", muller_method(1, 1.5, 2))
print("迭代法的根:", iteration_method(1.5))
print("Aitken 加速法的根:", aitken_acceleration(1, 2))
print("Steffensen 加速法的根:", steffensen_acceleration(1.5))
print("外推法的根(理查森外推法):", richardson_extrapolation(1.5, 0.1))
# posted on 2025-01-17 10:58 Indian_Mysore 阅读(7) 评论(0) 收藏 举报
# 浙公网安备 33010602011771号