基于贝叶斯理论的稀疏信号重建 MATLAB 实现

贝叶斯稀疏信号重建系统,包含多种贝叶斯先验模型、变分推断与 MCMC 采样方法。

1. 主程序框架

%% Bayesian sparse-signal reconstruction driver
% Purpose: reconstruct a sparse signal from compressed measurements using
% several Bayesian methods and compare them against a Lasso baseline.
% Methods: Laplace-prior MAP, Student-t-prior MAP, variational Bayes with a
% Gaussian scale mixture (GSM) prior, and MCMC (Gibbs/MH) with a GSM prior.
% NOTE(review): `lasso` requires the Statistics and Machine Learning
% Toolbox — confirm it is installed before running.

clear; clc; close all;
rng(42);  % fix the random seed so runs are reproducible

%% 1. Generate the synthetic test problem
fprintf('生成测试问题...\n');
[Phi, x_true, y, sigma2] = GenerateTestProblem();

% Report problem dimensions and true sparsity
[m, n] = size(Phi);
fprintf('观测维度: %d, 信号维度: %d, 压缩比: %.2f\n', m, n, m/n);
fprintf('真实稀疏度: %d (%.1f%%)\n', nnz(x_true), nnz(x_true)/n*100);

%% 2. Classical baseline (Lasso)
fprintf('\n运行Lasso基准...\n');
x_lasso = lasso(Phi, y, 'Lambda', 0.1*sigma2*sqrt(log(n)/m));
fprintf('Lasso稀疏度: %d\n', nnz(x_lasso));

%% 3. Bayesian method comparison
methods = {'MAP-Laplace', 'MAP-StudentT', 'VB-GSM', 'MCMC-GSM'};
results = cell(length(methods), 1);

% 3.1 MAP with Laplace prior (equivalent to Lasso)
fprintf('\n=== MAP with Laplace先验 ===\n');
tic;
x_map_laplace = Bayesian_MAP_Laplace(Phi, y, sigma2);
time_map_laplace = toc;
results{1} = EvaluateResult(x_map_laplace, x_true, time_map_laplace, 'MAP-Laplace');

% 3.2 MAP with Student-t prior (iteratively reweighted L1)
fprintf('\n=== MAP with Student-t先验 ===\n');
tic;
x_map_studentt = Bayesian_MAP_StudentT(Phi, y, sigma2);
time_map_studentt = toc;
results{2} = EvaluateResult(x_map_studentt, x_true, time_map_studentt, 'MAP-StudentT');

% 3.3 Variational Bayes with GSM (ARD) prior
fprintf('\n=== 变分贝叶斯 with GSM先验 ===\n');
tic;
[x_vb, alpha_vb, sigma2_vb] = Bayesian_VB_GSM(Phi, y);
time_vb = toc;
results{3} = EvaluateResult(x_vb, x_true, time_vb, 'VB-GSM');

% 3.4 MCMC with GSM prior (2000 Gibbs sweeps; slowest method)
fprintf('\n=== MCMC with GSM先验 ===\n');
tic;
[x_mcmc, samples] = Bayesian_MCMC_GSM(Phi, y, sigma2, 2000);
time_mcmc = toc;
results{4} = EvaluateResult(x_mcmc, x_true, time_mcmc, 'MCMC-GSM');

%% 4. Analysis and visualization
fprintf('\n========== 性能对比 ==========\n');
PrintResultsTable(results);

% Plot reconstructions, residuals, sparsity, NMSE, runtime and correlation
VisualizeResults(Phi, y, x_true, results, sigma2);

% Posterior histograms for selected coefficients (only if MCMC ran)
if exist('samples', 'var')
    PlotPosteriorDistribution(samples, x_true);
end

2. 测试问题生成

function [Phi, x_true, y, sigma2] = GenerateTestProblem()
% Build a synthetic compressed-sensing problem: y = Phi*x_true + noise.
%
% Returns:
%   Phi    - m-by-n Gaussian sensing matrix with unit-norm columns
%   x_true - n-by-1 k-sparse ground-truth signal
%   y      - m-by-1 noisy measurements
%   sigma2 - noise variance chosen to achieve the target SNR

% Problem configuration
sig_dim  = 200;   % signal dimension n
meas_dim = 50;    % measurement dimension m (4:1 compression)
sparsity = 10;    % number of nonzero entries k
snr_db   = 30;    % target signal-to-noise ratio in dB

% Random Gaussian sensing matrix, columns normalized to unit length
Phi = randn(meas_dim, sig_dim);
Phi = Phi ./ vecnorm(Phi, 2, 1);

% Sparse ground truth: k random positions with Gaussian amplitudes
x_true = zeros(sig_dim, 1);
nz_idx = randperm(sig_dim, sparsity);
x_true(nz_idx) = randn(sparsity, 1);

% Noise variance set from the clean-signal energy and the target SNR
sigma2 = norm(Phi * x_true)^2 / (meas_dim * 10^(snr_db/10));
y = Phi * x_true + sqrt(sigma2) * randn(meas_dim, 1);

fprintf('  测量矩阵: %d × %d\n', meas_dim, sig_dim);
fprintf('  稀疏度: %d/%d\n', sparsity, sig_dim);
fprintf('  噪声方差: %.4f\n', sigma2);
end

3. 贝叶斯方法实现

3.1 MAP with Laplace先验

function x_est = Bayesian_MAP_Laplace(Phi, y, sigma2)
% MAP estimate under a Laplace prior (equivalent to Lasso).
% Solves: min_x 0.5*||y - Phi*x||^2/sigma2 + lambda*||x||_1
%
% Inputs:
%   Phi    - m-by-n sensing matrix
%   y      - m-by-1 measurements
%   sigma2 - noise variance
% Returns:
%   x_est  - n-by-1 MAP estimate

[m, n] = size(Phi);

% Regularization level from a BIC-style universal threshold
lambda = sigma2 * sqrt(2*log(n)/m);

% BUG FIX: the original called `cvlasso`, which is not a MATLAB function.
% Solve with the file's ADMM Lasso solver instead (no toolbox needed).
x_est = ADMM_Lasso(Phi, y, lambda, sigma2);
end

function x = ADMM_Lasso(Phi, y, lambda, sigma2, max_iter)
% ADMM solver for the Lasso problem
%   min_x 0.5*||y - Phi*x||^2 + (lambda/sigma2)*||x||_1
%
% Inputs:
%   Phi      - m-by-n sensing matrix
%   y        - m-by-1 measurements
%   lambda   - regularization weight
%   sigma2   - noise variance (scales the L1 penalty)
%   max_iter - optional iteration cap (default 1000)
% Returns:
%   x        - n-by-1 solution

% BUG FIX: original tested `nargin < 6`, but the function has 5 arguments,
% so the default was never applied and a 4-argument call raised an error.
if nargin < 5, max_iter = 1000; end

n = size(Phi, 2);
rho = 1.0;  % ADMM penalty parameter

% Initialization of primal (x, z) and scaled dual (u) variables
x = zeros(n, 1);
z = zeros(n, 1);
u = zeros(n, 1);

% Precompute quantities reused every iteration
PhiTy = Phi' * y;
M = Phi' * Phi + rho * eye(n);   % system matrix for the x-update

for iter = 1:max_iter
    % x-update: ridge-regularized least squares
    x = M \ (PhiTy + rho*(z - u));

    % z-update: elementwise soft-thresholding (inlined, self-contained)
    z_old = z;
    v = x + u;
    z = sign(v) .* max(abs(v) - lambda/(rho*sigma2), 0);

    % u-update: scaled dual ascent
    u = u + x - z;

    % Stop when primal residual and z-change are both small
    if norm(x - z) < 1e-6 && norm(z - z_old) < 1e-6
        break;
    end
end
end

function z = soft_threshold(x, threshold)
% Elementwise soft-thresholding operator: shrink |x| by `threshold`,
% clamping at zero, while preserving the sign of each element.
shrunk = abs(x) - threshold;
shrunk(shrunk < 0) = 0;
z = sign(x) .* shrunk;
end

3.2 MAP with Student-t先验

function x_est = Bayesian_MAP_StudentT(Phi, y, sigma2)
% MAP estimation under a Student-t prior via iteratively reweighted L1.
% Prior: p(x_j) ∝ (1 + x_j^2/(nu*lambda))^(-(nu+1)/2)
%
% Inputs:
%   Phi    - m-by-n sensing matrix
%   y      - m-by-1 measurements
%   sigma2 - noise variance
% Returns:
%   x_est  - n-by-1 MAP estimate
%
% NOTE(review): `lasso` requires the Statistics and Machine Learning Toolbox.

n = size(Phi, 2);
nu = 4;          % Student-t degrees of freedom
lambda = 0.1;    % scale parameter
max_iter = 100;

x = zeros(n, 1);

for iter = 1:max_iter
    % Weights grow as |x_j| shrinks, so small coefficients are penalized
    % harder (standard reweighted-L1 behavior)
    weights = (nu + 1) ./ (nu*lambda + x.^2 + eps);

    % BUG FIX: to solve min 0.5||y - Phi*x||^2 + c*sum_j w_j|x_j| with a
    % standard lasso, substitute x = W*x_w with W = diag(1./w) so the
    % weighted penalty becomes the plain ||x_w||_1. The original used
    % W = diag(sqrt(w)), which applied the weights in the WRONG direction
    % (penalty ∝ 1/sqrt(w_j), i.e. small coefficients penalized less).
    W = diag(1 ./ weights);
    Phi_weighted = Phi * W;

    % Solve the transformed (unweighted) Lasso
    x_weighted = lasso(Phi_weighted, y, ...
        'Lambda', 0.5*sigma2, 'Standardize', false);

    % Map back to the original coordinates
    x_new = W * x_weighted;

    % Relative-change convergence test
    if norm(x_new - x) / (norm(x) + eps) < 1e-6
        x = x_new;
        break;
    end
    x = x_new;
end

x_est = x;
end

3.3 变分贝叶斯 with GSM先验

function [x_mean, alpha, sigma2_est] = Bayesian_VB_GSM(Phi, y)
% Variational Bayes inference with a Gaussian scale mixture (ARD) prior.
% Model: x_j ~ N(0, alpha_j^(-1)), alpha_j ~ Gamma(a0, b0)
%        y | x ~ N(Phi*x, sigma2*I), 1/sigma2 ~ Gamma(c0, d0)
%
% Inputs:
%   Phi        - m-by-n sensing matrix
%   y          - m-by-1 measurements
% Returns:
%   x_mean     - n-by-1 posterior mean of x
%   alpha      - n-by-1 estimated precision parameters
%   sigma2_est - estimated noise variance

[m, n] = size(Phi);
max_iter = 200;

% Diffuse (noninformative) hyperparameters
a0 = 1e-6; b0 = 1e-6;
c0 = 1e-6; d0 = 1e-6;

% Initialization
alpha = ones(n, 1);      % coefficient precisions
sigma2 = var(y)/10;      % initial noise variance guess
x_mean = zeros(n, 1);    % posterior mean

for iter = 1:max_iter
    % === E-step: update q(x) = N(x_mean, Sigma) ===
    A = (1/sigma2) * (Phi' * Phi) + diag(alpha);
    Sigma = inv(A + eye(n)*1e-6);  % jitter for numerical stability
    x_prev = x_mean;
    x_mean = (1/sigma2) * Sigma * (Phi' * y);

    % === M-step: update hyperparameters ===
    % Gamma posterior mean for each alpha_j
    alpha = (a0 + 0.5) ./ (b0 + 0.5*(x_mean.^2 + diag(Sigma)) + eps);

    % Expected squared residual under q(x)
    residual = y - Phi * x_mean;
    sq_error = residual' * residual + trace(Phi * Sigma * Phi');

    % BUG FIX: the original assigned shape/rate — the posterior-mean
    % PRECISION — to sigma2, i.e. the noise variance was inverted.
    % The variance is rate/shape (= 1 / E[1/sigma2]).
    sigma2 = (d0 + 0.5*sq_error) / (c0 + 0.5*m);

    % Convergence: change in the posterior mean of x.
    % BUG FIX: the original tested norm(diff([x_mean; sigma2])), which
    % differences adjacent ENTRIES of one vector and never compares
    % successive iterates.
    if iter > 10 && norm(x_mean - x_prev) < 1e-6
        break;
    end
end

sigma2_est = sigma2;
end

3.4 MCMC with GSM先验

function [x_map, samples] = Bayesian_MCMC_GSM(Phi, y, sigma2, n_samples)
% MCMC sampling under a GSM (normal-gamma) prior, one Gibbs sweep per draw:
%   x | alpha, sigma2  -- coordinate-wise random-walk Metropolis-Hastings
%   alpha_j | x        -- exact conjugate Gamma draw
%   sigma2 | x, y      -- exact inverse-Gamma draw
% Returns the posterior-MEAN estimate (despite the name x_map) and the full
% chain of x samples (n-by-n_samples).
% NOTE(review): `gamrnd` requires the Statistics and Machine Learning Toolbox.

[m, n] = size(Phi);
burnin = round(0.5 * n_samples);  % discard the first half of the chain

% Initialize the chain state
x_current = zeros(n, 1);
alpha_current = ones(n, 1);
sigma2_current = sigma2;

% Storage for every post-sweep state of x
samples = zeros(n, n_samples);

% Diffuse Gamma hyperparameters for the alpha_j priors
a0 = 1e-6; b0 = 1e-6;

for s = 1:n_samples
    % === Sample x | y, alpha, sigma2 ===
    % One random-walk MH proposal per coordinate (fixed step size 0.1)
    for j = 1:n
        % Propose a perturbation of coordinate j only
        x_proposal = x_current;
        x_proposal(j) = x_current(j) + 0.1*randn();
        
        % Log acceptance ratio via the (unnormalized) log posterior
        log_p_current = LogPosterior(x_current, alpha_current, sigma2_current, Phi, y);
        log_p_proposal = LogPosterior(x_proposal, alpha_current, sigma2_current, Phi, y);
        
        if log(rand) < log_p_proposal - log_p_current
            x_current = x_proposal;
        end
    end
    
    % === Sample alpha | x ===
    for j = 1:n
        % Conjugate Gamma update; gamrnd takes (shape, scale) so pass 1/rate
        shape = a0 + 0.5;
        rate = b0 + 0.5 * x_current(j)^2;
        alpha_current(j) = gamrnd(shape, 1/rate);
    end
    
    % === Sample sigma2 | x, y ===
    % Draw the precision from its Gamma conditional, then invert
    residual = y - Phi * x_current;
    shape = 1e-6 + m/2;
    rate = 1e-6 + 0.5 * (residual' * residual);
    sigma2_current = 1 / gamrnd(shape, 1/rate);
    
    % Record the state after this sweep
    samples(:, s) = x_current;
end

% Posterior mean over the post-burn-in samples
x_map = mean(samples(:, burnin:end), 2);
end

function log_p = LogPosterior(x, alpha, sigma2, Phi, y)
% Log posterior density of x (up to a constant in x):
%   log N(y; Phi*x, sigma2*I) + log N(x; 0, diag(1./alpha))
%
% Inputs:
%   x      - n-by-1 coefficient vector
%   alpha  - n-by-1 prior precisions
%   sigma2 - noise variance
%   Phi    - m-by-n sensing matrix
%   y      - m-by-1 measurements
% Returns:
%   log_p  - scalar log posterior value

% BUG FIX: the original referenced an undefined variable `m`
% (functions do not see caller workspaces in MATLAB) — runtime error.
m = numel(y);
r = y - Phi*x;
% BUG FIX: the Gaussian-prior normalizer is +0.5*sum(log(alpha)), not
% negative (variance 1/alpha_j). The term is constant in x, so MH ratios
% over x were unaffected, but the returned value was wrong.
log_p = -0.5 * (r' * r) / sigma2 ...
        - 0.5 * x' * diag(alpha) * x ...
        - 0.5 * numel(x) * log(2*pi) ...
        + 0.5 * sum(log(alpha)) ...
        - 0.5 * m * log(sigma2);
end

4. 结果评估与可视化

function result = EvaluateResult(x_est, x_true, time, method_name)
% Evaluate a reconstruction against the ground truth.
%
% Inputs:
%   x_est       - n-by-1 estimated signal
%   x_true      - n-by-1 ground-truth signal
%   time        - elapsed solve time in seconds
%   method_name - label used in reports
% Returns:
%   result - struct with fields: method, time, nnz, support_error,
%            nmse, correlation, success, x_est

result.method = method_name;
result.time = time;
result.nnz = nnz(x_est);
result.support_error = sum((x_est ~= 0) ~= (x_true ~= 0));
result.nmse = norm(x_est - x_true)^2 / norm(x_true)^2;
% Use base-MATLAB corrcoef instead of toolbox corr (Pearson by definition)
C = corrcoef(x_est, x_true);
result.correlation = C(1, 2);
% BUG FIX: store the estimate itself — VisualizeResults reads results{i}.x_est
result.x_est = x_est;

% Exact support recovery flag
result.success = isequal((x_est ~= 0), (x_true ~= 0));

fprintf('%s: NNZ=%d, NMSE=%.2e, Corr=%.4f, Time=%.3fs\n', ...
    method_name, result.nnz, result.nmse, result.correlation, result.time);
end

function PrintResultsTable(results)
% Print a fixed-width comparison table for all evaluated methods.
% `results` is a cell array of structs produced by EvaluateResult.
fprintf('\n%-15s %-8s %-10s %-12s %-12s %-10s\n', ...
    'Method', 'NNZ', 'NMSE', 'Correlation', 'Support Err', 'Time(s)');
fprintf('%s\n', repmat('-', 1, 70));

for k = 1:numel(results)
    entry = results{k};
    fprintf('%-15s %-8d %-10.2e %-12.4f %-12d %-10.3f\n', ...
        entry.method, entry.nnz, entry.nmse, ...
        entry.correlation, entry.support_error, entry.time);
end
end

function VisualizeResults(Phi, y, x_true, results, sigma2) %#ok<INUSD>
% Visualize reconstruction quality for all methods in a 2x3 figure:
% signals, residuals, sparsity, NMSE, runtime, and correlation.
%
% Inputs:
%   Phi     - m-by-n sensing matrix (for residual computation)
%   y       - m-by-1 measurements
%   x_true  - n-by-1 ground-truth signal
%   results - CELL array of result structs from EvaluateResult
%   sigma2  - noise variance (currently unused; kept for interface stability)

% BUG FIX: the original applied struct-field expansion ({results.method},
% [results.nnz]) directly to the CELL array, which errors in MATLAB.
% Convert to a struct array first.
res = [results{:}];
names = {res.method};
nm = numel(res);
has_est = isfield(res, 'x_est');  % older result structs may lack x_est

figure('Position', [100, 100, 1200, 800]);

% Subplot 1: true vs reconstructed signals
subplot(2, 3, 1);
hold on;
stem(1:length(x_true), x_true, 'b.', 'MarkerSize', 15, 'LineWidth', 1.5, 'DisplayName', 'True');
if has_est
    for i = 1:nm
        stem(1:length(x_true), res(i).x_est, 'r.', 'MarkerSize', 10, 'LineWidth', 1);
    end
end
xlabel('Index'); ylabel('Amplitude');
title('信号重建对比');
legend('True', 'Reconstructed', 'Location', 'best');
grid on;

% Subplot 2: per-method mean squared measurement residual
subplot(2, 3, 2);
if has_est
    residuals = zeros(nm, 1);
    for i = 1:nm
        residuals(i) = norm(y - Phi * res(i).x_est)^2 / length(y);
    end
    bar(1:nm, residuals);
    set(gca, 'XTick', 1:nm, 'XTickLabel', names);
end
ylabel('Residual Norm');
title('重建残差');
grid on;

% Subplot 3: number of nonzeros recovered
subplot(2, 3, 3);
bar(1:nm, [res.nnz]);
set(gca, 'XTick', 1:nm, 'XTickLabel', names);
ylabel('Non-zero Count');
title('稀疏度对比');
grid on;

% Subplot 4: NMSE on a log scale
subplot(2, 3, 4);
semilogy(1:nm, [res.nmse], 'bo-', 'LineWidth', 2, 'MarkerSize', 8);
set(gca, 'XTick', 1:nm, 'XTickLabel', names);
ylabel('NMSE (log scale)');
title('归一化均方误差');
grid on;

% Subplot 5: wall-clock time
subplot(2, 3, 5);
bar(1:nm, [res.time]);
set(gca, 'XTick', 1:nm, 'XTickLabel', names);
ylabel('Time (seconds)');
title('计算时间对比');
grid on;

% Subplot 6: correlation with the ground truth
subplot(2, 3, 6);
plot(1:nm, [res.correlation], 'ro-', 'LineWidth', 2, 'MarkerSize', 8);
set(gca, 'XTick', 1:nm, 'XTickLabel', names);
ylabel('Correlation Coefficient');
title('与真实信号的相关性');
ylim([0, 1]);
grid on;

sgtitle('贝叶斯稀疏信号重建性能对比', 'FontSize', 16, 'FontWeight', 'bold');
end

function PlotPosteriorDistribution(samples, x_true)
% Plot marginal posterior histograms of selected coefficients with the
% true value marked by a dashed vertical line.
% samples: n-by-S matrix of MCMC draws; x_true: n-by-1 ground truth.
figure('Position', [200, 200, 800, 400]);

% A few representative coefficients (mix of zero and nonzero positions)
chosen = [1, 10, 50, 100];

for k = 1:numel(chosen)
    coeff = chosen(k);
    subplot(2, 2, k);

    % Marginal posterior as a normalized histogram
    histogram(samples(coeff, :), 30, 'Normalization', 'probability', ...
              'FaceColor', [0.7, 0.7, 0.9], 'EdgeColor', 'none');
    hold on;

    % Dashed red line at the true coefficient value
    truth = x_true(coeff);
    line([truth, truth], [0, 1], 'Color', 'r', 'LineWidth', 2, ...
          'LineStyle', '--');

    xlabel('Coefficient Value');
    ylabel('Probability Density');
    title(sprintf('Coefficient %d (True = %.2f)', coeff, truth));
    grid on;
end

sgtitle('后验分布可视化', 'FontSize', 14, 'FontWeight', 'bold');
end

5. 高级功能扩展

%% 6. 自适应贝叶斯压缩感知(在线学习)
function [x_seq, alpha_seq] = OnlineBayesianCS(Phi_seq, y_seq, sigma2)
% Online Bayesian compressed sensing for time-varying sparse signals.
% The posterior precisions from step t seed the update at step t+1.
%
% Inputs:
%   Phi_seq - T-element cell array; Phi_seq{t} is the m_t-by-n sensing matrix
%   y_seq   - T-element cell array; y_seq{t} is the m_t-by-1 measurement
%   sigma2  - noise variance (shared across steps)
% Returns:
%   x_seq     - n-by-T estimated signals, one column per time step
%   alpha_seq - n-by-T precision trajectories

T = length(y_seq);  % number of time steps

% BUG FIX: the original used n = size(Phi_seq, 2), which measures the CELL
% ARRAY, not the signal dimension. Read n from the first sensing matrix.
n = size(Phi_seq{1}, 2);

x_seq = zeros(n, T);
alpha_seq = zeros(n, T);

% Diffuse initial precisions
alpha = 1e-6 * ones(n, 1);

for t = 1:T
    % One variational posterior update with the current observation,
    % warm-started from the previous step's precisions
    [x_t, alpha] = UpdatePosterior(Phi_seq{t}, y_seq{t}, alpha, sigma2);

    x_seq(:, t) = x_t;
    alpha_seq(:, t) = alpha;
end
end

function [x_new, alpha_new] = UpdatePosterior(Phi, y, alpha_old, sigma2, a0, b0)
% One variational posterior update for x and the ARD precisions alpha.
%
% Inputs:
%   Phi       - m-by-n sensing matrix
%   y         - m-by-1 measurements
%   alpha_old - n-by-1 current precisions
%   sigma2    - noise variance
%   a0, b0    - optional Gamma hyperparameters; defaults (1, 1) reproduce
%               the original hard-coded update exactly
% Returns:
%   x_new     - n-by-1 posterior mean of x
%   alpha_new - n-by-1 updated precisions

if nargin < 5, a0 = 1; end
if nargin < 6, b0 = 1; end

n = size(Phi, 2);

% Gaussian posterior: Sigma = (Phi'Phi/sigma2 + diag(alpha))^-1
A = (1/sigma2) * (Phi' * Phi) + diag(alpha_old);
Sigma = inv(A + eye(n)*1e-6);  % jitter for numerical stability
x_new = (1/sigma2) * Sigma * (Phi' * y);

% Gamma posterior-mean update for each precision
alpha_new = (a0 + 0.5) ./ (b0 + 0.5*(x_new.^2 + diag(Sigma)) + eps);
end

%% 7. 多测量向量(MMV)贝叶斯CS
function X_est = Bayesian_MMV_CS(Phi, Y, sigma2)
% Multiple-measurement-vector (MMV) Bayesian compressed sensing.
% Model: Y = Phi*X + noise, where the rows of X share a sparsity pattern.
%
% Inputs:
%   Phi    - m-by-n sensing matrix
%   Y      - m-by-T measurement matrix (one column per snapshot)
%   sigma2 - noise variance
% Returns:
%   X_est  - n-by-T jointly sparse estimate

% BUG FIX (clarity): the original did [n, T] = size(Y) and then immediately
% shadowed n with [m, n] = size(Phi); read each dimension from its source.
[~, n] = size(Phi);
T = size(Y, 2);

alpha = ones(n, 1);  % shared row precisions

% Precompute loop invariants
PhiTPhi = Phi' * Phi;
PhiTY = Phi' * Y;

for iter = 1:100
    % Posterior covariance is identical for every column, so compute it
    % once per iteration (the original recomputed it for each t)
    A = (1/sigma2) * PhiTPhi + diag(alpha);
    Sigma = inv(A + eye(n)*1e-6);

    % Posterior means for all T columns at once
    X_est = (1/sigma2) * Sigma * PhiTY;

    % Update the shared precisions from the row energies
    row_norms = sum(X_est.^2, 2);
    alpha = (1 + T/2) ./ (1 + 0.5*row_norms + eps);
end
end

%% 8. 结构化稀疏贝叶斯CS
function x_est = Bayesian_StructuredSparse(Phi, y, group_indices)
% Group-structured sparse Bayesian reconstruction: every coefficient in a
% group shares one ARD precision, so whole groups switch on or off together.
%
% Inputs:
%   Phi           - m-by-n sensing matrix
%   y             - m-by-1 measurements
%   group_indices - n-by-1 group label for each coefficient
% Returns:
%   x_est         - n-by-1 estimate

group_ids = unique(group_indices);
num_groups = length(group_ids);
alpha = ones(num_groups, 1);

for iter = 1:100
    % Expand group-level precisions onto the coefficient level
    per_coeff_prec = zeros(length(group_indices), 1);
    for g = 1:num_groups
        members = find(group_indices == group_ids(g));
        per_coeff_prec(members) = alpha(g);
    end

    % Ridge-style solve with the structured precision on the diagonal
    A = (Phi' * Phi) + diag(per_coeff_prec);
    x_est = A \ (Phi' * y);

    % Re-estimate each group precision from its members' energy
    for g = 1:num_groups
        members = find(group_indices == group_ids(g));
        group_energy = x_est(members)' * x_est(members);
        alpha(g) = length(members) / (group_energy + eps);
    end
end
end

参考代码 基于贝叶斯理论的稀疏信号重建 www.youwenfan.com/contentcnt/135796.html

6. 使用说明

运行步骤:

  1. 将所有代码保存为.m文件
  2. 在MATLAB中运行主程序
  3. 观察不同贝叶斯方法的性能对比

参数调优建议:

  1. Student-t先验:调整自由度nu(通常2-10)
  2. GSM先验:调整超参数a0, b0(小值表示无信息)
  3. 迭代次数:根据收敛情况调整

扩展方向:

  1. 实时处理:实现在线贝叶斯CS
  2. 多维信号:扩展到图像/视频重建
  3. 非高斯噪声:使用鲁棒先验(如混合高斯)
  4. GPU加速:使用MATLAB GPU计算加速MCMC
posted @ 2026-04-23 17:35  yes_go  阅读(8)  评论(0)    收藏  举报