Importance Sampling, with MATLAB and Python Implementations


1 Practice
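Before the filtering demos, a minimal Python sketch of plain importance sampling may help fix ideas (this example is my own illustration, not part of the original code): to estimate E_p[f(X)], draw samples from a proposal q and weight each sample by p(x)/q(x).

import numpy as np
from scipy import stats

# Estimate E[f(X)] for X ~ N(0,1) using samples from a wider
# proposal q = N(0,2); each sample is weighted by p(x)/q(x).
rng = np.random.default_rng(0)
N = 100000
p = stats.norm(0, 1)              # target density
q = stats.norm(0, 2)              # proposal density
x = q.rvs(size=N, random_state=rng)
w = p.pdf(x) / q.pdf(x)           # importance weights
f = x ** 2                        # f(x) = x^2, so E[f(X)] = Var(X) = 1
est = np.sum(w * f) / np.sum(w)   # self-normalized IS estimate
print(est)                        # close to 1

The self-normalized estimate divides by the sum of the weights; the SIS code below performs the same normalization at every time step.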

MATLAB:

% Demo: Kalman filter + Sequential Importance Sampling (SIS) for a linear Gaussian model,
% using the prior proposal and the locally optimal proposal
T=100;
sv=1;
sw=sqrt(1);
phi=0.95;
% simulate data according to the model
x=zeros(1,T);
y=zeros(1,T);
x(1,1)=randn(1);
for k=2:T
    x(1,k)=phi*x(1,k-1)+sv.*randn(1);
end
y=x+sw.*randn(1,T);
plot(y)
% load simulatedstatesobs   % data corresponding to the lecture notes
% number of samples/particles
N=1000;
% exact inference using the Kalman filter
mp=zeros(1,T);
mf=zeros(1,T);
vp=zeros(1,T);
vf=zeros(1,T);
my=zeros(1,T);
vy=zeros(1,T);
loglike=0;
mp(1,1)=0;
vp(1,1)=1;
my(1,1)=mp(1,1);
vy(1,1)=vp(1,1)+sw^2;
loglike=-0.5*log(2*pi*vy(1,1))-0.5*(y(1,1)-my(1,1))^2/vy(1,1);
for k=1:T
    % update
    vf(1,k)=sw^2*vp(1,k)/(vp(1,k)+sw^2);
    mf(1,k)=vf(1,k)*(mp(1,k)/vp(1,k)+y(1,k)/sw^2);
    % predict
    if (k<T)
        mp(1,k+1)=phi*mf(1,k);
        vp(1,k+1)=phi^2*vf(1,k)+sv^2;
        my(1,k+1)=mp(1,k+1);
        vy(1,k+1)=vp(1,k+1)+sw^2;
        loglike=loglike-0.5*log(2*pi*vy(1,k+1))-0.5*(y(1,k+1)-my(1,k+1))^2/vy(1,k+1);
    end
end
% prior proposal
xs1=zeros(T,N);
lw1=zeros(T,N);
w1=zeros(T,N);
wnorm1=zeros(T,N);
ess1=zeros(1,T);
xmean1=zeros(1,T);
xvar1=zeros(1,T);
varlogw1=zeros(1,T);
% locally optimal proposal
xs2=zeros(T,N);
lw2=zeros(T,N);
w2=zeros(T,N);
wnorm2=zeros(T,N);
ess2=zeros(1,T);
xmean2=zeros(1,T);
xvar2=zeros(1,T);
varlogw2=zeros(1,T);
% SIS using the prior proposal
for k=1:T
    if (k==1)
        % sample particles from the initial distribution
        xs1(k,:)=randn(1,N);
        % compute log weights for numerical stability
        lw1(k,:)=-0.5*log(2*pi*sw^2).*ones(1,N)-0.5*(y(1,k)*ones(1,N)-xs1(k,:)).^2./sw^2;
    else
        % propagate particles according to the prior
        xs1(k,:)=phi.*xs1(k-1,:)+sv.*randn(1,N);
        % compute log weights for numerical stability
        lw1(k,:)=lw1(k-1,:)-0.5*log(2*pi*sw^2).*ones(1,N)-0.5*(y(1,k)*ones(1,N)-xs1(k,:)).^2./sw^2;
    end
    varlogw1(1,k)=var(lw1(k,:));
    lmax=max(lw1(k,:));
    w1(k,:)=exp(lw1(k,:)-lmax);  % correct only up to a multiplicative factor for unnormalized weights
    wnorm1(k,:)=w1(k,:)./sum(w1(k,:));
    % effective sample size
    ess1(1,k)=1/sum(wnorm1(k,:).^2);
    % compute estimate of E(Xk|y1:k)
    xmean1(1,k)=sum(wnorm1(k,:).*xs1(k,:));
    % compute estimate of Var(Xk|y1:k)
    xvar1(1,k)=sum(wnorm1(k,:).*xs1(k,:).^2)-xmean1(1,k)^2;
end
% SIS using the locally optimal proposal
% variance of the locally optimal proposal
ss2=sw^2*sv^2/(sv^2+sw^2);
ss=sqrt(ss2);
for k=1:T
    if (k==1)
        % sample particles from the initial proposal, i.e. p(x1|y1)
        xs2(k,:)=ss2*y(1,k)*ones(1,N)/sw^2+ss.*randn(1,N);
        % compute log weights for numerical stability
        lw2(k,:)=-0.5*log(2*pi*(sw^2+sv^2)).*ones(1,N)-0.5*(y(1,k)*ones(1,N)).^2./(sw^2+sv^2);
    else
        % propagate particles according to p(xk|xk-1,yk)
        xs2(k,:)=ss2.*(phi.*xs2(k-1,:)./sv^2+y(1,k)/sw^2)+ss.*randn(1,N);
        lw2(k,:)=lw2(k-1,:)-0.5*log(2*pi*(sw^2+sv^2)).*ones(1,N)-0.5*(y(1,k)*ones(1,N)-phi.*xs2(k-1,:)).^2./(sw^2+sv^2);
    end
    varlogw2(1,k)=var(lw2(k,:));
    lmax=max(lw2(k,:));
    w2(k,:)=exp(lw2(k,:)-lmax);  % correct only up to a multiplicative factor for unnormalized weights
    wnorm2(k,:)=w2(k,:)./sum(w2(k,:));
    % effective sample size
    ess2(1,k)=1/sum(wnorm2(k,:).^2);
    % compute estimate of E(Xk|y1:k)
    xmean2(1,k)=sum(wnorm2(k,:).*xs2(k,:));
    % compute estimate of Var(Xk|y1:k)
    xvar2(1,k)=sum(wnorm2(k,:).*xs2(k,:).^2)-xmean2(1,k)^2;
end
% display ESS
figure(1)
plot(ess1,'r')
hold on
plot(ess2,'b')
axis([1 T 0 N])
print essprioroptimalhighsw -depsc
% display variance of log weights
figure(2)
subplot(2,1,1)
plot(varlogw1,'r');
subplot(2,1,2)
plot(varlogw2,'b');
print varlogweightprioroptimalhighsw -depsc
% display absolute error: exact conditional mean versus SIS
figure(3)
plot(abs(mf-xmean1),'r');
hold on
plot(abs(mf-xmean2),'b');
axis([1 T 0 5])
print errorxmeanprioroptkalman -depsc
% display exact variance versus SIS
figure(4)
plot(xvar1,'r');
hold on
plot(xvar2,'b');
plot(vf,'c');
axis([1 T 0 3])
print vmeanprioroptkalman -depsc
hold off
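A detail worth noting in the MATLAB code above: both SIS loops keep log weights and subtract the maximum before exponentiating. The subtracted constant cancels in the normalization, so the normalized weights are exact while overflow and underflow are avoided. A minimal Python sketch of the same trick, assuming a vector of unnormalized log weights (the function name is mine):

import numpy as np

def normalize_log_weights(lw):
    # Subtracting max(lw) cancels in the normalization, so the result
    # equals exp(lw)/sum(exp(lw)) without overflow or underflow.
    w = np.exp(lw - np.max(lw))
    return w / np.sum(w)

lw = np.array([-1000.0, -1001.0, -1002.0])  # naive exp() would underflow to 0
wn = normalize_log_weights(lw)
ess = 1.0 / np.sum(wn ** 2)   # effective sample size, as in the MATLAB demo
print(wn, ess)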

Python:

# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 21:19:55 2019
@author: win10
"""
import numpy as np
from scipy import stats
from numpy.random import randn, random
import matplotlib.pyplot as plt

def gaussian_particles(mean, std, N):
    """Initialize N particles from a Gaussian around the initial state."""
    particles = np.empty(N)
    particles[:] = mean + (randn(N) * std)
    return particles

def predict(particles, d, std, dt=1.):
    """Propagate the particles through the (very simple) state update model."""
    N = len(particles)
    particles += (d * dt) + (randn(N) * std)
    return particles

def update(particles, weights, z, R):
    """Reweight particles by the likelihood of the measurement z, in place."""
    weights.fill(1.)
    weights *= stats.norm(z, R).pdf(particles)
    weights += 1.e-100          # avoid round-off to zero
    weights /= np.sum(weights)  # normalize

def simple_resample(particles, weights):
    """Multinomial resampling: draw particle indexes from the weight CDF."""
    N = len(particles)
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.     # avoid round-off error
    indexes = np.searchsorted(cumulative_sum, random(N))
    # resample according to indexes
    particles[:] = particles[indexes]
    weights.fill(1.0 / N)

def neff(weights):
    """Effective sample size: 1 / sum of squared normalized weights."""
    return 1. / np.sum(np.square(weights))

def estimate(particles, weights):
    """Return the mean and variance of the weighted particles."""
    mean = np.average(particles, weights=weights)
    var = np.average((particles - mean) ** 2, weights=weights)
    return mean, var

if __name__ == '__main__':
    N = 100       # number of particles
    x_0 = 0.1     # initial state
    x_N = 0.001   # noise std in the system
    x_R = 0.001   # noise std in the measurement
    T = 10        # time steps
    d = 0.001     # very simple state update model

    # initial particles, Gaussian distributed
    particles = gaussian_particles(x_0, x_N, N)
    weights = np.ones(N) / N
    true_state = [x_0]
    for t in range(T):
        # the true state drifts deterministically
        true_state.append(true_state[t] + d)
        # measurement: the true state plus some random sensor noise
        z = true_state[t + 1] + (randn() * x_R)
        # predict particles forward through the state model
        predict(particles, d, x_N)
        # incorporate the measurement and update our belief (posterior)
        update(particles, weights, z=z, R=x_R)
        # resample if the number of effective particles drops below N/2
        if neff(weights) < N / 2:
            simple_resample(particles, weights)
        mu, var = estimate(particles, weights)
    plt.plot(np.arange(len(true_state)), true_state)
    plt.show()
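The simple_resample function above implements multinomial resampling. A common lower-variance alternative is systematic resampling, which uses a single uniform draw and N evenly spaced pointers into the weight CDF. A minimal sketch under the same in-place conventions (the function name is mine):

import numpy as np

def systematic_resample(particles, weights):
    # One random offset in [0, 1/N), then N evenly spaced positions.
    N = len(particles)
    positions = (np.random.random() + np.arange(N)) / N
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.   # guard against round-off
    indexes = np.searchsorted(cumulative_sum, positions)
    particles[:] = particles[indexes]
    weights.fill(1.0 / N)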

References:
1. Gaussian Process with Particle Filter - Part 2 (Python source code)
2. Oxford course site (with MATLAB code)
3. Douban: particle filter summary
