Skip to content

Commit

Permalink
Merge branch 'master' of https://github.com/PRML/PRMLT
Browse files Browse the repository at this point in the history
  • Loading branch information
sth4nth committed Feb 9, 2017
2 parents 7067c94 + a793365 commit 46db0f9
Show file tree
Hide file tree
Showing 8 changed files with 41 additions and 15 deletions.
5 changes: 2 additions & 3 deletions TODO.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
TODO:
ch08: BP
ch10: EP
ch13: LDS stability
ch05: MLP bias
ch13: LDS numerical stability (numerical stable (square root) version of Kalman filter and smoother)
ch05: MLP bias and gradient unit
2 changes: 1 addition & 1 deletion chapter04/logitBinPred.m
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,6 @@
% Written by Mo Chen ([email protected]).
X = [X;ones(1,size(X,2))];
w = model.w;
p = exp(-log1pexp(w'*X));
p = exp(-log1pexp(-w'*X));
y = round(p);

23 changes: 23 additions & 0 deletions chapter13/HMM/hmmRecSmoother_.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
function [ gamma, c ] = hmmRecSmoother_( M, A, s )
% Forward-backward (recursive gamma, no alpha-beta) algorithm for HMM to compute the posterior p(z_i|x).
% Runs the direct gamma recursion forward (PRML 13.59), then a backward pass
% that corrects each gamma using the already-computed gamma at t+1, so
% alpha and beta never need to be stored separately.
% Input:
%   M: k x n emission data matrix M = E*X (per-state likelihood of each observation)
%   A: k x k transition probability matrix
%   s: k x 1 starting probability p(z_1)
% Output:
%   gamma: k x n posterior p(z_i|x)
%   c: 1 x n normalization constants from the forward pass
%      (NOTE(review): loglikelihood is presumably sum(log(c)) as in the
%      standard scaled forward algorithm — confirm against hmmSmoother_)
% Written by Mo Chen [email protected]
[K,T] = size(M);
At = A';
c = zeros(1,T); % normalization constant
gamma = zeros(K,T);
% Forward pass: filtered distributions p(z_t|x_1..t), normalized column-wise.
[gamma(:,1),c(1)] = normalize(s.*M(:,1),1);
for t = 2:T
[gamma(:,t),c(t)] = normalize((At*gamma(:,t-1)).*M(:,t),1); % 13.59
end
% Backward pass: rescale filtered gamma into the smoothed posterior p(z_t|x_1..T)
% by propagating gamma(:,t+1) back through the (column-normalized) transition.
for t = T-1:-1:1
gamma(:,t) = normalize(bsxfun(@times,A,gamma(:,t)),1)*gamma(:,t+1);
end

6 changes: 4 additions & 2 deletions chapter13/HMM/hmmSmoother.m
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
function [gamma, alpha, beta, c] = hmmSmoother(x, model)
% HMM smoothing algorithm (normalized forward-backward or normalized alpha-beta algorithm). This is a wrapper function which transforms input and calls the underlying algorithm
% Unlike the method described in the book of PRML, the alpha returned is the normalized version: gamma(t)=p(z_t|x_{1:T})
% Computing unnormalized version gamma(t)=p(z_t,x_{1:T}) is numerical unstable, which grows exponential fast to infinity.
% Unlike the method described in the book of PRML, the alpha and beta
% returned is the normalized.
% Computing the unnormalized version of alpha and beta is numerically unstable, as it grows exponentially fast to infinity.
% Input:
% x: 1 x n integer vector which is the sequence of observations
% model: model structure
Expand All @@ -20,3 +21,4 @@
X = sparse(x,1:n,1,d,n);
M = E*X;
[gamma, alpha, beta, c] = hmmSmoother_(M, A, s);
% [gamma,c] = hmmRecSmoother_(M, A, s);
5 changes: 3 additions & 2 deletions chapter13/HMM/hmmSmoother_.m
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
function [gamma, alpha, beta, c] = hmmSmoother_(M, A, s)
% Implementation function of the HMM smoothing algorithm.
% Unlike the method described in the book of PRML, the alpha returned is the normalized version: gamma(t)=p(z_t|x_{1:T})
% Computing unnormalized version gamma(t)=p(z_t,x_{1:T}) is numerical unstable, which grows exponential fast to infinity.
% Unlike the method described in the book of PRML, the alpha and beta
% returned is the normalized.
% Computing the unnormalized version of alpha and beta is numerically unstable, as it grows exponentially fast to infinity.
% Input:
% M: k x n emission data matrix M=E*X
% A: k x k transition matrix
Expand Down
5 changes: 3 additions & 2 deletions chapter13/HMM/hmmViterbi.m
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
function z = hmmViterbi(x, model)
function [z, llh] = hmmViterbi(x, model)
% Viterbi algorithm calculated in log scale to improve numerical stability.
% This is a wrapper function which transform input and call underlying algorithm
% Input:
% x: 1 x n integer vector which is the sequence of observations
% model: model structure
% Output:
% z: 1 x n latent state
% llh: loglikelihood
% Written by Mo Chen ([email protected]).
A = model.A;
E = model.E;
Expand All @@ -15,4 +16,4 @@
d = max(x);
X = sparse(x,1:n,1,d,n);
M = E*X;
z = hmmViterbi_(M, A, s);
[z,llh] = hmmViterbi_(M, A, s);
8 changes: 4 additions & 4 deletions chapter13/HMM/hmmViterbi_.m
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
function [z, p] = hmmViterbi_(M, A, s)
function [z, llh] = hmmViterbi_(M, A, s)
% Implementation function of the Viterbi algorithm.
% Input:
% M: k x n emission data matrix M=E*X
% A: k x k transition matrix
% s: k x 1 starting probability (prior)
% Output:
% z: 1 x n latent state
% p: 1 x n probability
% llh: loglikelihood
% Written by Mo Chen ([email protected]).
[k,n] = size(M);
Z = zeros(k,n);
Expand All @@ -20,6 +20,6 @@
Z = Z(idx,:);
Z(:,t) = 1:k;
end
[v,idx] = max(v);
[llh,idx] = max(v);
z = Z(idx,:);
p = exp(v);

2 changes: 1 addition & 1 deletion demo/ch13/hmm_demo.m
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
n = 10000;
[x, model] = hmmRnd(d, k, n);
%%
z = hmmViterbi(x,model);
[z,p] = hmmViterbi(x,model);
%%
[alpha,llh] = hmmFilter(x,model);
%%
Expand Down

0 comments on commit 46db0f9

Please sign in to comment.