fix email
sth4nth committed Oct 29, 2013
1 parent 21e21e2 commit 95369fa
Showing 62 changed files with 62 additions and 45 deletions.
2 changes: 1 addition & 1 deletion chapter01/entropy.m
@@ -1,6 +1,6 @@
 function z = entropy(x)
 % Compute entropy H(x) of a discrete variable x.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 n = numel(x);
 x = reshape(x,1,n);
 [u,~,label] = unique(x);
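
For orientation, a minimal usage sketch of the estimator above (illustrative, not part of the commit; it assumes the toolbox's base-2 log convention, under which a uniform 4-way variable has entropy close to 2 bits):

x = randi(4, 1, 10000);   % hypothetical sample of a discrete variable with 4 states
H = entropy(x);           % for a near-uniform sample, expect H close to log2(4) = 2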
2 changes: 1 addition & 1 deletion chapter01/jointEntropy.m
@@ -1,6 +1,6 @@
 function z = jointEntropy(x, y)
 % Compute joint entropy H(x,y) of two discrete variables x and y.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 assert(numel(x) == numel(y));
 n = numel(x);
 x = reshape(x,1,n);
2 changes: 1 addition & 1 deletion chapter01/mutInfo.m
@@ -1,6 +1,6 @@
 function z = mutInfo(x, y)
 % Compute mutual information I(x,y) of two discrete variables x and y.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 assert(numel(x) == numel(y));
 n = numel(x);
 x = reshape(x,1,n);
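
The three chapter01 estimators are linked by the standard identity I(x,y) = H(x) + H(y) - H(x,y), which gives a quick consistency check (a sketch, assuming all three functions share the same log base):

x = randi(2, 1, 5000);   % hypothetical samples, drawn independently,
y = randi(2, 1, 5000);   % so I(x,y) should be near 0
d = mutInfo(x, y) - (entropy(x) + entropy(y) - jointEntropy(x, y));   % should be ~0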
2 changes: 1 addition & 1 deletion chapter01/relatEntropy.m
@@ -1,6 +1,6 @@
 function z = relatEntropy (x, y)
 % Compute relative entropy (a.k.a KL divergence) KL(p(x)||p(y)) of two discrete variables x and y.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 assert(numel(x) == numel(y));
 n = numel(x);
 x = reshape(x,1,n);
1 change: 0 additions & 1 deletion chapter02/demo.m

This file was deleted.

2 changes: 1 addition & 1 deletion chapter02/pdfDirichletLn.m
@@ -3,7 +3,7 @@
 % X: d x n data matrix satifying (sum(X,1)==ones(1,n) && X>=0)
 % a: d x k parameters
 % y: k x n probability density
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 X = bsxfun(@times,X,1./sum(X,1));
 if size(a,1) == 1
     a = repmat(a,size(X,1),1);
2 changes: 1 addition & 1 deletion chapter02/pdfGaussLn.m
@@ -1,6 +1,6 @@
 function y = pdfGaussLn(X, mu, sigma)
 % Compute log pdf of a Gaussian distribution.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).

 [d,n] = size(X);
 k = size(mu,2);
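
For reference, the quantity this function is documented to compute is the standard Gaussian log-density; for a d-dimensional column x of X it reads

\ln \mathcal{N}(x \mid \mu, \Sigma) = -\frac{1}{2}\left[(x-\mu)^{\top}\Sigma^{-1}(x-\mu) + d\ln(2\pi) + \ln\lvert\Sigma\rvert\right],

evaluated per column of X (and, given k = size(mu,2) in the hunk, presumably for each of the k candidate means).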
7 changes: 3 additions & 4 deletions chapter02/pdfKdeLn.m
@@ -1,6 +1,5 @@
 function z = pdfKdeLn (X, Y, sigma)
 % Compute log pdf of kernel density estimator.
-% Written by Mo Chen (mochen80@gmail.com).
-D = bsxfun(@plus,full(dot(X,X,1)),full(dot(Y,Y,1))')-full(2*(Y'*X));
-z = logSumExp(D/(-2*sigma^2),1)-0.5*log(2*pi)-log(sigma*size(Y,2));
-endfunction
+% Written by Mo Chen (sth4nth@gmail.com).
+D = bsxfun(@plus,full(dot(X,X,1)),full(dot(Y,Y,1))')-full(2*(Y'*X));
+z = logSumExp(D/(-2*sigma^2),1)-0.5*log(2*pi)-log(sigma*size(Y,2));
2 changes: 1 addition & 1 deletion chapter02/pdfMnLn.m
@@ -1,6 +1,6 @@
 function z = pdfMnLn (x, p)
 % Compute log pdf of a multinomial distribution.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 if numel(x) ~= numel(p)
     n = numel(x);
     x = reshape(x,1,n);
2 changes: 1 addition & 1 deletion chapter02/pdfStLn.m
@@ -1,6 +1,6 @@
 function y = pdfStLn(X, mu, sigma, v)
 % Compute log pdf of a student-t distribution.
-% Written by mo Chen (mochen80@gmail.com).
+% Written by mo Chen (sth4nth@gmail.com).
 [d,k] = size(mu);

 if size(sigma,1)==d && size(sigma,2)==d && k==1
2 changes: 1 addition & 1 deletion chapter02/pdfWishartLn.m
@@ -1,6 +1,6 @@
 function y = pdfWishartLn(Sigma, v, W)
 % Compute log pdf of a Wishart distribution.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 d = length(Sigma);
 B = -0.5*v*logdet(W)-0.5*v*d*log(2)-logmvgamma(0.5*v,d);
 y = B+0.5*(v-d-1)*logdet(Sigma)-0.5*trace(W\Sigma);
2 changes: 1 addition & 1 deletion chapter02/pdflogVmfLn.m
@@ -1,6 +1,6 @@
 function y = pdflogVmfLn(X, mu, kappa)
 % Compute log pdf of a von Mises-Fisher distribution.
-% Written by Mo Chen (mochen80@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 d = size(X,1);
 c = (d/2-1)*log(kappa)-(d/2)*log(2*pi)-logbesseli(d/2-1,kappa);
 q = bsxfun(@times,mu,kappa)'*X;
1 change: 1 addition & 0 deletions chapter03/linInfer.m
@@ -2,6 +2,7 @@
 % Compute linear model reponse y = w'*x+b and likelihood
 % X: d x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 w = model.w;
 b = model.w0;
 y = w'*X+b;
1 change: 1 addition & 0 deletions chapter03/regress.m
@@ -2,6 +2,7 @@
 % Fit linear regression model t=w'x+b
 % X: d x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     lambda = 0;
 end
1 change: 1 addition & 0 deletions chapter03/regressEbEm.m
@@ -2,6 +2,7 @@
 % Fit empirical Bayesian linear model with EM
 % X: d x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
     beta = 0.5;
1 change: 1 addition & 0 deletions chapter03/regressEbFp.m
@@ -2,6 +2,7 @@
 % Fit empirical Bayesian linear model with Mackay fixed point method
 % X: d x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
     beta = 0.5;
1 change: 1 addition & 0 deletions chapter04/classFda.m
@@ -1,5 +1,6 @@
 function U = classFda(X, y, d)
 % Fisher (linear) discriminant analysis
+% Written by Mo Chen (sth4nth@gmail.com).
 n = size(X,2);
 k = max(y);

1 change: 1 addition & 0 deletions chapter04/classLogitBin.m
@@ -1,5 +1,6 @@
 function [model, llh] = classLogitBin(X, t, lambda)
 % logistic regression for binary classification (Bernoulli likelihood)
+% Written by Mo Chen (sth4nth@gmail.com).
 if any(unique(t) ~= [0,1])
     error('t must be a 0/1 vector!');
 end
1 change: 1 addition & 0 deletions chapter04/classLogitMul.m
@@ -1,5 +1,6 @@
 function [model, llh] = classLogitMul(X, t, lambda, method)
 % logistic regression for multiclass problem (Multinomial likelihood)
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 4
     method = 1;
 end
1 change: 1 addition & 0 deletions chapter04/sigmoid.m
@@ -1,2 +1,3 @@
 function y = sigmoid(x)
+% Written by Mo Chen (sth4nth@gmail.com).
 y = 1./(1+exp(-x));
2 changes: 1 addition & 1 deletion chapter04/softmax.m
@@ -1,7 +1,7 @@
 function s = softmax(x, dim)
 % Compute softmax
 % By default dim = 1 (columns).
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin == 1,
     % Determine which dimension sum will use
     dim = find(size(x)~=1,1);
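
A small sanity check for the function above (a sketch, not part of the commit): with the default dim = 1, each column of the output should normalize to one.

x = randn(3, 5);
s = softmax(x);    % dim defaults to the first non-singleton dimension, i.e. columns here
sum(s, 1)          % every entry should equal 1 up to rounding error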
1 change: 1 addition & 0 deletions chapter06/knCenterize.m
@@ -1,5 +1,6 @@
 function Kc = knCenterize(kn, X, Xt)
 % Centerize the data in the kernel space
+% Written by Mo Chen (sth4nth@gmail.com).
 K = kn(X,X);
 mK = mean(K);
 mmK = mean(mK);
1 change: 1 addition & 0 deletions chapter06/knGauss.m
@@ -1,5 +1,6 @@
 function K = knGauss(X, Y, s)
 % Gaussian (RBF) kernel
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     s = 1;
 end
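
The truncated body presumably implements the usual RBF parameterization k(x,y) = exp(-||x - y||^2 / (2*s^2)) with bandwidth s defaulting to 1; the exact scaling is an assumption here, but any kernel of this form has a unit diagonal, which is easy to check:

X = randn(2, 10);        % hypothetical inputs
K = knGauss(X, X);       % 10 x 10 Gram matrix
max(abs(diag(K) - 1))    % should be ~0, since k(x,x) = exp(0) = 1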
1 change: 1 addition & 0 deletions chapter06/knInfer.m
@@ -1,5 +1,6 @@
 function [y, sigma, p] = knInfer(x, model)
 % inference for kernel model
+% Written by Mo Chen (sth4nth@gmail.com).
 kn = model.kn;
 a = model.a;
 X = model.X;
1 change: 1 addition & 0 deletions chapter06/knLin.m
@@ -1,4 +1,5 @@
 function K = knLin(X, Y)
 % Linear kernel (inner product)
+% Written by Mo Chen (sth4nth@gmail.com).
 K = X'*Y;

1 change: 1 addition & 0 deletions chapter06/regressKn.m
@@ -1,5 +1,6 @@
 function model = regressKn(X, t, lambda, kn)
 % Gaussian process for regression
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 4
     kn = @knGauss;
 end
1 change: 1 addition & 0 deletions chapter07/classRvmEbEm.m
@@ -1,6 +1,7 @@
 function [model, llh] = classRvmEbEm(X, t, alpha)
 % Relevance Vector Machine classification training by empirical bayesian (ARD)
 % using standard EM update
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
 end
1 change: 1 addition & 0 deletions chapter07/classRvmEbFp.m
@@ -1,6 +1,7 @@
 function [model, llh] = classRvmEbFp(X, t, alpha)
 % Relevance Vector Machine classification training by empirical bayesian (ARD)
 % using fix point update (Mackay update)
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
 end
2 changes: 1 addition & 1 deletion chapter07/optLogitNewton.m
@@ -4,7 +4,7 @@
 % t: 1 x n 0/1 label
 % A: d x d regularization penalty
 % w: d x 1 initial value of w
-
+% Written by Mo Chen (sth4nth@gmail.com).
 [d,n] = size(X);

 if nargin < 4
1 change: 1 addition & 0 deletions chapter07/regressRvmEbCd.m
@@ -3,6 +3,7 @@
 % Analysis of sparse Bayesian learning. NIPS(2002). By Faul and Tipping
 % Fast marginal likelihood maximisation for sparse Bayesian models.
 % AISTATS(2003). by Tipping and Faul
+% Written by Mo Chen (sth4nth@gmail.com).
 [d,n] = size(X);
 xbar = mean(X,2);
 tbar = mean(t,2);
1 change: 1 addition & 0 deletions chapter07/regressRvmEbEm.m
@@ -1,6 +1,7 @@
 function [model, llh] = regressRvmEbEm(X, t, alpha, beta)
 % Relevance Vector Machine regression training by empirical bayesian (ARD)
 % using standard EM update
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
     beta = 0.5;
1 change: 1 addition & 0 deletions chapter07/regressRvmEbFp.m
@@ -1,6 +1,7 @@
 function [model, llh] = regressRvmEbFp(X, t, alpha, beta)
 % Relevance Vector Machine regression training by empirical bayesian (ARD)
 % using fix point update (Mackay update)
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
     beta = 0.5;
1 change: 1 addition & 0 deletions chapter07/regressRvmEbFpSvd.m
@@ -1,6 +1,7 @@
 function [model, llh] = regressRvmEbFpSvd(X, t, alpha, beta)
 % Relevance Vector Machine regression training by empirical bayesian (ARD)
 % using fix point update (Mackay update) with SVD
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     alpha = 0.02;
     beta = 0.5;
2 changes: 1 addition & 1 deletion chapter09/clusterKmeans.m
@@ -2,7 +2,7 @@
 % Perform k-means clustering.
 % X: d x n data matrix
 % k: number of seeds
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 n = size(X,2);
 last = 0;
 label = ceil(k*rand(1,n)); % random initialization
2 changes: 1 addition & 1 deletion chapter09/mixBernoulliEm.m
@@ -2,7 +2,7 @@
 % Perform EM algorithm for fitting the Bernoulli mixture model.
 % X: d x n data matrix
 % init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 %% initialization
 fprintf('EM for mixture model: running ... \n');
 n = size(X,2);
2 changes: 1 addition & 1 deletion chapter09/mixGaussEm.m
@@ -2,7 +2,7 @@
 % Perform EM algorithm for fitting the Gaussian mixture model.
 % X: d x n data matrix
 % init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 %% initialization
 fprintf('EM for Gaussian mixture: running ... \n');
 R = initialization(X,init);
2 changes: 1 addition & 1 deletion chapter09/mixMnEm.m
@@ -2,7 +2,7 @@
 % Perform EM algorithm for fitting the multinomial mixture model.
 % X: d x n data matrix
 % init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 %% initialization
 fprintf('EM for mixture model: running ... \n');
 n = size(X,2);
1 change: 0 additions & 1 deletion chapter10/classLogitVb.m

This file was deleted.

3 changes: 1 addition & 2 deletions chapter10/mixGaussVb.m
@@ -3,8 +3,7 @@
 % X: d x n data matrix
 % init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
 % Reference: Pattern Recognition and Machine Learning by Christopher M. Bishop (P.474)
-% Written by Michael Chen (sth4nth@gmail.com).
-
+% Written by Mo Chen (sth4nth@gmail.com).
 fprintf('Variational Bayesian Gaussian mixture: running ... \n');
 [d,n] = size(X);
 if nargin < 3
1 change: 1 addition & 0 deletions chapter10/regressRvmVb.m
@@ -2,6 +2,7 @@
 % Fit empirical Bayesian linear model with EM
 % X: m x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 [m,n] = size(X);
 if nargin < 3
     a0 = 1e-4;
1 change: 1 addition & 0 deletions chapter10/regressVb.m
@@ -2,6 +2,7 @@
 % Fit empirical Bayesian linear model with EM
 % X: m x n data
 % t: 1 x n response
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin < 3
     a0 = 1e-4;
     b0 = 1e-4;
2 changes: 1 addition & 1 deletion chapter11/rndDirichlet.m
@@ -1,5 +1,5 @@
 function x = rndDirichlet(a)
 % Sampling from a Dirichlet distribution.
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 x = gamrnd(a,1);
 x = x/sum(x);
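
The two-line body uses the classical gamma-normalization construction: if g_i ~ Gamma(a_i, 1) independently, then g / sum(g) ~ Dirichlet(a), so the normalized gamrnd draw has the desired distribution. For example:

a = [2 3 5];            % hypothetical concentration parameters
x = rndDirichlet(a);    % nonnegative entries, and sum(x) == 1 by construction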
2 changes: 1 addition & 1 deletion chapter11/rndDiscrete.m
@@ -1,6 +1,6 @@
 function x = rndDiscrete(p, n)
 % Sampling from a discrete distribution (multinomial).
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin == 1
     n = 1;
 end
2 changes: 1 addition & 1 deletion chapter11/rndGauss.m
@@ -1,6 +1,6 @@
 function x = rndGauss(mu,Sigma,n)
 % Sampling from a Gaussian distribution.
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin == 2
     n = 1;
 end
2 changes: 1 addition & 1 deletion chapter12/dimFa.m
@@ -3,7 +3,7 @@
 % X: d x n data matrix
 % p: dimension of target space
 % Reference: Pattern Recognition and Machine Learning by Christopher M. Bishop
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 [d,n] = size(X);
 mu = mean(X,2);
 X = bsxfun(@minus,X,mu);
3 changes: 1 addition & 2 deletions chapter12/dimPca.m
@@ -2,8 +2,7 @@
 % Perform standard PCA (spectral method).
 % X: d x n data matrix
 % p: dimension of target space (p>=1) or ratio (0<p<1)
-% Written by Michael Chen (sth4nth@gmail.com).
-
+% Written by Mo Chen (sth4nth@gmail.com).
 opts.disp = 0;
 opts.issym = 1;
 opts.isreal = 1;
2 changes: 1 addition & 1 deletion chapter12/dimPcaEm.m
@@ -5,7 +5,7 @@
 % Reference:
 % Pattern Recognition and Machine Learning by Christopher M. Bishop
 % Probabilistic Principal Component Analysis by Michael E. Tipping & Christopher M. Bishop
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).

 [m,n] = size(X);
 mu = mean(X,2);
2 changes: 1 addition & 1 deletion chapter12/dimPcaLs.m
@@ -5,7 +5,7 @@
 % Reference:
 % Pattern Recognition and Machine Learning by Christopher M. Bishop
 % EM algorithms for PCA and SPCA by Sam Roweis
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 [d,n] = size(X);
 X = bsxfun(@minus,X,mean(X,2));
 W = rand(d,p);
3 changes: 1 addition & 2 deletions chapter12/dimPcaVb.m
@@ -5,8 +5,7 @@
 % Reference:
 % Pattern Recognition and Machine Learning by Christopher M. Bishop
 % Probabilistic Principal Component Analysis by Michael E. Tipping & Christopher M. Bishop
-% Written by Michael Chen (sth4nth@gmail.com).
-
+% Written by Mo Chen (sth4nth@gmail.com).
 [m,n] = size(X);
 if nargin < 3
     a0 = 1e-4;
2 changes: 1 addition & 1 deletion chapter13/discreternd.m
@@ -1,6 +1,6 @@
 function x = discreternd(p, n)
 % Sampling from a discrete distribution (multinomial).
-% Written by Michael Chen (sth4nth@gmail.com).
+% Written by Mo Chen (sth4nth@gmail.com).
 if nargin == 1
     n = 1;
 end