[regression] finished main text and exercises
This commit is contained in:
@@ -1,32 +1,42 @@
|
||||
% x, y from exercise 8.3
|
||||
function [p, ps, mses] = gradientDescent(x, y, func, p0, epsilon, threshold)
% Gradient descent for fitting a function to data pairs.
%
% Arguments: x, vector of the x-data values.
%            y, vector of the corresponding y-data values.
%            func, function handle func(x, p) evaluating the model.
%            p0, vector with initial parameter values.
%            epsilon: factor multiplying the gradient (learning rate).
%            threshold: minimum value for the gradient norm; the descent
%                       stops once norm(gradient) drops below it.
%
% Returns: p, vector with the final parameter values.
%          ps: 2D-vector with all the parameter vectors traversed
%              (one column per descent step).
%          mses: vector with the corresponding mean squared errors.
%
% NOTE(review): the rendered diff interleaved the old pre-refactor body
% (hard-coded position/eps and an unterminated while loop) with the new
% one; this is the coherent post-refactor implementation.
p = p0;
% sentinel gradient, large enough that the loop body runs at least once:
gradient = ones(1, length(p0)) * 1000.0;
ps = [];
mses = [];
while norm(gradient) > threshold
    ps = [ps, p(:)];  % record the parameters visited at this step
    mses = [mses, meanSquaredError(x, y, func, p)];
    gradient = meanSquaredGradient(x, y, func, p);
    p = p - epsilon * gradient;  % step against the gradient
end
end
|
||||
|
||||
function mse = meanSquaredError(x, y, func, p)
% Mean squared error between the data y and the model prediction func(x, p).
%
% Arguments: x, vector of x-data values.
%            y, vector of corresponding y-data values.
%            func, function handle func(x, p) evaluating the model.
%            p, parameter vector passed through to func.
%
% Returns: mse, the mean of the squared residuals.
squared_residuals = (y - func(x, p)) .^ 2;
mse = mean(squared_residuals);
end
|
||||
|
||||
function gradmse = meanSquaredGradient(x, y, func, p)
% Numerical gradient of the mean squared error with respect to p,
% estimated by one-sided (forward) finite differences.
%
% Arguments: x, vector of x-data values.
%            y, vector of corresponding y-data values.
%            func, function handle func(x, p) evaluating the model.
%            p, parameter vector at which the gradient is taken.
%
% Returns: gradmse, gradient vector with the same shape as p.
gradmse = zeros(size(p));  % same shape as p
h = 1e-5; % stepsize for derivatives
mse = meanSquaredError(x, y, func, p);
for i = 1:length(p) % for each coordinate ...
    % Note: renamed from 'pi' to avoid shadowing the builtin constant pi.
    pd = p;
    pd(i) = pd(i) + h; % displace i-th parameter
    msepd = meanSquaredError(x, y, func, pd);
    gradmse(i) = (msepd - mse)/h;
end
end
|
||||
|
||||
% Visualize the fit and the optimization history.
% NOTE(review): relies on workspace variables x, y, position, errors set
% by the preceding gradient-descent script — confirm they exist.
figure()

% Top panel: data points together with the fitted line.
subplot(2,1,1)
hold on
scatter(x, y, 'displayname', 'data')
xfit = min(x):0.01:max(x);                  % dense grid over the data range
yfit = position(1) .* xfit + position(2);   % line from fitted slope/intercept
plot(xfit, yfit, 'displayname', 'fit')
xlabel('Input')
ylabel('Output')
grid on
legend show

% Bottom panel: mean squared error at every optimization step.
subplot(2,1,2)
plot(errors)
xlabel('optimization steps')
ylabel('error')
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
function [p, ps, mses] = gradientDescentPower(x, y, p0, epsilon, threshold)
% Gradient descent for fitting a power-law.
%
% Arguments: x, vector of the x-data values.
%            y, vector of the corresponding y-data values.
%            p0, vector with initial values for c and alpha.
%            epsilon: factor multiplying the gradient.
%            threshold: minimum value for the gradient norm.
%
% Returns: p, vector with the final parameter values.
%          ps: 2D-vector with all the parameter tuples traversed.
%          mses: vector with the corresponding mean squared errors.
p = p0;
ps = [];
mses = [];
% sentinel value so the first norm() test is guaranteed to pass:
gradient = 1000.0 * ones(1, length(p0));
while norm(gradient) > threshold
    ps = [ps, p(:)];  % log the current parameter tuple
    mses = [mses, meanSquaredErrorPower(x, y, p)];
    gradient = meanSquaredGradientPower(x, y, p);
    p = p - epsilon * gradient;  % descend along the negative gradient
end
end
|
||||
|
||||
function mse = meanSquaredErrorPower(x, y, p)
% Mean squared error of the power law p(1)*x.^p(2) against the data (x, y).
%
% Arguments: x, vector of x-data values.
%            y, vector of corresponding y-data values.
%            p, two-element vector [c, alpha] of power-law parameters.
%
% Returns: mse, mean of the squared residuals.
prediction = p(1) * x .^ p(2);
mse = mean((y - prediction) .^ 2);
end
|
||||
|
||||
function gradmse = meanSquaredGradientPower(x, y, p)
% Numerical gradient of the power-law mean squared error with respect to p,
% estimated by one-sided (forward) finite differences.
%
% Arguments: x, vector of x-data values.
%            y, vector of corresponding y-data values.
%            p, two-element vector [c, alpha] of power-law parameters.
%
% Returns: gradmse, gradient vector with the same shape as p.
gradmse = zeros(size(p));  % same shape as p
h = 1e-5; % stepsize for derivatives
mse = meanSquaredErrorPower(x, y, p);
for i = 1:length(p) % for each coordinate ...
    % Note: renamed from 'pi' to avoid shadowing the builtin constant pi.
    pd = p;
    pd(i) = pd(i) + h; % displace i-th parameter
    msepd = meanSquaredErrorPower(x, y, pd);
    gradmse(i) = (msepd - mse)/h;
end
end
|
||||
|
||||
@@ -3,7 +3,7 @@ meansquarederrorline; % generate data
|
||||
% Fit a power law to the generated data with the generalized routine.
% NOTE(review): the rendered diff kept both the removed call to
% gradientDescentPower and its replacement; only the new call is kept here.
p0 = [2.0, 1.0];   % initial guess for [c, alpha]
eps = 0.00001;     % learning rate (shadows builtin eps; kept for compatibility)
thresh = 50.0;     % gradient-norm stopping threshold
[pest, ps, mses] = gradientDescent(x, y, @powerLaw, p0, eps, thresh);
pest
|
||||
|
||||
subplot(2, 2, 1); % top left panel
|
||||
@@ -22,7 +22,7 @@ subplot(1, 2, 2); % right panel
|
||||
hold on;
% generate x-values for plotting the fit:
xx = min(x):0.01:max(x);
% NOTE(review): the diff kept both the old inline power-law expression and
% the new powerLaw() call; the dead first assignment is removed here.
yy = powerLaw(xx, pest);
plot(xx, yy);
plot(x, y, 'o'); % plot original data
xlabel('Size [m]');
||||
Reference in New Issue
Block a user