Void’s Vault

Knowledge source for efficiency.

SLAM With the GraphSLAM Algorithm

I recently implemented the GraphSLAM algorithm with known correspondence from the book Probabilistic Robotics.

The code is fairly self-describing, so there is not much to add here. Still, I suggest reading about the algorithm itself, because the code alone may not be enough to really understand the idea behind GraphSLAM.

I have also heard that GraphSLAM introduces some additional error; workarounds can be found in the existing literature. Unfortunately, despite my efforts, my implementation seems to diverge when more than one iteration is run. My guess is that I forgot a wrapToPi somewhere. Still, the main lines of the algorithm are there. Enjoy!

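One note before the code: the listings below rely on wrapToPi from MATLAB's Mapping Toolbox to wrap angles into [-pi, pi]. If you don't have that toolbox, a minimal stand-in like the one below should behave the same for all practical purposes (it only differs in which sign it picks exactly at the +/-pi boundary). This is my own sketch, not part of the original listings.

%----------------------------wrapToPi.m (minimal stand-in)---------------------------------------
function lambda = wrapToPi(lambda)
% Wrap angles given in radians to the interval [-pi, pi], element-wise.
lambda = mod(lambda + pi, 2*pi) - pi;
end
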
%----------------------------Simulation_Script.m---------------------------------------
clearvars;

%Description of the script:
%   This script launches a GraphSLAM simulation. It will first setup the
%   ground truth, then will call the GraphSLAM algorithm. After that, the
%   script will draw the following at every timestep:
%       Robot's real pose in blue (circle with a line);
%       Robot's estimated pose in red (circle with a line);
%       The sensor's range in black;
%       Landmarks' real positions are the black dots;
%       The estimated positions of the observed landmarks are the red crosses;
%       The red ellipse is the error ellipse of the robot's pose estimate.
%
%   When the robot re-detects an already discovered landmark, the error on
%   its pose estimate is greatly reduced.
%   
%   The basic GraphSLAM algorithm does not give the error over landmarks'
%   pose estimation.
%
%   Known issue with my implementation:
%       When I raise the number of iterations, the algorithm should reuse
%       the new estimates of the landmark positions and of the robot poses
%       and refine them further. However, my implementation seems to be
%       missing something, and the estimate diverges as the number of
%       iterations grows.
%       Maybe the problem comes from the simulation itself. Otherwise, the
%       initial estimates being very close to the real positions, combined
%       with the large values of R and Q, may be what causes the algorithm
%       to diverge.

% environment parameters
max_range = 2;
map_width = 20;
map_height = 20;
Landmarks_Count = 100; %Number of landmarks in the map
dt = 1; %Timestep

iterations = 1; %Number of iterations inside the GraphSLAM algorithm

x = map_width*rand(1,Landmarks_Count);
y = map_height*rand(1,Landmarks_Count) - map_height/2;

% environment
features = [x; y];
robotPos = [0; 0; 0];

% noise
sV = 0.05; %Noise over translation velocity of the robot
sW = 0.1*pi/180; %Noise over angular velocity of the robot
sRange = 0.1; %Noise over range measurements
sAngle = 10.0*pi/180; %Noise over bearing measurement
sSignature = 0.0001; %Noise over signature measurement (close to nothing here since we assume known correspondence)

%Noise covariance matrices used by GraphSLAM
R = [0.3 0 0;
     0 0.3 0;
     0 0 0.1]; % Covariance of robot movement [x,y,theta]
Q = [0.2 0 0;
     0 0.5 0;
     0 0 0.001]; % Covariance of measurements [range;bearing;signature]

U = [0.5*ones(1,95) ; ...
     0.001*ones(1,31) 10*pi/180*ones(1,18) 0.001*ones(1,10) 5*pi/180*ones(1,18) 0.001*ones(1,18)]; %Commands

Z = 0;
C = 0;
GT = zeros(3,size(U,2)+1);
GT(:,1) = robotPos;

%Ground Truth evaluation
for t = 1:size(U,2)
    % robot movement
    v = U(1,t) + sV*randn;
    w = U(2,t) + sW*randn;
    robotPos(1) = robotPos(1) + -v/w*sin(robotPos(3)) + v/w*sin(wrapToPi(robotPos(3)+w*dt));
    robotPos(2) = robotPos(2) + v/w*cos(robotPos(3)) - v/w*cos(wrapToPi(robotPos(3)+w*dt));
    robotPos(3) = wrapToPi(robotPos(3) + w*dt);

    % measurements
    relativePos = features - repmat(robotPos(1:2,:),1,Landmarks_Count);
    ranges = sqrt(sum(relativePos.^2,1));
    % we only keep visible measurements (depending on the maximum scanning range)
    visible = find(ranges <= max_range);
    measures = relativePos(:,visible);
    ranges = ranges(visible);
    angles = wrapToPi(atan2(measures(2,:), measures(1,:)) - robotPos(3));
    % We add noise over measurements
    ranges = ranges + sRange*randn(1,numel(ranges));
    angles = wrapToPi(angles + sAngle*randn(1,numel(angles)));

    % we store the measurements
    if(numel(visible) > 0)
        Z(1:3,1:numel(visible),t) = [ranges;angles;visible];
        C(1,1:numel(visible),t) = visible;
    else
        Z(1:3,1:1,t) = [0;0;0];
        C(1,1:1,t) = 0;
    end
    %and we store the ground truth
    GT(:, t+1) = robotPos;
end

features_estimates = features + 0.1*randn(2,Landmarks_Count); %Use noisy feature initial estimates
%Call to graphSLAM
[mu, sigma] = GraphSLAM_Known_Correspondence(U, Z, C, features_estimates, R, Q, iterations);

for t = 1:size(U,2)
    fprintf('Timestep %d\n', t);

    % Drawing of Ground Truth
    figure(1)
    clf
    axis([0 map_width -map_height/2 map_height/2])
    hold on
    plot(features(1,:), features(2,:), '.k')
    plot(GT(1,t), GT(2,t), 'bo')
    line([GT(1,t) GT(1,t)+0.5*cos(GT(3,t))], [GT(2,t) GT(2,t)+0.5*sin(GT(3,t))], 'color', 'b')
    plotCircle(GT(1,t), GT(2,t), max_range, 'k')

    for indexZ = 1:numel(Z(1,:,t))
        % Drawing of the measured landmark positions (blue crosses), projected from the ground-truth pose
        plot( Z(1,indexZ,t) * cos(wrapToPi(Z(2,indexZ,t)+GT(3,t))) +GT(1,t) , Z(1,indexZ,t) * sin(wrapToPi(Z(2,indexZ,t)+GT(3,t))) + GT(2,t) ,'bx');
    end


    %Drawing of the estimated landmarks after GraphSLAM
    offsetToJ = 3*(size(U,2)+1); %skip the robot poses stored at the start of mu
    for i = offsetToJ+1:3:numel(mu)-2
        if max([mu(i) mu(i+1)] ~= [0 0])
            plot( mu(i), mu(i+1),'r+');
        end
    end

    %Drawing of the estimated robot's pose after GraphSLAM
    plot(mu(3*t+1,1), mu(3*t+2,1), 'ro')
    line([mu(3*t+1,1) mu(3*t+1,1)+0.5*cos(mu(3*t+3,1))], [mu(3*t+2,1) mu(3*t+2,1)+0.5*sin(mu(3*t+3,1))], 'color', 'r')
    Cov = sigma(3*t+1:3*t+2,3*t+1:3*t+2); %covariance of the pose estimate at t
    plotError([mu(3*t+1,1) mu(3*t+2,1)], Cov, 'r', 50)

    pause(0.1);
end
%----------------------------GraphSLAM_Known_Correspondence.m---------------------------------------
function [mu, sigma] = GraphSLAM_Known_Correspondence(u, z, c, m, R, Q, loop)
% This function is the main entry point of the GraphSLAM algorithm for the
% full SLAM problem with known correspondence.
% 
% Input parameters:
%   u : The list of commands that the robot received, for every time step.
%       Form : [v w]' for every time step.
%   z : Measurements for every time step.
%       Form : [range(:); bearing(:); signature(:)] for every time step.
%   c : The correspondence matrix. 
%   m : An estimate of the initial position of landmarks
%   R : Movement noise covariance matrix
%   Q : Measurement noise covariance matrix
%   loop: Number of iterations of the GraphSLAM algorithm
%
% Output parameters:
%   mu      - Estimated pose of the robot and positions of the landmarks
%   sigma   - Covariance matrix of the pose estimation.
% 

mu = GraphSLAM_initialize(u,m,z);
offsetToJ = 3*(numel(u(1,:))+1);

for iter = 1:loop
    [omega, epsilon] = GraphSLAM_linearize(u,z,c,mu,R,Q);
    [omega_, epsilon_] = GraphSLAM_reduce(omega, epsilon, offsetToJ);
    [mu, sigma] = GraphSLAM_solve(omega_, epsilon_, omega, epsilon, offsetToJ);
end

end
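
As mentioned above, my estimate diverges when more than one iteration is run. The following variant of the iteration loop is purely a debugging sketch of mine (not from the book): it tracks how much mu moves between iterations, so you can watch the divergence happen and stop early.

%----------------------------Debugging sketch: divergence watchdog---------------------------------------
%Hypothetical variant of the loop in GraphSLAM_Known_Correspondence.
mu = GraphSLAM_initialize(u,m,z);
offsetToJ = 3*(numel(u(1,:))+1);
mu_prev = mu;
for iter = 1:loop
    [omega, epsilon] = GraphSLAM_linearize(u,z,c,mu,R,Q);
    [omega_, epsilon_] = GraphSLAM_reduce(omega, epsilon, offsetToJ);
    [mu, sigma] = GraphSLAM_solve(omega_, epsilon_, omega, epsilon, offsetToJ);
    n = min(numel(mu), numel(mu_prev));
    delta = norm(mu(1:n) - mu_prev(1:n)); %how far the estimate moved this iteration
    fprintf('GraphSLAM iteration %d: |mu - mu_prev| = %g\n', iter, delta);
    if delta > 1e3 %arbitrary threshold; on a healthy run delta should shrink instead
        warning('Estimate appears to diverge, stopping early.');
        break;
    end
    mu_prev = mu;
end
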
%----------------------------GraphSLAM_initialize.m---------------------------------------
function [mu] = GraphSLAM_initialize(u,m,z)
% This function initializes the pose estimate vector for the GraphSLAM
% algorithm.
% It assumes that dt (the timestep) is 1.
% 
% Input parameters:
%   u : The list of commands that the robot received, for every time step.
%       Form : [v w]' for every time step.
%   m : An estimate of the initial position of landmarks
%   z   : measurements
%
% Output parameter:
%   mu: The vector containing estimates of the robot's poses over time, and the estimated position of features. 
% 

mu = zeros(3*(size(u,2)+1),1); %one [x;y;theta] block per pose, t = 0..T

for t = 1:1:numel(u(1,:))
    v = u(1,t);
    w = u(2,t);
    theta =  mu(3*(t-1)+3 ,1);
    move = [(-v/w*sin(wrapToPi(theta)) + v/w*sin(wrapToPi(theta + w)));
            (v/w*cos(wrapToPi(theta)) - v/w*cos(wrapToPi(theta + w)));
            w];

    mu(3*t+1:3*t+3 ,1) = mu(3*(t-1)+1:3*(t-1)+3 ,1) + move;
    mu(3*t+3,1) = wrapToPi(mu(3*t+3,1));
end

%Initial estimation of the landmarks' positions.
offset = numel(mu);
%Using the provided landmark estimates m directly would look like this:
%for j = 1:1:numel(m(1,:))
%    mu(offset+3*(j-1)+1 : offset+3*j) = [m(1,j); m(2,j); j];
%end

%Instead, the landmarks are initialized at the origin here and filled in
%below from the first measurement of each landmark.
for j = 1:1:numel(m(1,:))
    mu(offset+3*(j-1)+1 : offset+3*j) = [0 ; 0 ; j];
end

for t = 1:1:numel(z(1,1,:))
    for i = 1:1:numel(z(1,:,t))
        j = round(z(3,i,t));

        if all(mu(offset+3*(j-1)+1 : offset+3*j-1) == [0;0]) %landmark not yet initialized from a measurement
            toSet = [(mu(3*t+1) + z(1,i,t)*cos(z(2,i,t)+mu(3*t+3)));
                     (mu(3*t+2) + z(1,i,t)*sin(z(2,i,t)+mu(3*t+3)));
                     j];

            mu(offset+3*(j-1)+1 : offset+3*j) = toSet;
        end
    end
end
end
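
If you want to sanity-check the dead-reckoned initialization against the ground truth, a quick overlay plot like the one below works. This is my own sketch; it assumes U, Z, GT and features_estimates from Simulation_Script.m are in the workspace.

%----------------------------Debugging sketch: check the initialization---------------------------------------
mu0 = GraphSLAM_initialize(U, features_estimates, Z); %dead-reckoned initial guess
T = size(U,2);
poses = reshape(mu0(1:3*(T+1)), 3, T+1); %one [x;y;theta] column per timestep
figure;
plot(GT(1,:), GT(2,:), 'b.-'); hold on;  %ground-truth trajectory in blue
plot(poses(1,:), poses(2,:), 'r.-');     %dead-reckoned trajectory in red
legend('ground truth', 'initialization');
axis equal;
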
%----------------------------GraphSLAM_linearize.m---------------------------------------

function [omega, epsilon] = GraphSLAM_linearize(u,z,c,mu,R,Q)
% This function gradually constructs the information matrix omega and the
% information vector epsilon through linearization, by locally adding
% submatrices in accordance with the information obtained from each
% measurement and each control.

% It assumes that dt(delta time) is 1.
% 
% Input parameters:
%   u : The list of commands that the robot received, for every time step.
%       Form : [v w]' for every time step.
%   z : Measurements for every time step.
%       Form : [range(:); bearing(:); signature(:)] for every time step.
%   c : The correspondence matrix. 
%   mu: The vector containing estimates of the robot's poses over time, and the estimated position of features. 
%   R : Covariance(3x3 matrix) of the additional motion noise [x,y,theta]
%   Q : Covariance(3x3 matrix) of the additional measurement noise 
%       Form : [range;bearing;signature]
%
% Output parameters:
%   omega   : Information matrix (Canonical parameterization)
%   epsilon : Information vector (Canonical parameterization)
% 
INF = 999999;
tOffset = 3*(numel(u(1,:))+1);

featuresCount = (numel(mu) - tOffset)/3;
%set omega and epsilon to 0
omega = zeros(tOffset+3*featuresCount , tOffset+3*featuresCount);
epsilon = zeros(tOffset+3*featuresCount , 1);

%Add inf*I to omega at x0
omega(1:3,1:3) = eye(3) * INF;

for t = 1:1:numel(u(1,:))
    v = u(1,t);
    w = u(2,t);
    theta   = mu(3*(t-1)+3); %heading of the pose estimate at time t-1

    %Pose estimation (xhat)
    move = [(-v/w*sin(wrapToPi(theta)) + v/w*sin(wrapToPi(theta + w)));
            (v/w*cos(wrapToPi(theta)) - v/w*cos(wrapToPi(theta + w)));
            w];
    xhat = mu(3*(t-1)+1:3*(t-1)+3 ,1) + move;
    xhat(3) = wrapToPi(xhat(3));


    G = [1 0 (-v/w*cos(wrapToPi(theta)) + v/w*cos(wrapToPi(theta + w)));
         0 1 (-v/w*sin(wrapToPi(theta)) + v/w*sin(wrapToPi(theta + w)));
         0 0 1];

    %Here, the algorithm on p.348 says to add the following:
    %[-G'; ones(3)] * (eye(3)/R) * [-G ones(3)]
    %But in fact, we have to use the identity matrix instead of ones:
    %[-G'; eye(3)]  * (eye(3)/R) * [-G eye(3)]
    toAdd = [-G'; eye(3)] * (eye(3)/R) * [-G eye(3)]; %6x6 matrix; page 351 says to split it into four 3x3 blocks
    range1 = 3*t-2:3*t;   %indices of the pose at t-1
    range2 = 3*t+1:3*t+3; %indices of the pose at t

    %Add toAdd to omega
    omega(range1,range1) = omega(range1,range1) + toAdd(1:3,1:3);
    omega(range1,range2) = omega(range1,range2) + toAdd(1:3,4:6);
    omega(range2,range1) = omega(range2,range1) + toAdd(4:6,1:3);
    omega(range2,range2) = omega(range2,range2) + toAdd(4:6,4:6);

    %Same problem here. The algorithm on p.348 says to add the following:
    %[-G'; ones(3)'] * (eye(3)/R) * (xhat - G*mu)
    %But in fact, we have to use the identity matrix instead of ones:
    %[-G'; eye(3)']  * (eye(3)/R) * (xhat - G*mu)
    toAdd = [-G'; eye(3)'] * (eye(3)/R) * (xhat - G*mu(3*t-2 : 3*t ,1)); %6x1 vector; page 351 says to split it into two 3x1 vectors
    %The book says toAdd should have a vertical dimension of 5, because there
    %mj is a 2x1 vector. In my implementation mj is a 3x1 vector (it carries
    %the signature) for simplicity; this does not change the behavior.
  
    %Add toAdd to epsilon
    epsilon(range1) = epsilon(range1) + toAdd(1:3);
    epsilon(range2) = epsilon(range2) + toAdd(4:6);
end

%measurement update
for t = 1:1:numel(z(1,1,:))
    %define mu_t for code that is easier to read.
    mu_t_x     = mu(3*t+1);
    mu_t_y     = mu(3*t+2);
    mu_t_theta = mu(3*t+3);
    mu_t = mu(3*t+1 : 3*t+3);

    for i = 1:1:numel(z(1,:,t))
        j = c(1,i,t);
        if j > 0 %landmark has been seen during the t-th timestep
            %define mu_j to simplify the code
            mu_j_x     = mu(tOffset + 3*j-2);
            mu_j_y     = mu(tOffset + 3*j-1);
            mu_j = mu(tOffset + 3*j-2 : tOffset + 3*j);

            d = [mu_j_x - mu_t_x; %d is the difference between landmark estimation and current position
                 mu_j_y - mu_t_y];

            q = d'*d;
            %estimated measurement, zhat. It is suggested to pay great
            %attention to the implementation of this line, since the
            %angular expressions can be shifted arbitrarily by 2pi.
            zhat = [sqrt(q);
                   wrapToPi(atan2(d(2), d(1))-mu_t_theta);
                   round(z(3,i,t))];

            H = (1/q) * [-sqrt(q)*d(1) -sqrt(q)*d(2) 0 sqrt(q)*d(1) sqrt(q)*d(2) 0;
                         d(2) -d(1) -q -d(2) d(1) 0;
                         0 0 0 0 0 q];

            toAdd = H'*(eye(3)/Q)*H; %add this to omega at xt and mj

            range1 = 3*t+1:3*t+3; %position of xt
            range2 = tOffset + (3*(j-1)) + 1 : tOffset + (3*(j-1)) + 3; %position of mj

            omega(range1,range1) = omega(range1,range1) + toAdd(1:3,1:3);
            omega(range1,range2) = omega(range1,range2) + toAdd(1:3,4:6);
            omega(range2,range1) = omega(range2,range1) + toAdd(4:6,1:3);
            omega(range2,range2) = omega(range2,range2) + toAdd(4:6,4:6);

            %we calculate z-zhat+Hmu first because there might be a wrapToPi issue
            z_zhat_Hmu = z(:,i,t)-zhat + H*[mu_t ; mu_j];
            z_zhat_Hmu(2) = wrapToPi(z_zhat_Hmu(2));

            toAdd = H' * (eye(3)/Q) * (z_zhat_Hmu); %add this to epsilon at xt and mj

            epsilon(range1) = epsilon(range1) + toAdd(1:3);
            epsilon(range2) = epsilon(range2) + toAdd(4:6);
        end
    end
end

end
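
A check I found helpful while debugging the linearization (again my own sketch, not part of the book's algorithm): omega should come out symmetric, and its sparsity pattern should only couple consecutive poses plus the pose/landmark pairs that share a measurement. Assuming U, Z, C, R, Q and features_estimates from Simulation_Script.m:

%----------------------------Debugging sketch: inspect omega---------------------------------------
mu0 = GraphSLAM_initialize(U, features_estimates, Z);
[omega, epsilon] = GraphSLAM_linearize(U, Z, C, mu0, R, Q);
figure;
spy(omega);                                %poses chained together, then pose-landmark links
title('Sparsity pattern of omega');
asymmetry = max(max(abs(omega - omega'))); %should be ~0 up to round-off
fprintf('max asymmetry of omega: %g\n', asymmetry);
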
%----------------------------GraphSLAM_reduce.m---------------------------------------

function [omegaTilda, epsilonTilda] = GraphSLAM_reduce(omega,epsilon, offsetToJ)
% This function reduces the dimensionality of the information matrix omega
% and of the information vector epsilon. This algorithm takes as input
% omega and epsilon defined over the full space of map features and poses,
% and outputs a reduced matrix and vector defined over the space of all
% poses (but not the map). This transformation is achieved by removing
% features one-at-a-time.
% 
% Input parameters:
%   omega     : Information matrix (Canonical parameterization)
%   epsilon   : Information vector (Canonical parameterization)
%   offsetToJ : offset in omega and in epsilon to get to features
%
% Output parameters:
%   omegaTilda   : Reduced information matrix (Canonical parameterization)
%   epsilonTilda : Reduced information vector (Canonical parameterization)
% 
omega_ = omega;
epsilon_ = epsilon;
featuresCount = (numel(omega(:,1)) - offsetToJ)/3;

for j=1:1:featuresCount

    %let tau(j) be the set of all poses xt at which j was observed
    %According to the marginalization lemma, we can use the following matrix
    %omega_x0t_x0t   = omega_(1:offsetToJ                  , 1:offsetToJ);

    %The matrix omega_x0t_j and its transpose is non-zero only for elements in tau(j), 
    %so we can use equations (11.35) and (11.36) from [Probabilistic Robotics]
    omega_x0t_j     = omega_(1:offsetToJ                  , offsetToJ+3*j-2:offsetToJ+3*j);
    omega_j_x0t     = omega_(offsetToJ+3*j-2:offsetToJ+3*j, 1:offsetToJ);
    omega_j_j       = omega_(offsetToJ+3*j-2:offsetToJ+3*j, offsetToJ+3*j-2:offsetToJ+3*j);

    %If the feature has been measured at least once
    if max(max(omega_j_j ~= zeros(3))) > 0
        %The vector epsilon_j is used to update epsilon_
        epsilon_j = epsilon_(offsetToJ+3*j-2:offsetToJ+3*j);
        %Subtract from epsilon_. We should also set the corresponding entries of
        %epsilon_ to 0, but that is pointless since that region will not be used
        %further and is removed below anyway.
        toSubtract = (omega_x0t_j * (eye(3)/omega_j_j) * epsilon_j);
        epsilon_(1:offsetToJ) = epsilon_(1:offsetToJ) - toSubtract;

        %Subtract from omega_. We should also set the three matrices above to 0 in
        %omega_, but that is pointless since those regions will not be used further
        %and are removed below anyway.
        toSubtract = omega_x0t_j * (eye(3)/omega_j_j) * omega_j_x0t;
        omega_(1:offsetToJ, 1:offsetToJ) = omega_(1:offsetToJ, 1:offsetToJ) - toSubtract;
    end
end

%Reduction of the information matrix and vector. Strictly speaking we should
%also have zeroed out the map's rows and columns above, but those subtractions
%were omitted because we knew these rows and columns would be dropped here
%anyway.
omegaTilda = omega_(1:offsetToJ, 1:offsetToJ);
epsilonTilda = epsilon_(1:offsetToJ);
end
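
The subtraction above is the Schur complement from the marginalization lemma (equations 11.35 and 11.36 in the book). If that step looks mysterious, the tiny standalone example below (nothing GraphSLAM-specific, just my own illustration) shows that marginalizing a variable out of a Gaussian in information form gives back the usual covariance-form marginal.

%----------------------------Sketch: the marginalization lemma on a toy Gaussian---------------------------------------
Sigma = [2 0.8; 0.8 1]; %joint covariance of [a; b]
mu_ab = [1; -2];        %joint mean
Omega = inv(Sigma);     %information matrix
xi    = Omega * mu_ab;  %information vector

%Marginalize b out with the Schur complement, exactly what GraphSLAM_reduce does per feature
OmegaTilda = Omega(1,1) - Omega(1,2) * (Omega(2,2) \ Omega(2,1));
xiTilda    = xi(1)      - Omega(1,2) * (Omega(2,2) \ xi(2));

%Compare with the covariance-form marginal, which is simply Sigma(1,1) and mu_ab(1)
fprintf('marginal variance: %g (information form) vs %g (covariance form)\n', 1/OmegaTilda, Sigma(1,1));
fprintf('marginal mean    : %g (information form) vs %g (covariance form)\n', xiTilda/OmegaTilda, mu_ab(1));
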
%----------------------------GraphSLAM_solve.m---------------------------------------

function [mu, sigma] = GraphSLAM_solve(omegaTilda,epsilonTilda, omega, epsilon, offsetToJ)
% This function computes the mean mu and covariance sigma of the robot path
% from the reduced information form (omegaTilda, epsilonTilda), and then
% recovers the position of each observed landmark from the full information
% matrix and vector, conditioned on the estimated path.
% 
% Input parameters:
%   omegaTilda  : Reduced information matrix (Canonical parameterization)
%   epsilonTilda: Reduced information vector (Canonical parameterization)
%   omega       : Information matrix (Canonical parameterization)
%   epsilon     : Information vector (Canonical parameterization)
%   offsetToJ   : offset in omega and in epsilon to get to features
%
% Output parameters:
%   mu    : The vector containing estimates of the robot's poses over time, and the estimated position of features. 
%   sigma : The covariance of the robot's pose estimates.
% 

sigma   = eye(size(omegaTilda))/omegaTilda;
mu      = sigma * epsilonTilda;

%Wrap To Pi
for i = 3:3:offsetToJ
    mu(i,1) = wrapToPi(mu(i,1));
end

muTilda = mu(1:numel(epsilonTilda));
featuresCount = (numel(omega(:,1)) - offsetToJ)/3;

for j=1:1:featuresCount
    %let tau(j) be the set of all poses xt at which j was observed
    %According to the marginalization lemma, we can use the following matrix.
    %The matrix omega_j_x0t and its transpose is non-zero only for elements in tau(j), 
    %so we can use equations (11.35) and (11.36) from [Probabilistic Robotics]
    omega_j_x0t     = omega(offsetToJ+3*j-2:offsetToJ+3*j, 1:offsetToJ);
    omega_j_j       = omega(offsetToJ+3*j-2:offsetToJ+3*j, offsetToJ+3*j-2:offsetToJ+3*j);
    epsilon_j       = epsilon(offsetToJ+3*j-2:offsetToJ+3*j);

    if max(max(omega_j_j ~= zeros(3))) > 0
        %There's an erratum on line 6 of the algorithm, page 349. The correct line is the following:
        mu(offsetToJ+3*j-2:offsetToJ+3*j) = (eye(3)/(omega_j_j)) * (epsilon_j - (omega_j_x0t * muTilda));
    end
end

%Pad mu with zeros at the end so that it matches the original size of mu
%(for example, when the landmark with the biggest id has never been measured).
%This is only useful for the drawing part of the simulation script.
numberOfMissingFeatures = (numel(omega(:,1)) - numel(mu))/3;
mu = [mu ; zeros(3*(numberOfMissingFeatures),1)];
end
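
As a usage note: the solved mu stacks the robot trajectory first (one [x; y; theta] block per timestep, t = 0..T) followed by one [x; y; signature] block per landmark. The two small accessors below (my own naming, not from the book) make it less error-prone to pull values out; this is essentially what the drawing code in Simulation_Script.m does by hand.

%----------------------------Helper sketches (hypothetical)---------------------------------------
function x = getPose(mu, t)
% Pose [x; y; theta] of the robot at timestep t (t = 0 is the initial pose).
x = mu(3*t+1 : 3*t+3);
end

function m = getLandmark(mu, j, offsetToJ)
% Landmark j as [x; y; signature]; offsetToJ = 3*(T+1), as used throughout the code above.
m = mu(offsetToJ + 3*(j-1)+1 : offsetToJ + 3*j);
end
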
%----------------------------Utility functions---------------------------------------
%----------------------------plotCircle.m---------------------------------------
function plotCircle(x,y,r,symbol,n)
    if nargin < 5
        n = 3600; % default number of points around the circle
    end
    ang = linspace(0, 2*pi, n);
    xp = r*cos(ang);
    yp = r*sin(ang);
    plot(x+xp, y+yp, symbol);
end
%----------------------------plotError.m---------------------------------------
function plotError(center,C,symbol,n)
    % center: 2x1 position vector
    % C: 2x2 covariance matrix
    % symbol: plot symbol, eg 'r'
    % n: number of points
    if nargin < 4
        n = 100; % default number of points around the ellipse
    end
    p=0:pi/n:2*pi; % angles around a circle
    [eigvec,eigval] = eig(C); % Compute eigen-stuff
    xy = [cos(p'),sin(p')] * sqrt(eigval) * eigvec'; % Transformation
    x = center(1) + xy(:,1);
    y = center(2) + xy(:,2);
    plot(x, y, symbol)
end