Merge branch 'master' of https://whale.am28.uni-tuebingen.de/git/teaching/scientificComputing
This commit is contained in:
@@ -1,10 +1,21 @@
|
||||
How to make a new project
|
||||
-------------------------
|
||||
Copy `project_template/` to your `project_NAME/` and adapt according to your needs.
|
||||
Rename `template.tex` to `NAME.tex` and write questions.
|
||||
Put data that are needed for the project into the `data/` subfolder.
|
||||
Put your solution into the `code/` subfolder.
|
||||
Don't forget to add the project files to git (`git add FILENAMES`).
|
||||
|
||||
- Copy `project_template/` to your `project_NAME/` and adapt according to your needs.
|
||||
- Rename `template.tex` to `NAME.tex` and write questions.
|
||||
- Put code needed for the project into the project's root directory.
|
||||
- Put data that are needed for the project into the `data/` subfolder.
|
||||
- Put your solution into the `solution/` subfolder.
|
||||
- Put code that is needed to generate some data for the project,
|
||||
but which is not part of the project, into the `code/` subfolder.
|
||||
- Don't forget to add the project files to git (`git add FILENAMES`).
|
||||
|
||||
|
||||
Upload projects to Ilias
|
||||
------------------------
|
||||
|
||||
Simply upload ALL zip files into one folder or Uebungseinheit.
|
||||
Provide an additional file that links project names to students.
|
||||
|
||||
|
||||
Projects
|
||||
@@ -12,7 +23,7 @@ Projects
|
||||
|
||||
1) project_activation_curve
|
||||
medium
|
||||
Write questions
|
||||
also normalize activation curve to maximum.
|
||||
|
||||
2) project_adaptation_fit
|
||||
OK, medium
|
||||
@@ -34,7 +45,6 @@ OK, medium-difficult
|
||||
|
||||
7) project_ficurves
|
||||
OK, medium
|
||||
Maybe add correlation test or fit statistics
|
||||
|
||||
8) project_lif
|
||||
OK, difficult
|
||||
@@ -42,7 +52,6 @@ no statistics
|
||||
|
||||
9) project_mutualinfo
|
||||
OK, medium
|
||||
Example code is missing
|
||||
|
||||
10) project_noiseficurves
|
||||
OK, simple-medium
|
||||
|
||||
16
projects/project_activation_curve/solution/ivcurve.m
Normal file
16
projects/project_activation_curve/solution/ivcurve.m
Normal file
@@ -0,0 +1,16 @@
|
||||
function [vsteps, peakcurrents] = ivcurve(vsteps, time, currents, tmax)
% Extract the peak current of each voltage-step response.
%   vsteps       : vector of step potentials (passed through unchanged)
%   time         : time vector matching the rows of currents
%   currents     : current traces, one column per voltage step
%   tmax         : upper edge of the analysis window (0 < t < tmax)
% The peak is the extremum (minimum or maximum) of the trace within the
% analysis window; the one with the larger magnitude wins, maxima win ties.

window = (time > 0.0) & (time < tmax);
nsteps = length(vsteps);
peakcurrents = zeros(1, nsteps);
for idx = 1:nsteps
    trace = currents(window, idx);
    extremes = [min(trace), max(trace)];
    if abs(extremes(1)) > extremes(2)
        peakcurrents(idx) = extremes(1);
    else
        peakcurrents(idx) = extremes(2);
    end
end

end
|
||||
|
||||
50
projects/project_activation_curve/solution/main.m
Normal file
50
projects/project_activation_curve/solution/main.m
Normal file
@@ -0,0 +1,50 @@
|
||||
%% plot data:
% wild-type recordings:
x = load('../data/WT_01.mat');
wtdata = x.data;
plotcurrents(wtdata.t, wtdata.I);

% A1622D mutant recordings:
x = load('../data/A1622D_01.mat');
addata = x.data;
plotcurrents(addata.t, addata.I);

%% I-V curve:
% peak current of each voltage-step response within the first 100 ms:
[wtsteps, wtpeaks] = ivcurve(wtdata.steps, wtdata.t, wtdata.I, 100.0);
[adsteps, adpeaks] = ivcurve(addata.steps, addata.t, addata.I, 100.0);

figure();
plot(wtsteps, wtpeaks, '-b');
hold on;
plot(adsteps, adpeaks, '-r');
hold off;

%% reversal potential:
% zero crossing of a line fitted to the upper part of the I-V curve:
wtE = reversalpotential(wtsteps, wtpeaks);
adE = reversalpotential(adsteps, adpeaks);

%% activation curve:
% conductance via Ohm's law from peak current and driving force:
wtg = wtpeaks./(wtsteps - wtE);
adg = adpeaks./(adsteps - adE);

% normalize to the mean conductance between +20 and +40 mV and discard
% steps at or above +40 mV
% NOTE(review): presumably steps >= +40 mV are too close to the reversal
% potential for a reliable conductance estimate - confirm:
wtinfty = wtg(wtsteps<40.0)/mean(wtg((wtsteps>=20.0)&(wtsteps<=40.0)));
adinfty = adg(adsteps<40.0)/mean(adg((adsteps>=20.0)&(adsteps<=40.0)));
wtsteps = wtsteps(wtsteps<40.0);
adsteps = adsteps(adsteps<40.0);

figure();
plot(wtsteps, wtinfty, '-b');
hold on;
plot(adsteps, adinfty, '-r');

%% boltzmann fit:
% sigmoid with slope factor p(1) and half-activation voltage p(2):
bf = @(p, v) 1.0./(1.0+exp(-p(1)*(v - p(2))));
p = lsqcurvefit(bf, [1.0, -40.0], wtsteps, wtinfty);
wtfit = bf(p, wtsteps);
p = lsqcurvefit(bf, [1.0, -40.0], adsteps, adinfty);
adfit = bf(p, adsteps);

plot(wtsteps, wtfit, '-b');
% bug fix: adfit was plotted against wtsteps, but it was evaluated at adsteps:
plot(adsteps, adfit, '-r');
hold off;
|
||||
|
||||
|
||||
13
projects/project_activation_curve/solution/plotcurrents.m
Normal file
13
projects/project_activation_curve/solution/plotcurrents.m
Normal file
@@ -0,0 +1,13 @@
|
||||
function plotcurrents(time, currents)
% Plot every current trace (one per column of currents) against time
% in a newly created figure, with axis labels.

figure();
hold on;
% for over a matrix iterates its columns, i.e. one trace at a time:
for trace = currents
    plot(time, trace)
end
hold off;
xlabel('Time [ms]')
ylabel('Current')

end
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
function E = reversalpotential(vsteps, currents, vmin, vmax)
% Estimate the reversal potential as the zero crossing of a straight
% line fitted to the I-V data within the voltage range [vmin, vmax).
%   vsteps   : step potentials
%   currents : peak currents corresponding to vsteps
%   vmin     : lower edge of the fit range (optional, default 20.0)
%   vmax     : upper edge of the fit range (optional, default 50.0)
% Returns E, the voltage where the fitted line crosses zero current.
if nargin < 3
    vmin = 20.0;
end
if nargin < 4
    vmax = 50.0;
end
% compute the selection mask once instead of twice:
sel = (vsteps >= vmin) & (vsteps < vmax);
p = polyfit(vsteps(sel), currents(sel), 1);
% fitted line is I = p(1)*V + p(2), so I = 0 at V = -p(2)/p(1):
E = -p(2)/p(1);
end
|
||||
@@ -11,6 +11,49 @@
|
||||
|
||||
|
||||
%%%%%%%%%%%%%% Questions %%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
The mutual information is a measure from information theory that is
|
||||
used in neuroscience to quantify, for example, how much information a
|
||||
spike train carries about a sensory stimulus. It quantifies the
|
||||
dependence of an output $y$ (e.g. a spike train) on some input $x$
|
||||
(e.g. a sensory stimulus).
|
||||
|
||||
The probability of each of $n$ input values $x = {x_1, x_2, ... x_n}$
|
||||
is given by the corresponding probability distribution $P(x)$. The entropy
|
||||
\begin{equation}
|
||||
\label{entropy}
|
||||
H[x] = - \sum_{x} P(x) \log_2 P(x)
|
||||
\end{equation}
|
||||
is a measure for the surprise of getting a specific value of $x$. For
|
||||
example, if from two possible values '1' and '2', the probability of
|
||||
getting a '1' is close to one ($P(1) \approx 1$) then the probability
|
||||
of getting a '2' is close to zero ($P(2) \approx 0$). For this case
|
||||
the entropy, the surprise level, is almost zero, because both $0 \log
|
||||
0 = 0$ and $1 \log 1 = 0$. It is not surprising at all that you almost
|
||||
always get a '1'. The entropy is largest for equally likely outcomes
|
||||
of $x$. If getting a '1' or a '2' is equally likely then you will be
|
||||
most surprised by each new number you get, because you can not predict
|
||||
them.
|
||||
|
||||
Mutual information measures information transmitted between an input
|
||||
and an output. It is computed from the probability distributions of
|
||||
the input, $P(x)$, the output $P(y)$ and their joint distribution
|
||||
$P(x,y)$:
|
||||
\begin{equation}
|
||||
\label{mi}
|
||||
I[x:y] = \sum_{x}\sum_{y} P(x,y) \log_2\frac{P(x,y)}{P(x)P(y)}
|
||||
\end{equation}
|
||||
where the sums go over all possible values of $x$ and $y$. The mutual
|
||||
information can be also expressed in terms of entropies. Mutual
|
||||
information is the entropy of the outputs $y$ reduced by the entropy
|
||||
of the outputs given the input:
|
||||
\begin{equation}
|
||||
\label{mientropy}
|
||||
I[x:y] = H[y] - H[y|x]
|
||||
\end{equation}
|
||||
|
||||
The following project is meant to explore the concept of mutual
|
||||
information with the help of a simple example.
|
||||
|
||||
\begin{questions}
|
||||
\question A subject was presented two possible objects for a very
|
||||
brief time ($50$\,ms). The task of the subject was to report which of
|
||||
@@ -19,40 +62,56 @@
|
||||
object was reported by the subject.
|
||||
|
||||
\begin{parts}
|
||||
\part Plot the data appropriately.
|
||||
\part Plot the raw data (no sums or probabilities) appropriately.
|
||||
|
||||
\part Compute and plot the probability distributions of presented
|
||||
and reported objects.
|
||||
|
||||
\part Compute a 2-d histogram that shows how often different
|
||||
combinations of reported and presented came up.
|
||||
|
||||
\part Normalize the histogram such that it sums to one (i.e. make
|
||||
it a probability distribution $P(x,y)$ where $x$ is the presented
|
||||
object and $y$ is the reported object). Compute the probability
|
||||
distributions $P(x)$ and $P(y)$ in the same way.
|
||||
object and $y$ is the reported object).
|
||||
|
||||
\part Use the computed probability distributions to compute the mutual
|
||||
information \eqref{mi} that the answers provide about the
|
||||
actually presented object.
|
||||
|
||||
\part Use that probability distribution to compute the mutual
|
||||
information
|
||||
\[ I[x:y] = \sum_{x\in\{1,2\}}\sum_{y\in\{1,2\}} P(x,y)
|
||||
\log_2\frac{P(x,y)}{P(x)P(y)}\]
|
||||
that the answers provide about the actually presented object.
|
||||
|
||||
The mutual information is a measure from information theory that is
|
||||
used in neuroscience to quantify, for example, how much information
|
||||
a spike train carries about a sensory stimulus.
|
||||
|
||||
\part What is the maximally achievable mutual information (try to
|
||||
find out by generating your own dataset which naturally should
|
||||
yield maximal information)?
|
||||
|
||||
\part Use bootstrapping (permutation test) to compute the $95\%$
|
||||
confidence interval for the mutual information estimate in the
|
||||
dataset from {\tt decisions.mat}.
|
||||
\part Use a permutation test to compute the $95\%$ confidence
|
||||
interval for the mutual information estimate in the dataset from
|
||||
{\tt decisions.mat}. Does the measured mutual information indicate
|
||||
significant information transmission?
|
||||
|
||||
\end{parts}
|
||||
|
||||
\question What is the maximally achievable mutual information?
|
||||
|
||||
\begin{parts}
|
||||
\part Show this numerically by generating your own datasets which
|
||||
naturally should yield maximal information. Consider different
|
||||
distributions of $P(x)$.
|
||||
|
||||
\part Compare the maximal mutual information with the corresponding
|
||||
entropy \eqref{entropy}.
|
||||
\end{parts}
|
||||
|
||||
\question What is the minimum possible mutual information?
|
||||
|
||||
This is the mutual information obtained when the output is independent of the
|
||||
input.
|
||||
|
||||
How is the joint distribution $P(x,y)$ related to the marginals
|
||||
$P(x)$ and $P(y)$ if $x$ and $y$ are independent? What is the value
|
||||
of the logarithm in eqn.~\eqref{mi} in this case? So what is the
|
||||
resulting value for the mutual information?
|
||||
|
||||
\end{questions}
|
||||
|
||||
|
||||
|
||||
|
||||
Hint: You may encounter a problem when computing the mutual
|
||||
information whenever $P(x,y)$ equals zero. For treating this special
|
||||
case think about (plot it) what the limit of $x \log x$ is for $x$
|
||||
approaching zero. Use this information to fix the computation of the
|
||||
mutual information.
|
||||
|
||||
\end{document}
|
||||
|
||||
8
projects/project_mutualinfo/solution/mi.m
Normal file
8
projects/project_mutualinfo/solution/mi.m
Normal file
@@ -0,0 +1,8 @@
|
||||
function I = mi(nxy)
% Mutual information (in bits) computed from a joint count histogram.
%   nxy : matrix of joint counts, nxy(x, y) = number of (x, y) pairs
% Returns I, the mutual information between x and y.
% hoist the total count instead of recomputing it three times:
total = sum(nxy(:));
pxy = nxy / total;
px = sum(nxy, 2) / total;    % marginal P(x), column vector
py = sum(nxy, 1) / total;    % marginal P(y), row vector
% px*py is the outer product, i.e. the joint distribution if independent;
% renamed from 'pi', which shadowed MATLAB's built-in constant:
terms = pxy .* log2(pxy./(px*py));
% lim p->0 of p*log2(p) is 0, so empty cells contribute nothing:
terms(nxy == 0) = 0.0;
I = sum(terms(:));
end
|
||||
90
projects/project_mutualinfo/solution/mutualinfo.m
Normal file
90
projects/project_mutualinfo/solution/mutualinfo.m
Normal file
@@ -0,0 +1,90 @@
|
||||
%% load data:
% per-trial presented/reported object identities (values 1 or 2):
x = load('../data/decisions.mat');
presented = x.presented;
reported = x.reported;

%% plot data:
figure()
plot(presented, 'ob', 'markersize', 10, 'markerfacecolor', 'b');
hold on;
plot(reported, 'or', 'markersize', 5, 'markerfacecolor', 'r');
hold off
ylim([0.5, 2.5])

% counts of presented and reported objects:
p1 = sum(presented == 1);
p2 = sum(presented == 2);
r1 = sum(reported == 1);
r2 = sum(reported == 2);
figure()
bar([p1, p2, r1, r2]);
set(gca, 'XTickLabel', {'p1', 'p2', 'r1', 'r2'});

%% histogram:
% joint counts of (presented, reported) combinations:
nxy = zeros(2, 2);
for x = [1, 2]
    for y = [1, 2]
        nxy(x, y) = sum((presented == x) & (reported == y));
    end
end
figure()
bar3(nxy)
set(gca, 'XTickLabel', {'p1', 'p2'});
set(gca, 'YTickLabel', {'r1', 'r2'});

%% normalized histogram:
% joint probability distribution P(x,y):
pxy = nxy / sum(nxy(:));
figure()
imagesc(pxy)

% marginal distributions P(x) and P(y):
px = sum(nxy, 2) / sum(nxy(:));
py = sum(nxy, 1) / sum(nxy(:));

%% mutual information:
miv = mi(nxy);

%% permutation:
% shuffle trials to destroy the presented/reported association and
% build a null distribution of mutual information estimates:
np = 10000;
mis = zeros(np, 1);
for k = 1:np
    ppre = presented(randperm(length(presented)));
    prep = reported(randperm(length(reported)));
    pnxy = zeros(2, 2);
    for x = [1, 2]
        for y = [1, 2]
            pnxy(x, y) = sum((ppre == x) & (prep == y));
        end
    end
    mis(k) = mi(pnxy);
end
% fraction of shuffled datasets with MI at least as large as measured:
alpha = sum(mis>miv)/length(mis);
% bug fix: message read 'signifikance':
fprintf('significance: %g\n', alpha);
bins = [0.0:0.025:0.4];
% bug fix: without a new figure, hist() would draw into and destroy
% the imagesc figure created above:
figure()
hist(mis, bins)
hold on;
plot([miv, miv], [0, np/10], '-r')
hold off;
xlabel('MI')
ylabel('Count')

%% maximum MI:
% perfectly reliable reporting (purely diagonal joint histogram) for
% varying P(x=1); the resulting MI is the maximum achievable and
% equals the entropy of the input:
n = 100000;
pxs = [0:0.01:1.0];
mis = zeros(length(pxs), 1);
for k = 1:length(pxs)
    p = rand(n, 1);
    nxy = zeros(2, 2);
    nxy(1, 1) = sum(p<pxs(k));
    nxy(2, 2) = length(p) - nxy(1, 1);
    mis(k) = mi(nxy);
end
figure();
plot(pxs, mis);
hold on;
% mark P(x=1) measured in the actual dataset:
plot([px(1), px(1)], [0, 1], '-r')
hold off;
xlabel('p(x=1)')
ylabel('Max MI=Entropy')
|
||||
|
||||
@@ -9,49 +9,50 @@
|
||||
|
||||
\input{../instructions.tex}
|
||||
|
||||
You are recording the activity of neurons that differ in the strength
|
||||
of their intrinsic noise in response to constant stimuli of intensity
|
||||
$I$ (think of that, for example, as a current $I$ injected via a
|
||||
patch-electrode into the neuron).
|
||||
|
||||
We first characterize the neurons by their tuning curves (also called
|
||||
intensity-response curve). That is, what is the mean firing rate of
|
||||
the neuron's response as a function of the constant input current $I$?
|
||||
|
||||
In the second part we demonstrate how intrinsic noise can be useful
|
||||
for encoding stimuli on the example of the so called ``subthreshold
|
||||
stochastic resonance''.
|
||||
|
||||
The neuron is implemented in the file \texttt{lifspikes.m}. Call it
|
||||
with the following parameters:\\[-7ex]
|
||||
\begin{lstlisting}
|
||||
trials = 10;
|
||||
tmax = 50.0;
|
||||
current = 10.0; % the constant input current I
|
||||
Dnoise = 1.0; % noise strength
|
||||
spikes = lifspikes(trials, current, tmax, Dnoise);
|
||||
\end{lstlisting}
|
||||
The returned \texttt{spikes} is a cell array with \texttt{trials}
|
||||
elements, each being a vector of spike times (in seconds) computed for
|
||||
a duration of \texttt{tmax} seconds. The input current is set via the
|
||||
\texttt{current} variable, the strength of the intrinsic noise via
|
||||
\texttt{Dnoise}. If \texttt{current} is a single number, then an input
|
||||
current of that intensity is simulated for \texttt{tmax}
|
||||
seconds. Alternatively, \texttt{current} can be a vector containing an
|
||||
input current that changes in time. In this case, \texttt{tmax} is
|
||||
ignored, and you have to provide a value for the input current for
|
||||
every 0.0001\,seconds.
|
||||
|
||||
Think of calling the \texttt{lifspikes()} function as a simple way of
|
||||
doing an electrophysiological experiment. You are presenting a
|
||||
stimulus with a constant intensity $I$ that you set. The neuron
|
||||
responds to this stimulus, and you record this response. After
|
||||
detecting the timepoints of the spikes in your recordings you get what
|
||||
the \texttt{lifspikes()} function returns. In addition you can record
|
||||
from different neurons with different noise properties by setting the
|
||||
\texttt{Dnoise} parameter to different values.
|
||||
|
||||
\begin{questions}
|
||||
\question You are recording the activity of a neuron in response to
|
||||
constant stimuli of intensity $I$ (think of that, for example,
|
||||
as a current $I$ injected via a patch-electrode into the neuron).
|
||||
|
||||
Measure the tuning curve (also called the intensity-response curve) of the
|
||||
neuron. That is, what is the mean firing rate of the neuron's response
|
||||
as a function of the constant input current $I$?
|
||||
|
||||
How does the intensity-response curve of a neuron depend on the
|
||||
level of the intrinsic noise of the neuron?
|
||||
|
||||
How can intrinsic noise be useful for encoding stimuli?
|
||||
|
||||
The neuron is implemented in the file \texttt{lifspikes.m}. Call it
|
||||
with the following parameters:\\[-7ex]
|
||||
\begin{lstlisting}
|
||||
trials = 10;
|
||||
tmax = 50.0;
|
||||
current = 10.0; % the constant input current I
|
||||
Dnoise = 1.0; % noise strength
|
||||
spikes = lifspikes(trials, current, tmax, Dnoise);
|
||||
\end{lstlisting}
|
||||
The returned \texttt{spikes} is a cell array with \texttt{trials}
|
||||
elements, each being a vector of spike times (in seconds) computed
|
||||
for a duration of \texttt{tmax} seconds. The input current is set
|
||||
via the \texttt{current} variable, the strength of the intrinsic
|
||||
noise via \texttt{Dnoise}. If \texttt{current} is a single number,
|
||||
then an input current of that intensity is simulated for
|
||||
\texttt{tmax} seconds. Alternatively, \texttt{current} can be a
|
||||
vector containing an input current that changes in time. In this
|
||||
case, \texttt{tmax} is ignored, and you have to provide a value
|
||||
for the input current for every 0.0001\,seconds.
|
||||
|
||||
Think of calling the \texttt{lifspikes()} function as a simple way
|
||||
of doing an electrophysiological experiment. You are presenting a
|
||||
stimulus with a constant intensity $I$ that you set. The neuron
|
||||
responds to this stimulus, and you record this response. After
|
||||
detecting the timepoints of the spikes in your recordings you get
|
||||
what the \texttt{lifspikes()} function returns. In addition you
|
||||
can record from different neurons with different noise properties
|
||||
by setting the \texttt{Dnoise} parameter to different values.
|
||||
|
||||
\question Tuning curves
|
||||
\begin{parts}
|
||||
\part First set the noise \texttt{Dnoise=0} (no noise). Compute
|
||||
and plot the neuron's $f$-$I$ curve, i.e. the mean firing rate
|
||||
@@ -64,37 +65,43 @@ spikes = lifspikes(trials, current, tmax, Dnoise);
|
||||
|
||||
\part Compute the $f$-$I$ curves of neurons with various noise
|
||||
strengths \texttt{Dnoise}. Use for example $D_{noise} = 10^{-3}$,
|
||||
$10^{-2}$, and $10^{-1}$.
|
||||
$10^{-2}$, and $10^{-1}$. Depending on the resulting curves you
|
||||
might want to try additional noise levels.
|
||||
|
||||
How does the intrinsic noise influence the response curve?
|
||||
How does the intrinsic noise level influence the tuning curves?
|
||||
|
||||
What are possible sources of this intrinsic noise?
|
||||
|
||||
\part Show spike raster plots and interspike interval histograms
|
||||
of the responses for some interesting values of the input and the
|
||||
noise strength. For example, you might want to compare the
|
||||
responses of the four different neurons to the same input, or by
|
||||
the same resulting mean firing rate.
|
||||
responses of the different neurons to the same input, or by the
|
||||
same resulting mean firing rate.
|
||||
|
||||
How do the responses differ?
|
||||
\end{parts}
|
||||
|
||||
\question Subthreshold stochastic resonance
|
||||
|
||||
Let's now use as an input to the neuron a 1\,s long sine wave $I(t)
|
||||
= I_0 + A \sin(2\pi f t)$ with offset current $I_0$, amplitude $A$,
|
||||
and frequency $f$. Set $I_0=5$, $A=4$, and $f=5$\,Hz.
|
||||
|
||||
\part Let's now use as an input to the neuron a 1\,s long sine
|
||||
wave $I(t) = I_0 + A \sin(2\pi f t)$ with offset current $I_0$,
|
||||
amplitude $A$, and frequency $f$. Set $I_0=5$, $A=4$, and
|
||||
$f=5$\,Hz.
|
||||
\begin{parts}
|
||||
\part Do you get a response of the noiseless ($D_{noise}=0$) neuron?
|
||||
|
||||
Do you get a response of the noiseless ($D_{noise}=0$) neuron?
|
||||
\part What happens if you increase the noise strength?
|
||||
|
||||
What happens if you increase the noise strength?
|
||||
\part What happens at really large noise strengths?
|
||||
|
||||
What happens at really large noise strengths?
|
||||
\part Generate some example plots that illustrate your findings.
|
||||
|
||||
Generate some example plots that illustrate your findings.
|
||||
|
||||
Explain the encoding of the sine wave based on your findings
|
||||
\part Explain the encoding of the sine wave based on your findings
|
||||
regarding the $f$-$I$ curves.
|
||||
|
||||
\end{parts}
|
||||
\part Why is this phenomenon called ``subthreshold stochastic resonance''?
|
||||
|
||||
\end{parts}
|
||||
|
||||
\end{questions}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
\documentclass[a4paper,12pt,pdftex]{exam}
|
||||
|
||||
\newcommand{\ptitle}{EOD waveform}
|
||||
\newcommand{\ptitle}{Power analysis}
|
||||
\input{../header.tex}
|
||||
\firstpagefooter{Supervisor: Peter Pilz}{phone: 29 74835}%
|
||||
{email: peter.pilz@uni-tuebingen.de}
|
||||
|
||||
Reference in New Issue
Block a user