This commit is contained in:
Jan Benda 2021-01-18 18:58:54 +01:00
commit 6a643cd347
13 changed files with 394 additions and 259 deletions

View File

@ -222,11 +222,26 @@
{\vspace{-2ex}\lstset{#1}\noindent\minipage[t]{1\linewidth}}%
{\endminipage}
%%%%% english, german, code and file terms: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% english and german terms: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{ifthen}
% \enterm[en-index]{term}: Typeset term and add it to the index of english terms
%
% \determ[de-index]{term}: Typeset term and add it to the index of german terms
%
% \entermde[en-index]{de-index}{term}: Typeset term and add it to the index of english terms
% and de-index to the index of german terms
%
% how to specify an index:
% \enterm{term} - just put term into the index
% \enterm[en-index]{term} - typeset term and put en-index into the index
% \enterm[statistics!mean]{term} - mean is a subentry of statistics
% \enterm[statistics!average|see{statistics!mean}]{term} - cross reference to statistics mean
% \enterm[statistics@\textbf{statistics}]{term} - put index at statistics but use
% \textbf{statistics} for typesetting in the index
% \enterm[english index entry]{<english term>}
% typeset the term in italics and add it (or the optional argument) to
% typeset the term in italics and add it (or rather the optional argument) to
% the english index.
\newcommand{\enterm}[2][]{\textit{#2}\ifthenelse{\equal{#1}{}}{\protect\sindex[enterm]{#2}}{\protect\sindex[enterm]{#1}}}

View File

@ -1,16 +1,11 @@
function [counts, bins] = counthist(spikes, w)
% Compute and plot histogram of spike counts.
function counthist(spikes, w)
% Plot histogram of spike counts.
%
% [counts, bins] = counthist(spikes, w)
% counthist(spikes, w)
%
% Arguments:
% spikes: a cell array of vectors of spike times in seconds
% w: observation window duration in seconds for computing the counts
%
% Returns:
% counts: the histogram of counts normalized to probabilities
% bins: the bin centers for the histogram
% w: duration of window in seconds for computing the counts
% collect spike counts:
tmax = spikes{1}(end);
n = [];
@ -21,12 +16,12 @@ function [counts, bins] = counthist(spikes, w)
% for tk = 0:w:tmax-w
% nn = sum((times >= tk) & (times < tk+w));
% %nn = length(find((times >= tk) & (times < tk+w)));
% n = [n nn];
% n = [n, nn];
% end
% alternative 2: use the hist() function to do that!
tbins = 0.5*w:w:tmax-0.5*w;
nn = hist(times, tbins);
n = [n nn];
n = [n, nn];
end
% histogram of spike counts:
@ -36,9 +31,7 @@ function [counts, bins] = counthist(spikes, w)
counts = counts / sum(counts);
% plot:
if nargout == 0
bar(bins, counts);
xlabel('counts k');
ylabel('P(k)');
end
bar(bins, counts);
xlabel('counts k');
ylabel('P(k)');
end

View File

@ -8,7 +8,7 @@ function [pdf, centers] = isihist(isis, binwidth)
% binwidth: optional width in seconds to be used for the isi bins
%
% Returns:
% pdf: vector with probability density of interspike intervals in Hz
% pdf: vector with pdf of interspike intervals in Hertz
% centers: vector with the centers of the interspike intervals in seconds
if nargin < 2

View File

@ -5,11 +5,9 @@ function isivec = isis(spikes)
%
% Arguments:
% spikes: a cell array of vectors of spike times in seconds
% isivec: a column vector with all the interspike intervals
%
% Returns:
% isivec: a column vector with all the interspike intervals
isivec = [];
for k = 1:length(spikes)
difftimes = diff(spikes{k});

View File

@ -1,15 +1,15 @@
function isicorr = isiserialcorr(isivec, maxlag)
function [isicorr, lags] = isiserialcorr(isivec, maxlag)
% serial correlation of interspike intervals
%
% isicorr = isiserialcorr(isivec, maxlag)
%
% Arguments:
% isivec: vector of interspike intervals in seconds
% maxlag: the maximum lag in seconds
% maxlag: the maximum lag
%
% Returns:
% isicorr: vector with the serial correlations for lag 0 to maxlag
% lags: vector with the lags corresponding to isicorr
lags = 0:maxlag;
isicorr = zeros(size(lags));
for k = 1:length(lags)
@ -21,14 +21,4 @@ function isicorr = isiserialcorr(isivec, maxlag)
isicorr(k) = corr(isivec(1:end-lag), isivec(lag+1:end));
end
end
if nargout == 0
% plot:
plot(lags, isicorr, '-b');
hold on;
scatter(lags, isicorr, 50.0, 'b', 'filled');
hold off;
xlabel('Lag k')
ylabel('\rho_k')
end
end

View File

@ -0,0 +1,16 @@
function isicorr = plotisiserialcorr(isivec, maxlag)
% plot serial correlation of interspike intervals
%
% plotisiserialcorr(isivec, maxlag)
%
% Arguments:
% isivec: vector of interspike intervals in seconds
% maxlag: the maximum lag
[isicorr, lags] = isiserialcorr(isivec, maxlag);
plot(lags, isicorr, '-b');
hold on;
scatter(lags, isicorr, 20.0, 'b', 'filled');
hold off;
xlabel('Lag k')
ylabel('\rho_k')
end

View File

@ -2,21 +2,35 @@ function rasterplot(spikes, tmax)
% Display a spike raster of the spike times given in spikes.
%
% rasterplot(spikes, tmax)
%
% Arguments:
% spikes: a cell array of vectors of spike times in seconds
% tmax: plot spike raster upto tmax seconds
% tmax: plot spike raster up to tmax seconds
in_msecs = (tmax < 1.5);  % plot in milliseconds if the time span is short
spiketimes = [];
trials = [];
ntrials = length(spikes);
for k = 1:ntrials
times = spikes{k};
times = times(times<tmax);
if tmax < 1.5
times = 1000.0*times; % conversion to ms
end
for i = 1:length( times )
line([times(i) times(i)],[k-0.4 k+0.4], 'Color', 'k');
if in_msecs
times = 1000.0*times; % conversion to ms
end
% (x,y) pairs for start and stop of stroke
% plus nan separating strokes:
spiketimes = [spiketimes, ...
[times(:)'; times(:)'; times(:)'*nan]];
trials = [trials, ...
[ones(1, length(times)) * (k-0.4); ...
ones(1, length(times)) * (k+0.4); ...
ones(1, length(times)) * nan]];
end
if tmax < 1.5
% convert matrices into simple vectors of (x,y) pairs:
spiketimes = spiketimes(:);
trials = trials(:);
% plotting this is lightning fast:
plot(spiketimes, trials, 'k')
if in_msecs
xlabel('Time [ms]');
xlim([0.0 1000.0*tmax]);
else

View File

@ -5,8 +5,8 @@ from plotstyle import *
rate = 20.0
trials = 10
duration = 200.0
trials = 20
duration = 100.0
def hompoisson(rate, trials, duration) :
@ -28,7 +28,7 @@ def plot_count_hist(ax, spikes, win, pmax):
counts.extend(c)
cb = np.arange(0.0, 15.5, 1.0)
h, b = np.histogram(counts, cb, density=True)
ax.bar(b[:-1], h, bar_fac, **fsA)
ax.bar(b[:-1], h, bar_fac, align='center', **fsA)
ax.plot(cb, poisson.pmf(cb, rate*win), **lsBm)
ax.plot(cb, poisson.pmf(cb, rate*win), **psBm)
ax.text(0.9, 0.9, 'T=%.0fms' % (1000.0*win), transform=ax.transAxes, ha='right')
@ -44,6 +44,8 @@ if __name__ == "__main__":
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=1.5))
plot_count_hist(ax1, spikes, 0.02, 0.7)
plot_count_hist(ax2, spikes, 0.2, 0.25)
ax1.set_yticks(np.arange(0.0, 0.7, 0.2))
plot_count_hist(ax2, spikes, 0.2, 0.22)
ax2.set_yticks(np.arange(0.0, 0.25, 0.1))
plt.savefig('countexamples.pdf')
plt.close()

View File

@ -76,32 +76,53 @@ def plot_count_fano(ax1, ax2, spikes):
counts.extend(c)
mean_counts[k] = np.mean(counts)
var_counts[k] = np.var(counts)
ax1.plot(mean_counts, var_counts, **lsA)
ax1.plot(mean_counts, var_counts, zorder=100, **lsA)
ax1.set_xlabel('Mean count')
ax1.set_xlim(0.0, 20.0)
ax1.set_ylim(0.0, 20.0)
ax1.set_xticks(np.arange(0.0, 21.0, 10.0))
ax1.set_yticks(np.arange(0.0, 21.0, 10.0))
ax2.plot(1000.0*wins, var_counts/mean_counts, **lsB)
ax2.set_xlabel('Window', 'ms')
ax2.set_ylim(0.0, 1.2)
ax2.set_xscale('log')
ax2.set_xticks([10, 100, 1000])
ax2.set_xticklabels(['10', '100', '1000'])
ax2.xaxis.set_minor_locator(mpt.NullLocator())
ax2.set_yticks(np.arange(0.0, 1.2, 0.5))
def plot_fano(ax, spikes):
wins = np.logspace(-2, 0.0, 200)
mean_counts = np.zeros(len(wins))
var_counts = np.zeros(len(wins))
for k, win in enumerate(wins):
counts = []
for times in spikes:
c, _ = np.histogram(times, np.arange(0.0, duration, win))
counts.extend(c)
mean_counts[k] = np.mean(counts)
var_counts[k] = np.var(counts)
ax.plot(1000.0*wins, var_counts/mean_counts, **lsB)
ax.set_xlabel('Window', 'ms')
ax.set_ylim(0.0, 1.2)
ax.set_xscale('log')
ax.set_xticks([10, 100, 1000])
ax.set_xticklabels(['10', '100', '1000'])
ax.xaxis.set_minor_locator(mpt.NullLocator())
ax.set_yticks(np.arange(0.0, 1.2, 0.5))
if __name__ == "__main__":
homspikes = hompoisson(rate, trials, duration)
inhspikes = oupifspikes(rate, trials, duration, dt, 0.3, drate, tau)
fig, axs = plt.subplots(2, 2)
fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=1.5))
plot_count_fano(axs[0,0], axs[0,1], homspikes)
axs[0,0].text(0.1, 0.95, 'Poisson', transform=axs[0,0].transAxes)
axs[0,0].set_xlabel('')
axs[0,1].set_xlabel('')
axs[0,0].xaxis.set_major_formatter(mpt.NullFormatter())
axs[0,1].xaxis.set_major_formatter(mpt.NullFormatter())
plot_count_fano(axs[1,0], axs[1,1], inhspikes)
axs[1,0].text(0.1, 0.95, 'OU noise', transform=axs[1,0].transAxes)
fig.text(0.01, 0.58, 'Count variance', va='center', rotation='vertical')
fig.text(0.53, 0.58, 'Fano factor', va='center', rotation='vertical')
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=2.0))
plot_fano(ax1, homspikes)
ax1.set_ylabel('Fano factor')
ax1.text(0.1, 0.95, 'Poisson', transform=ax1.transAxes)
plot_fano(ax2, inhspikes)
ax2.axhline(1.0, **lsGrid)
ax2.text(0.1, 0.95, 'OU noise', transform=ax2.transAxes)
plt.savefig('fanoexamples.pdf')
plt.close()

View File

@ -93,6 +93,8 @@ def plot_hom_isih(ax):
ax.set_ylim(0.0, 31.0)
ax.set_xticks(np.arange(0.0, 151.0, 50.0))
ax.set_yticks(np.arange(0.0, 31.0, 10.0))
tt = np.linspace(0.0, 0.15, 100)
ax.plot(1000.0*tt, rate*np.exp(-rate*tt), **lsB)
plotisih(ax, isis(homspikes), 0.005)

View File

@ -13,9 +13,9 @@ information. Analyzing the statistics of spike times and their
relation to sensory stimuli or motor actions is central to
neuroscientific research. With multi-electrode arrays it is nowadays
possible to record from hundreds or even thousands of neurons
simultaneously. The open challenge is how to analyze such data sets in
order to understand how neural systems work. Let's start with the
basics in this chapter.
simultaneously. The open challenge is how to analyze such huge data
sets in smart ways in order to gain insights into the way neural
systems work. Let's start with the basics in this chapter.
The results of the pre-processing of electrophysiological recordings
are series of spike times, which are termed \enterm[spike train]{spike
@ -27,15 +27,16 @@ process]{Punktprozess}{point processes}.
\begin{figure}[bt]
\includegraphics[width=1\textwidth]{rasterexamples}
\titlecaption{\label{rasterexamplesfig}Raster plot.}{Raster plots of
ten trials of data illustrating the times of action
potentials. Each vertical stroke illustrates the time at which an
action potential was observed. Each row displays the events of one
trial. Shown is a stationary point process (homogeneous point
process with a rate $\lambda=20$\;Hz, left) and an non-stationary
point process with a rate that varies in time (noisy perfect
integrate-and-fire neuron driven by Ohrnstein-Uhlenbeck noise with
a time-constant $\tau=100$\,ms, right).}
\titlecaption{\label{rasterexamplesfig}Raster plots of spike
trains.}{Raster plots of ten trials of data illustrating the times
of action potentials. Each vertical stroke marks the time at
which an action potential was observed. Each row displays the
events of one trial. Shown is a stationary point process
(homogeneous point process with a rate $\lambda=20$\;Hz, left) and
a non-stationary point process with a rate that varies in time
(noisy perfect integrate-and-fire neuron driven by
Ornstein-Uhlenbeck noise with a time-constant $\tau=100$\,ms,
right).}
\end{figure}
@ -59,9 +60,10 @@ process]{Punktprozess}{point processes}.
\begin{figure}[tb]
\includegraphics{pointprocesssketch}
\titlecaption{\label{pointprocesssketchfig} Statistics of point
processes.}{A point process is a sequence of instances in time
$t_i$ that can be also characterized by inter-event intervals
$T_i=t_{i+1}-t_i$ and event counts $n_i$.}
processes.}{A temporal point process is a sequence of events in
time, $t_i$, that can also be characterized by the corresponding
inter-event intervals $T_i=t_{i+1}-t_i$ and event counts $n_i$,
i.e. the number of events that occurred so far.}
\end{figure}
\noindent
@ -77,30 +79,108 @@ plot in which each vertical line indicates the time of an event. The
event times from two different point processes are shown in
\figref{rasterexamplesfig}. In addition to the event times, point
processes can be described using the intervals $T_i=t_{i+1}-t_i$
between successive events or the number of observed events within a
certain time window $n_i$ (\figref{pointprocesssketchfig}).
\begin{exercise}{rasterplot.m}{}
Implement a function \varcode{rasterplot()} that displays the times of
action potentials within the first \varcode{tmax} seconds in a raster
plot. The spike times (in seconds) recorded in the individual trials
are stored as vectors of times within a cell array.
\end{exercise}
between successive events or the number of observed events $n_i$
within a certain time window (\figref{pointprocesssketchfig}).
In \enterm[point process!stationary]{stationary} point processes the
statistics do not change over time. In particular, the rate of the
process, the number of events per time, is constant. The homogeneous
point process shown in \figref{rasterexamplesfig} on the left is an
example of a stationary point process. Although locally within each
trial there are stretches with many events and others with long
intervals between events, the number of events within a small
time window, averaged over trials, does not change in time. In the first sections
of this chapter we introduce various statistics for characterizing
stationary point processes.
On the other hand, the example shown in \figref{rasterexamplesfig} on
the right is a non-stationary point process. The rate of the events in
all trials first decreases and then increases again. This common
change in rate may have been caused by some sensory input to the
neuron. How to estimate temporal changes in event rates (firing rates)
is covered in section~\ref{nonstationarysec}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Homogeneous Poisson process}
Before we can start analyzing point processes, we need some
data. As before, because we are working with computers, we can easily
simulate them. To get us started, we briefly introduce the
homogeneous Poisson process. The Poisson process is to point processes
what the Gaussian distribution is to the statistics of real-valued
data. It is a standard against which everything is compared.
In a \entermde[Poisson
process!homogeneous]{Poissonprozess!homogener}{homogeneous Poisson
process} events occur at any given time, that is within every small
time window of width $\Delta t$, with the same constant
probability. This probability is independent of absolute time and
independent of any events occurring before. Observing an event right
after another event is as likely as observing an event at any
specific time later on.
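To make this defining property concrete, here is a minimal sketch (an illustration only, not the method used below) that draws in every time bin of width $\Delta t$ an event with probability $\lambda \Delta t$:
\begin{lstlisting}
rate = 20.0;        % lambda: expected number of events per second
dt = 0.0001;        % small time bin in seconds
duration = 10.0;    % total duration in seconds
p = rate*dt;        % probability of an event in each bin
eventtimes = find(rand(1, round(duration/dt)) < p)*dt;
\end{lstlisting}
This bin-wise approach works, but it is wasteful for small $\Delta t$. The following paragraph describes the more efficient approach based on exponentially distributed intervals.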
The simplest way to simulate events of a homogeneous Poisson process
is based on the exponential distribution \eqnref{hompoissonexponential}
of event intervals. We randomly draw intervals from this distribution
and then sum them up to convert them to event times. The only
parameter of a homogeneous Poisson process is its rate, which defines
how many events per unit time are expected on average.
Here is a function that generates several trials of a homogeneous
Poisson process:
\lstinputlisting[caption={hompoissonspikes.m}]{hompoissonspikes.m}
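If \varcode{hompoissonspikes()} takes the rate in Hertz, the number of trials, and the duration in seconds (an assumption about the listing's interface) and returns the spike times as a cell array of vectors, one per trial, it can be called like this:
\begin{lstlisting}
% assumed interface: hompoissonspikes(rate, trials, duration)
spikes = hompoissonspikes(20.0, 10, 100.0);  % 10 trials of 100 s at 20 Hz
\end{lstlisting}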
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Raster plot}
Let's generate some events with this function and display them in a
raster plot as in \figref{rasterexamplesfig}. For the raster plot we
need to draw for each event a line at the corresponding time of the
event and at the height of the corresponding trial. You could loop
with one for-loop over the trials and with another for-loop over the
event times and plot a two-point line for every event. This, however,
is very slow. The fastest way is to concatenate the coordinates of all
strokes into large vectors, separate the events by \varcode{nan}
entries, and pass this to a single call of \varcode{plot()}:
\pageinputlisting[caption={rasterplot.m}]{rasterplot.m}
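For example, the Poisson spike trains generated in the previous section can be displayed like this (a usage sketch, reusing the assumed interface of \varcode{hompoissonspikes()}):
\begin{lstlisting}
spikes = hompoissonspikes(20.0, 10, 100.0);  % assumed interface, see above
rasterplot(spikes, 1.0);                     % raster of the first second
\end{lstlisting}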
Adapt this function to your needs and use it wherever possible to
illustrate your spike train data to your readers. They appreciate
seeing your raw data and being able to judge the data for themselves
before you go on analyzing firing rates, interspike-interval
correlations, etc.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Interval statistics}
The intervals $T_i=t_{i+1}-t_i$ between successive events are real
positive numbers. In the context of action potentials they are
referred to as \entermde{Interspikeintervalle}{interspike
intervals}. The statistics of interspike intervals are described
using common measures for describing the statistics of stochastic
real-valued variables:
Let's start with the interval statistics of stationary point
processes. The intervals $T_i=t_{i+1}-t_i$ between successive events
are real positive numbers. In the context of action potentials they
are referred to as \entermde[interspike
interval]{Interspikeintervall}{interspike intervals}, in short
\entermde[ISI|see{interspike
interval}]{ISI|see{Interspikeintervall}}{ISI}s. For analyzing event
intervals we can use all the usual statistics that we know from
describing univariate data sets of real numbers:
\begin{figure}[t]
\includegraphics[width=0.96\textwidth]{isihexamples}\vspace{-2ex}
\includegraphics[width=0.96\textwidth]{isihexamples}
\titlecaption{\label{isihexamplesfig}Interspike-interval
histograms}{of the spike trains shown in \figref{rasterexamplesfig}.}
histograms}{of the spike trains shown in
\figref{rasterexamplesfig}. The intervals of the homogeneous
Poisson process (left) are exponentially distributed according to
\eqnref{hompoissonexponential} (red). Typical for many sensory
neurons that fire more regularly is an ISI histogram like the one
shown on the right. There is a preferred interval where the
distribution peaks. The distribution falls off quickly towards
smaller intervals, and very small intervals are absent, probably
because of refractoriness of the spike generator. The tail of the
distribution again approaches an exponential distribution as for
the Poisson process.}
\end{figure}
\begin{exercise}{isis.m}{}
@ -110,44 +190,99 @@ real-valued variables:
trial are stored as vectors within a cell-array.
\end{exercise}
%\subsection{First order interval statistics}
\begin{itemize}
\item Probability density $p(T)$ of the intervals $T$
(\figref{isihexamplesfig}). Normalized to $\int_0^{\infty} p(T) \; dT
= 1$.
\item Average interval: $\mu_{ISI} = \langle T \rangle =
\frac{1}{n}\sum\limits_{i=1}^n T_i$.
\item Standard deviation of the interspike intervals: $\sigma_{ISI} = \sqrt{\langle (T - \langle T
\rangle)^2 \rangle}$\vspace{1ex}
\item \entermde[coefficient of variation]{Variationskoeffizient}{Coefficient of variation}:
$CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}$.
\item \entermde[diffusion coefficient]{Diffusionskoeffizient}{Diffusion coefficient}: $D_{ISI} =
\frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$.
(\figref{isihexamplesfig}). Normalized to $\int_0^{\infty} p(T) \;
dT = 1$. Commonly referred to as the \enterm[interspike
interval!histogram]{interspike interval histogram}. Its shape
reveals many interesting aspects like locking or bursting that
cannot be inferred from the mean or standard deviation. A particular
reference is the exponential distribution of intervals
\begin{equation}
\label{hompoissonexponential}
p_{exp}(T) = \lambda e^{-\lambda T}
\end{equation}
of a homogeneous Poisson spike train with rate $\lambda$.
\item Mean interval
\begin{equation}
\label{meanisi}
\mu_{ISI} = \langle T \rangle = \frac{1}{n}\sum_{i=1}^n T_i
\end{equation}
The average time it takes from one event to the next. For stationary
point processes the inverse of the mean interval is identical to
the mean rate $\lambda$ (number of events per time, see below) of
the process.
\item Standard deviation of intervals
\begin{equation}
\label{stdisi}
\sigma_{ISI} = \sqrt{\langle (T - \langle T \rangle)^2 \rangle}
\end{equation}
Periodically spiking neurons have little variability in their
intervals, whereas many cortical neurons cover a wide range with
their intervals. For homogeneous Poisson spike trains the standard
deviation of the intervals also equals the inverse rate, $1/\lambda$. Whether the standard deviation
of intervals is low or high, however, is better quantified by the
\item \entermde[coefficient of
variation]{Variationskoeffizient}{Coefficient of variation}, the
standard deviation of the ISIs relative to their mean:
\begin{equation}
\label{cvisi}
CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}
\end{equation}
Homogeneous Poisson spike trains have a $CV$ of exactly one. The
lower the $CV$, the more regularly a neuron is firing. $CV$s larger
than one are also possible, in spike trains where many short
intervals are separated by occasional very long ones (see the code
sketch after this list).
%\item \entermde[diffusion coefficient]{Diffusionskoeffizient}{Diffusion coefficient}: $D_{ISI} =
% \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$.
\end{itemize}
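As a minimal sketch, the mean interval, the standard deviation, and the coefficient of variation can be computed in a few lines from the vector of interspike intervals returned by the \varcode{isis()} function of the exercise above:
\begin{lstlisting}
isivec = isis(spikes);     % vector of interspike intervals in seconds
misi = mean(isivec);       % mean interval
sdisi = std(isivec);       % standard deviation of the intervals
cvisi = sdisi/misi;        % coefficient of variation
\end{lstlisting}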
\begin{exercise}{isihist.m}{}
Implement a function \varcode{isiHist()} that calculates the normalized
interspike interval histogram. The function should take two input
arguments; (i) a vector of interspike intervals and (ii) the width
of the bins used for the histogram. It further returns the
Implement a function \varcode{isihist()} that calculates the
normalized interspike interval histogram. The function should take
two input arguments: (i) a vector of interspike intervals and (ii)
the width of the bins used for the histogram. It returns the
probability density as well as the centers of the bins.
\end{exercise}
\begin{exercise}{plotisihist.m}{}
Implement a function that takes the return values of
\varcode{isiHist()} as input arguments and then plots the data. The
Implement a function that takes the returned values of
\varcode{isihist()} as input arguments and then plots the data. The
plot should show the histogram with the x-axis scaled to
milliseconds and should be annotated with the average ISI, the
standard deviation and the coefficient of variation.
standard deviation, and the coefficient of variation of the ISIs
(\figref{isihexamplesfig}).
\end{exercise}
\subsection{Interval correlations}
So called \entermde[return map]{return map}{return maps} are used to
illustrate interdependencies between successive interspike
intervals. The return map plots the delayed interval $T_{i+k}$ against
the interval $T_i$. The parameter $k$ is called the \enterm{lag}
(\determ{Verz\"ogerung}) $k$. Stationary and non-stationary return
maps are distinctly different \figref{returnmapfig}.
Intervals are not just numbers without an order, like weights of
tigers. Intervals are temporally ordered and there could be temporal
structure in the sequence of intervals. For example, short intervals
could be followed by longer ones, and vice versa. Such
dependencies in the sequence of intervals do not show up in the
interval histogram. We need additional measures to also quantify the
temporal structure of the sequence of intervals.
We can use the same techniques we know for visualizing and quantifying
correlations in bivariate data sets, i.e. scatter plots and
correlation coefficients. We form $(x,y)$ data pairs by taking the
series of intervals $T_i$ as $x$-data values and pairing them with the
$k$-th next intervals $T_{i+k}$ as $y$-data values. The parameter $k$
is called \enterm{lag} (\determ{Verz\"ogerung}). For lag one we pair
each interval with the next one. A \entermde{return map}{return map}
illustrates dependencies between successive intervals by simply
plotting the intervals $T_{i+k}$ against the intervals $T_i$ in a
scatter plot (\figref{returnmapfig}). For Poisson spike trains there
is no structure beyond the one expected from the exponential
interspike interval distribution, hinting at neighboring interspike
intervals being independent of each other. For the spike train based
on an Ornstein-Uhlenbeck process the return map is more clustered
along the diagonal, hinting at a positive correlation between
succeeding intervals. That is, short intervals are more likely to be
followed by short ones and long intervals more likely by long
ones. This temporal structure was already clearly visible in each
trial of the spike raster shown in \figref{rasterexamplesfig} on the
right.
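As a minimal sketch, such a return map can be produced with a single scatter plot, assuming \varcode{isivec} holds the interspike intervals in seconds:
\begin{lstlisting}
lag = 1;       % pair each interval with the next one
scatter(1000.0*isivec(1:end-lag), 1000.0*isivec(1+lag:end), 10.0, 'filled');
xlabel('interval T_i [ms]');
ylabel('interval T_{i+1} [ms]');
\end{lstlisting}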
\begin{figure}[tp]
\includegraphics[width=1\textwidth]{serialcorrexamples}
@ -165,22 +300,46 @@ maps are distinctly different \figref{returnmapfig}.
the mean firing rate of the spike trains.}
\end{figure}
Such dependencies can be further quantified using the
Such dependencies can be further quantified by
\entermde[correlation!serial]{Korrelation!serielle}{serial
correlations} \figref{returnmapfig}. The serial correlation is the
correlation coefficient of the intervals $T_i$ and the intervals
delayed by the lag $T_{i+k}$:
\[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)}
= {\rm corr}(T_{i+k}, T_i) \] The resulting correlation coefficient
$\rho_k$ is usually plotted against the lag $k$
\figref{returnmapfig}. $\rho_0=1$ is the correlation of each interval
with itself and is always 1.
correlations}. These quantify the correlations between successive
intervals by Pearson's correlation coefficients between the intervals
$T_{i+k}$ and $T_i$ as a function of the lag $k$:
\begin{equation}
\label{serialcorrelation}
\rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)}
= {\rm corr}(T_{i+k}, T_i)
\end{equation}
The serial correlations $\rho_k$ are usually plotted against the lag
$k$ for a small range of lags (\figref{returnmapfig}). $\rho_0$ is
the correlation of each interval with itself and thus always equals one.
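For a single lag this is simply Pearson's correlation coefficient between the interval series and a copy of itself shifted by $k$. A minimal sketch, assuming \varcode{isivec} is a column vector of interspike intervals (see also the exercise below):
\begin{lstlisting}
k = 1;                                         % lag
rho = corr(isivec(1:end-k), isivec(1+k:end));  % serial correlation at lag k
\end{lstlisting}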
If the serial correlations all equal zero, $\rho_k =0$ for $k>0$, then
the length of an interval is independent of all the previous
ones. Such a process is a \enterm{renewal process}
(\determ{Erneuerungsprozess}). Each event, each action potential,
erases the history. The occurrence of the next event is independent of
what happened before. To a first approximation an action potential
erases all information about the past from the membrane voltage and
thus spike trains may approximate renewal processes.
However, other variables like the intracellular calcium concentration
or the states of slowly switching ion channels may carry information
from one interspike interval to the next and thus introduce
correlations between intervals. Such non-renewal dynamics is then
characterized by non-zero serial correlations
(\figref{returnmapfig}).
\begin{exercise}{isiserialcorr.m}{}
Implement a function \varcode{isiserialcorr()} that takes a vector of
interspike intervals as input argument and calculates the serial
correlation. The function should further plot the serial
correlation.
Implement a function \varcode{isiserialcorr()} that takes a vector
of interspike intervals as input argument and calculates the serial
correlations up to some maximum lag.
\end{exercise}
\begin{exercise}{plotisiserialcorr.m}{}
Implement a function \varcode{plotisiserialcorr()} that takes a
vector of interspike intervals as input argument and generates a
plot of the serial correlations.
\end{exercise}
@ -189,11 +348,11 @@ with itself and is always 1.
\begin{figure}[t]
\includegraphics{countexamples}
\titlecaption{\label{countstatsfig}Count statistics.}{The
distribution of the number of events $k$ (blue) counted within
windows of 20\,ms (left) or 200\,ms duration (right) of the
homogeneous Poisson spike train with a rate of 20\,Hz shown in
\figref{rasterexamplesfig}. For Poisson spike trains these
\titlecaption{\label{countstatsfig}Count statistics.}{Probability
distributions of counting $k$ events (blue) within windows of
20\,ms (left) or 200\,ms duration (right) of a homogeneous Poisson
spike train with a rate of 20\,Hz
(\figref{rasterexamplesfig}). For Poisson spike trains these
distributions are given by Poisson distributions (red).}
\end{figure}
@ -204,7 +363,7 @@ is the average number of spikes counted within some time interval $W$
\label{firingrate}
r = \frac{\langle n \rangle}{W}
\end{equation}
and is neasured in Hertz. The average of the spike counts is taken
and is measured in Hertz. The average of the spike counts is taken
over trials. For stationary spike trains (no change in statistics, in
particular the firing rate, over time), the firing rate based on the
spike count equals the inverse average interspike interval
@ -216,28 +375,44 @@ split into many segments $i$, each of duration $W$, and the number of
events $n_i$ in each of the segments can be counted. The integer event
counts can be quantified in the usual ways:
\begin{itemize}
\item Histogram of the counts $n_i$ (\figref{countstatsfig}).
\item Histogram of the counts $n_i$, appropriately normalized to
probability distributions. For homogeneous Poisson spike trains with
rate $\lambda$ the resulting probability distributions follow a
Poisson distribution (\figref{countstatsfig}; see also the sketch
after this list), where the probability of counting $k$ events
within a time window $W$ is given by
\begin{equation}
\label{poissondist}
P(k) = \frac{(\lambda W)^k e^{-\lambda W}}{k!}
\end{equation}
\item Average number of counts: $\mu_n = \langle n \rangle$.
\item Variance of counts:
$\sigma_n^2 = \langle (n - \langle n \rangle)^2 \rangle$.
\end{itemize}
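As a sketch of such a comparison, assume \varcode{counts} is a vector of spike counts obtained with a counting window of duration \varcode{w} (in seconds) and \varcode{rate} is the rate $\lambda$ of a homogeneous Poisson spike train. Then the normalized count histogram can be plotted on top of the corresponding Poisson distribution:
\begin{lstlisting}
k = 0:max(counts);                         % possible counts
pk = hist(counts, k)/length(counts);       % normalized count histogram
ppois = (rate*w).^k .* exp(-rate*w) ./ factorial(k);  % Poisson distribution
bar(k, pk);
hold on;
plot(k, ppois, 'r');
hold off;
xlabel('counts k');
ylabel('P(k)');
\end{lstlisting}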
Because spike counts are unitless and positive numbers, the
Because spike counts are unitless and positive numbers the
\begin{itemize}
\item \entermde{Fano Faktor}{Fano factor} (variance of counts divided
by average count): $F = \frac{\sigma_n^2}{\mu_n}$.
by average count)
\begin{equation}
\label{fano}
F = \frac{\sigma_n^2}{\mu_n}
\end{equation}
is a commonly used measure for quantifying the variability of event
counts relative to the mean number of events. In particular for
homogeneous Poisson processes the Fano factor equals one,
independently of the time window $W$.
\end{itemize}
is an additional measure quantifying event counts.
Note that all of these statistics depend on the chosen window length
$W$. The average spike count, for example, grows linearly with $W$ for
sufficiently large time windows: $\langle n \rangle = r W$,
\eqnref{firingrate}. Doubling the counting window doubles the spike
count. As does the spike-count variance (\figref{fanofig}). At smaller
time windows the statistics of the event counts might depend on the
particular duration of the counting window. There might be an optimal
time window for which the variance of the spike count is minimal. The
Fano factor plotted as a function of the time window illustrates such
properties of point processes (\figref{fanofig}).
Note that all of these statistics depend in general on the chosen
window length $W$. The average spike count, for example, grows
linearly with $W$ for sufficiently large time windows: $\langle n
\rangle = r W$, \eqnref{firingrate}. Doubling the counting window
doubles the spike count, as does the spike-count variance
(\figref{fanofig}). At smaller time windows the statistics of the
event counts might depend on the particular duration of the counting
window. There might be an optimal time window for which the variance
of the spike count is minimal. The Fano factor plotted as a function
of the time window illustrates such properties of point processes
(\figref{fanofig}).
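The following sketch illustrates how such a curve can be computed: count events in windows of increasing duration and divide the variance of the counts by their mean. Here, \varcode{spikes} is assumed to be a cell array of spike-time vectors and \varcode{tmax} the duration of each trial in seconds; the counting follows the same scheme as in the \varcode{counthist()} exercise below.
\begin{lstlisting}
% spikes: cell array of spike-time vectors, tmax: trial duration in seconds
wins = logspace(-2, 0, 50);              % window durations in seconds
fano = zeros(size(wins));
for j = 1:length(wins)
    w = wins(j);
    counts = [];
    for k = 1:length(spikes)
        tbins = 0.5*w:w:tmax-0.5*w;      % centers of the counting windows
        counts = [counts, hist(spikes{k}, tbins)];
    end
    fano(j) = var(counts)/mean(counts);  % Fano factor for this window
end
plot(1000.0*wins, fano);
xlabel('window [ms]');
ylabel('Fano factor');
\end{lstlisting}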
This also has consequences for information transmission in neural
systems. The lower the variance in spike count relative to the
@ -246,126 +421,33 @@ information encoded in the mean spike count is transmitted.
\begin{figure}[t]
\includegraphics{fanoexamples}
\titlecaption{\label{fanofig}
Count variance and Fano factor.}{Variance of event counts as a
function of mean counts obtained by varying the duration of the
count window (left). Dividing the count variance by the respective
mean results in the Fano factor that can be plotted as a function
of the count window (right). For Poisson spike trains the variance
always equals the mean counts and consequently the Fano factor
equals one irrespective of the count window (top). A spike train
with positive correlations between interspike intervals (caused by
Ohrnstein-Uhlenbeck noise) has a minimum in the Fano factor, that
\titlecaption{\label{fanofig} Fano factor.}{Counting events in time
windows of given duration and then dividing the variance of the
counts by their mean results in the Fano factor. Here, the Fano
factor is plotted as a function of the duration of the window used
to count events. For Poisson spike trains the variance always
equals the mean counts and consequently the Fano factor equals one
irrespective of the count window (left). A spike train with
positive correlations between interspike intervals (caused by an
Ornstein-Uhlenbeck process) has a minimum in the Fano factor, that
is, an analysis window for which the relative count variance is
minimal, somewhere close to the correlation time scale of the
interspike intervals (bottom).}
interspike intervals (right).}
\end{figure}
\begin{exercise}{counthist.m}{}
Implement a function \varcode{counthist()} that calculates and plots
the distribution of spike counts observed in a certain time
window. The function should take two input arguments: (i) a
cell-array of vectors containing the spike times in seconds observed
in a number of trials, and (ii) the duration of the time window that
is used to evaluate the counts.
\end{exercise}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Homogeneous Poisson process}
The Gaussian distribution is, because of the central limit theorem,
the standard distribution for continuous measures. The equivalent in
the realm of point processes is the
\entermde[distribution!Poisson]{Verteilung!Poisson-}{Poisson distribution}.
In a \entermde[Poisson process!homogeneous]{Poissonprozess!homogener}{homogeneous Poisson
process} the events occur at a fixed rate $\lambda=\text{const}$ and
are independent of both the time $t$ and occurrence of previous events
(\figref{hompoissonfig}). The probability of observing an event within
a small time window of width $\Delta t$ is given by
\begin{equation}
\label{hompoissonprob}
P = \lambda \cdot \Delta t \; .
\end{equation}
In an \entermde[Poisson process!inhomogeneous]{Poissonprozess!inhomogener}{inhomogeneous Poisson
process}, however, the rate $\lambda$ depends on time: $\lambda =
\lambda(t)$.
\begin{exercise}{poissonspikes.m}{}
Implement a function \varcode{poissonspikes()} that uses a homogeneous
Poisson process to generate events at a given rate for a certain
duration and a number of trials. The rate should be given in Hertz
and the duration of the trials is given in seconds. The function
should return the event times in a cell-array. Each entry in this
array represents the events observed in one trial. Apply
\eqnref{hompoissonprob} to generate the event times.
\end{exercise}
\begin{figure}[t]
\includegraphics[width=1\textwidth]{poissonraster100hz}
\titlecaption{\label{hompoissonfig}Rasterplot of spikes of a
homogeneous Poisson process with a rate $\lambda=100$\,Hz.}{}
\end{figure}
\begin{figure}[t]
\includegraphics[width=0.45\textwidth]{poissonisihexp20hz}\hfill
\includegraphics[width=0.45\textwidth]{poissonisihexp100hz}
\titlecaption{\label{hompoissonisihfig}Distribution of interspike
intervals of two Poisson processes.}{The processes differ in their
rate (left: $\lambda=20$\,Hz, right: $\lambda=100$\,Hz). The red
lines indicate the corresponding exponential interval distribution
\eqnref{poissonintervals}.}
\end{figure}
The homogeneous Poisson process has the following properties:
\begin{itemize}
\item Intervals $T$ are exponentially distributed (\figref{hompoissonisihfig}):
\begin{equation}
\label{poissonintervals}
p(T) = \lambda e^{-\lambda T} \; .
\end{equation}
\item The average interval is $\mu_{ISI} = \frac{1}{\lambda}$ .
\item The variance of the intervals is $\sigma_{ISI}^2 = \frac{1}{\lambda^2}$ .
\item Thus, the coefficient of variation is always $CV_{ISI} = 1$ .
\item The serial correlation is $\rho_k =0$ for $k>0$, since the
occurrence of an event is independent of all previous events. Such a
process is also called a \enterm{renewal process} (\determ{Erneuerungsprozess}).
\item The number of events $k$ within a temporal window of duration
$W$ is Poisson distributed:
\begin{equation}
\label{poissoncounts}
P(k) = \frac{(\lambda W)^k e^{-\lambda W}}{k!}
\end{equation}
(\figref{hompoissoncountfig})
\item The Fano Faktor is always $F=1$ .
\end{itemize}
\begin{exercise}{hompoissonspikes.m}{}
Implement a function \varcode{hompoissonspikes()} that uses a
homogeneous Poisson process to generate spike events at a given rate
for a certain duration and a number of trials. The rate should be
given in Hertz and the duration of the trials is given in
seconds. The function should return the event times in a
cell-array. Each entry in this array represents the events observed
in one trial. Apply \eqnref{poissonintervals} to generate the event
times.
window. The function should take two input arguments: a cell-array
of vectors containing the spike times in seconds observed in a
number of trials, and the duration of the time window that is used
to evaluate the counts.
\end{exercise}
\begin{figure}[t]
\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}\hfill
\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms}
\titlecaption{\label{hompoissoncountfig}Distribution of counts of a
Poisson spike train.}{The count statistics was generated for two
different windows of width $W=10$\,ms (left) and width $W=100$\,ms
(right). The red line illustrates the corresponding Poisson
distribution \eqnref{poissoncounts}.}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Time-dependent firing rate}
\label{nonstationarysec}
So far we have discussed stationary spike trains. The statistical properties
of these did not change within the observation time (stationary point
@ -533,6 +615,7 @@ relevate time-scale.
\end{exercise}
\section{Spike-triggered Average}
\label{stasec}
The graphical representation of the neuronal activity alone is not
sufficient to investigate the relation between the neuronal response

View File

@ -85,7 +85,7 @@ def plot_inhomogeneous_spikes(ax):
if __name__ == "__main__":
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=cm_size(figure_width, 0.5*figure_width))
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.subplots_adjust(**adjust_fs(fig, left=4.0, right=1.0, top=1.2))
plot_homogeneous_spikes(ax1)
plot_inhomogeneous_spikes(ax2)

View File

@ -116,9 +116,10 @@
\includechapter{pointprocesses}
% add a chapter on simulating point-processes/spike trains
% (Poisson spike trains, integrate-and-fire models)
% (Poisson spike trains, LMP models, integrate-and-fire models, full HH like models)
% add chapter on information theory, mutual information, stimulus reconstruction, coherence
% move STA here!
% add a chapter on Bayesian inference (the Neuroscience of it and a
% bit of application for statistical problems).