From bd610a9b1dbafdc9375cd0027bca041dc7f2ceea Mon Sep 17 00:00:00 2001 From: Jan Benda Date: Sat, 16 Jan 2021 10:50:27 +0100 Subject: [PATCH 1/3] [pointprocesses] imrpoved count and fano plots --- pointprocesses/lecture/countexamples.py | 10 ++++++---- pointprocesses/lecture/fanoexamples.py | 11 ++++++++--- pointprocesses/lecture/pointprocesses.tex | 10 +++++----- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pointprocesses/lecture/countexamples.py b/pointprocesses/lecture/countexamples.py index 7e0f98f..8c8a8f3 100644 --- a/pointprocesses/lecture/countexamples.py +++ b/pointprocesses/lecture/countexamples.py @@ -5,8 +5,8 @@ from plotstyle import * rate = 20.0 -trials = 10 -duration = 200.0 +trials = 20 +duration = 100.0 def hompoisson(rate, trials, duration) : @@ -28,7 +28,7 @@ def plot_count_hist(ax, spikes, win, pmax): counts.extend(c) cb = np.arange(0.0, 15.5, 1.0) h, b = np.histogram(counts, cb, density=True) - ax.bar(b[:-1], h, bar_fac, **fsA) + ax.bar(b[:-1], h, bar_fac, align='center', **fsA) ax.plot(cb, poisson.pmf(cb, rate*win), **lsBm) ax.plot(cb, poisson.pmf(cb, rate*win), **psBm) ax.text(0.9, 0.9, 'T=%.0fms' % (1000.0*win), transform=ax.transAxes, ha='right') @@ -44,6 +44,8 @@ if __name__ == "__main__": fig, (ax1, ax2) = plt.subplots(1, 2) fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=1.5)) plot_count_hist(ax1, spikes, 0.02, 0.7) - plot_count_hist(ax2, spikes, 0.2, 0.25) + ax1.set_yticks(np.arange(0.0, 0.7, 0.2)) + plot_count_hist(ax2, spikes, 0.2, 0.22) + ax2.set_yticks(np.arange(0.0, 0.25, 0.1)) plt.savefig('countexamples.pdf') plt.close() diff --git a/pointprocesses/lecture/fanoexamples.py b/pointprocesses/lecture/fanoexamples.py index dc68c0e..228b473 100644 --- a/pointprocesses/lecture/fanoexamples.py +++ b/pointprocesses/lecture/fanoexamples.py @@ -76,23 +76,27 @@ def plot_count_fano(ax1, ax2, spikes): counts.extend(c) mean_counts[k] = np.mean(counts) var_counts[k] = np.var(counts) - ax1.plot(mean_counts, var_counts, **lsA) + ax1.plot(mean_counts, var_counts, zorder=100, **lsA) ax1.set_xlabel('Mean count') ax1.set_xlim(0.0, 20.0) ax1.set_ylim(0.0, 20.0) + ax1.set_xticks(np.arange(0.0, 21.0, 10.0)) + ax1.set_yticks(np.arange(0.0, 21.0, 10.0)) ax2.plot(1000.0*wins, var_counts/mean_counts, **lsB) ax2.set_xlabel('Window', 'ms') ax2.set_ylim(0.0, 1.2) ax2.set_xscale('log') ax2.set_xticks([10, 100, 1000]) ax2.set_xticklabels(['10', '100', '1000']) + ax2.xaxis.set_minor_locator(mpt.NullLocator()) + ax2.set_yticks(np.arange(0.0, 1.2, 0.5)) if __name__ == "__main__": homspikes = hompoisson(rate, trials, duration) inhspikes = oupifspikes(rate, trials, duration, dt, 0.3, drate, tau) fig, axs = plt.subplots(2, 2) - fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=1.5)) + fig.subplots_adjust(**adjust_fs(fig, top=0.5, right=2.0)) plot_count_fano(axs[0,0], axs[0,1], homspikes) axs[0,0].text(0.1, 0.95, 'Poisson', transform=axs[0,0].transAxes) axs[0,0].set_xlabel('') @@ -100,8 +104,9 @@ if __name__ == "__main__": axs[0,0].xaxis.set_major_formatter(mpt.NullFormatter()) axs[0,1].xaxis.set_major_formatter(mpt.NullFormatter()) plot_count_fano(axs[1,0], axs[1,1], inhspikes) + axs[1,1].axhline(1.0, **lsGrid) axs[1,0].text(0.1, 0.95, 'OU noise', transform=axs[1,0].transAxes) fig.text(0.01, 0.58, 'Count variance', va='center', rotation='vertical') - fig.text(0.53, 0.58, 'Fano factor', va='center', rotation='vertical') + fig.text(0.51, 0.58, 'Fano factor', va='center', rotation='vertical') plt.savefig('fanoexamples.pdf') plt.close() diff --git 
a/pointprocesses/lecture/pointprocesses.tex b/pointprocesses/lecture/pointprocesses.tex index d13d0ab..65947c3 100644 --- a/pointprocesses/lecture/pointprocesses.tex +++ b/pointprocesses/lecture/pointprocesses.tex @@ -189,11 +189,11 @@ with itself and is always 1. \begin{figure}[t] \includegraphics{countexamples} - \titlecaption{\label{countstatsfig}Count statistics.}{The - distribution of the number of events $k$ (blue) counted within - windows of 20\,ms (left) or 200\,ms duration (right) of the - homogeneous Poisson spike train with a rate of 20\,Hz shown in - \figref{rasterexamplesfig}. For Poisson spike trains these + \titlecaption{\label{countstatsfig}Count statistics.}{Probability + distributions of counting $k$ events $k$ (blue) within windows of + 20\,ms (left) or 200\,ms duration (right) of a homogeneous Poisson + spike train with a rate of 20\,Hz + (\figref{rasterexamplesfig}). For Poisson spike trains these distributions are given by Poisson distributions (red).} \end{figure} From 0f0dfafd56f6eec2c0683a8f54f10f31d4132a78 Mon Sep 17 00:00:00 2001 From: Jan Benda Date: Sun, 17 Jan 2021 23:57:06 +0100 Subject: [PATCH 2/3] [pointprocesses] better incorporated Poisson spike train --- header.tex | 19 +- pointprocesses/lecture/isihexamples.py | 2 + pointprocesses/lecture/pointprocesses.tex | 495 ++++++++++++---------- 3 files changed, 283 insertions(+), 233 deletions(-) diff --git a/header.tex b/header.tex index 2f2360d..73288b8 100644 --- a/header.tex +++ b/header.tex @@ -222,11 +222,26 @@ {\vspace{-2ex}\lstset{#1}\noindent\minipage[t]{1\linewidth}}% {\endminipage} -%%%%% english, german, code and file terms: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%% english and german terms: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{ifthen} +% \enterm[en-index]{term}: Typeset term and add it to the index of english terms +% +% \determ[de-index]{term}: Typeset term and add it to the index of german terms +% +% \entermde[en-index]{de-index}{term}: Typeset term and add it to the index of english terms +% and de-index to the index of german terms +% +% how to specificy an index: +% \enterm{term} - just put term into the index +% \enterm[en-index]{term} - typeset term and put en-index into the index +% \enterm[statistics!mean]{term} - mean is a subentry of statistics +% \enterm[statistics!average|see{statistics!mean}]{term} - cross reference to statistics mean +% \enterm[statistics@\textbf{statistics}]{term} - put index at statistics but use +% \textbf{statistics} for typesetting in the index + % \enterm[english index entry]{} -% typeset the term in italics and add it (or the optional argument) to +% typeset the term in italics and add it (or rather the optional argument) to % the english index. 
\newcommand{\enterm}[2][]{\textit{#2}\ifthenelse{\equal{#1}{}}{\protect\sindex[enterm]{#2}}{\protect\sindex[enterm]{#1}}} diff --git a/pointprocesses/lecture/isihexamples.py b/pointprocesses/lecture/isihexamples.py index 9d0f7c0..c3e2d87 100644 --- a/pointprocesses/lecture/isihexamples.py +++ b/pointprocesses/lecture/isihexamples.py @@ -93,6 +93,8 @@ def plot_hom_isih(ax): ax.set_ylim(0.0, 31.0) ax.set_xticks(np.arange(0.0, 151.0, 50.0)) ax.set_yticks(np.arange(0.0, 31.0, 10.0)) + tt = np.linspace(0.0, 0.15, 100) + ax.plot(1000.0*tt, rate*np.exp(-rate*tt), **lsB) plotisih(ax, isis(homspikes), 0.005) diff --git a/pointprocesses/lecture/pointprocesses.tex b/pointprocesses/lecture/pointprocesses.tex index 65947c3..f1a57f9 100644 --- a/pointprocesses/lecture/pointprocesses.tex +++ b/pointprocesses/lecture/pointprocesses.tex @@ -34,7 +34,7 @@ process]{Punktprozess}{point processes}. trial. Shown is a stationary point process (homogeneous point process with a rate $\lambda=20$\;Hz, left) and an non-stationary point process with a rate that varies in time (noisy perfect - integrate-and-fire neuron driven by Ohrnstein-Uhlenbeck noise with + integrate-and-fire neuron driven by Ornstein-Uhlenbeck noise with a time-constant $\tau=100$\,ms, right).} \end{figure} @@ -87,190 +87,6 @@ certain time window $n_i$ (\figref{pointprocesssketchfig}). are stored as vectors of times within a cell array. \end{exercise} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Interval statistics} - -The intervals $T_i=t_{i+1}-t_i$ between successive events are real -positive numbers. In the context of action potentials they are -referred to as \entermde{Interspikeintervalle}{interspike - intervals}. The statistics of interspike intervals are described -using common measures for describing the statistics of stochastic -real-valued variables: - -\begin{figure}[t] - \includegraphics[width=0.96\textwidth]{isihexamples}\vspace{-2ex} - \titlecaption{\label{isihexamplesfig}Interspike-interval - histograms}{of the spike trains shown in \figref{rasterexamplesfig}.} -\end{figure} - -\begin{exercise}{isis.m}{} - Implement a function \varcode{isis()} that calculates the interspike - intervals from several spike trains. The function should return a - single vector of intervals. The spike times (in seconds) of each - trial are stored as vectors within a cell-array. -\end{exercise} - -%\subsection{First order interval statistics} -\begin{itemize} -\item Probability density $p(T)$ of the intervals $T$ - (\figref{isihexamplesfig}). Normalized to $\int_0^{\infty} p(T) \; dT - = 1$. -\item Average interval: $\mu_{ISI} = \langle T \rangle = - \frac{1}{n}\sum\limits_{i=1}^n T_i$. -\item Standard deviation of the interspike intervals: $\sigma_{ISI} = \sqrt{\langle (T - \langle T - \rangle)^2 \rangle}$\vspace{1ex} -\item \entermde[coefficient of variation]{Variationskoeffizient}{Coefficient of variation}: - $CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}$. -\item \entermde[diffusion coefficient]{Diffusionskoeffizient}{Diffusion coefficient}: $D_{ISI} = - \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$. -\end{itemize} - -\begin{exercise}{isihist.m}{} - Implement a function \varcode{isiHist()} that calculates the normalized - interspike interval histogram. The function should take two input - arguments; (i) a vector of interspike intervals and (ii) the width - of the bins used for the histogram. It further returns the - probability density as well as the centers of the bins. 
-\end{exercise} - -\begin{exercise}{plotisihist.m}{} - Implement a function that takes the return values of - \varcode{isiHist()} as input arguments and then plots the data. The - plot should show the histogram with the x-axis scaled to - milliseconds and should be annotated with the average ISI, the - standard deviation and the coefficient of variation. -\end{exercise} - -\subsection{Interval correlations} -So called \entermde[return map]{return map}{return maps} are used to -illustrate interdependencies between successive interspike -intervals. The return map plots the delayed interval $T_{i+k}$ against -the interval $T_i$. The parameter $k$ is called the \enterm{lag} -(\determ{Verz\"ogerung}) $k$. Stationary and non-stationary return -maps are distinctly different \figref{returnmapfig}. - -\begin{figure}[tp] - \includegraphics[width=1\textwidth]{serialcorrexamples} - \titlecaption{\label{returnmapfig}Interspike-interval - correlations}{of the spike trains shown in - \figref{rasterexamplesfig}. Upper panels show the return maps and - lower panels the serial correlations of successive intervals - separated by lag $k$. All the interspike intervals of the - stationary spike trains are independent of each other --- this is - a so called \enterm{renewal process} - (\determ{Erneuerungsprozess}). In contrast, the ones of the - non-stationary spike trains show positive correlations that decay - for larger lags. The positive correlations in this example are - caused by a common stimulus that slowly increases and decreases - the mean firing rate of the spike trains.} -\end{figure} - -Such dependencies can be further quantified using the -\entermde[correlation!serial]{Korrelation!serielle}{serial - correlations} \figref{returnmapfig}. The serial correlation is the -correlation coefficient of the intervals $T_i$ and the intervals -delayed by the lag $T_{i+k}$: -\[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} -= {\rm corr}(T_{i+k}, T_i) \] The resulting correlation coefficient -$\rho_k$ is usually plotted against the lag $k$ -\figref{returnmapfig}. $\rho_0=1$ is the correlation of each interval -with itself and is always 1. - -\begin{exercise}{isiserialcorr.m}{} - Implement a function \varcode{isiserialcorr()} that takes a vector of - interspike intervals as input argument and calculates the serial - correlation. The function should further plot the serial - correlation. -\end{exercise} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Count statistics} - -\begin{figure}[t] - \includegraphics{countexamples} - \titlecaption{\label{countstatsfig}Count statistics.}{Probability - distributions of counting $k$ events $k$ (blue) within windows of - 20\,ms (left) or 200\,ms duration (right) of a homogeneous Poisson - spike train with a rate of 20\,Hz - (\figref{rasterexamplesfig}). For Poisson spike trains these - distributions are given by Poisson distributions (red).} -\end{figure} - -The most commonly used measure for characterizing spike trains is the -\enterm[firing rate!average]{average firing rate}. The firing rate $r$ -is the average number of spikes counted within some time interval $W$ -\begin{equation} - \label{firingrate} - r = \frac{\langle n \rangle}{W} -\end{equation} -and is neasured in Hertz. The average of the spike counts is taken -over trials. 
For stationary spike trains (no change in statistics, in -particular the firing rate, over time), the firing rate based on the -spike count equals the inverse average interspike interval -$1/\mu_{ISI}$. - -The firing rate based on an averaged spike counts is one example of -many statistics based on event counts. Stationary spike trains can be -split into many segments $i$, each of duration $W$, and the number of -events $n_i$ in each of the segments can be counted. The integer event -counts can be quantified in the usual ways: -\begin{itemize} -\item Histogram of the counts $n_i$ (\figref{countstatsfig}). -\item Average number of counts: $\mu_n = \langle n \rangle$. -\item Variance of counts: - $\sigma_n^2 = \langle (n - \langle n \rangle)^2 \rangle$. -\end{itemize} -Because spike counts are unitless and positive numbers, the -\begin{itemize} -\item \entermde{Fano Faktor}{Fano factor} (variance of counts divided - by average count): $F = \frac{\sigma_n^2}{\mu_n}$. -\end{itemize} -is an additional measure quantifying event counts. - -Note that all of these statistics depend on the chosen window length -$W$. The average spike count, for example, grows linearly with $W$ for -sufficiently large time windows: $\langle n \rangle = r W$, -\eqnref{firingrate}. Doubling the counting window doubles the spike -count. As does the spike-count variance (\figref{fanofig}). At smaller -time windows the statistics of the event counts might depend on the -particular duration of the counting window. There might be an optimal -time window for which the variance of the spike count is minimal. The -Fano factor plotted as a function of the time window illustrates such -properties of point processes (\figref{fanofig}). - -This also has consequences for information transmission in neural -systems. The lower the variance in spike count relative to the -averaged count, the higher the signal-to-noise ratio at which -information encoded in the mean spike count is transmitted. - -\begin{figure}[t] - \includegraphics{fanoexamples} - \titlecaption{\label{fanofig} - Count variance and Fano factor.}{Variance of event counts as a - function of mean counts obtained by varying the duration of the - count window (left). Dividing the count variance by the respective - mean results in the Fano factor that can be plotted as a function - of the count window (right). For Poisson spike trains the variance - always equals the mean counts and consequently the Fano factor - equals one irrespective of the count window (top). A spike train - with positive correlations between interspike intervals (caused by - Ohrnstein-Uhlenbeck noise) has a minimum in the Fano factor, that - is an analysis window for which the relative count variance is - minimal somewhere close to the correlation time scale of the - interspike intervals (bottom).} -\end{figure} - -\begin{exercise}{counthist.m}{} - Implement a function \varcode{counthist()} that calculates and plots - the distribution of spike counts observed in a certain time - window. The function should take two input arguments: (i) a - cell-array of vectors containing the spike times in seconds observed - in a number of trials, and (ii) the duration of the time window that - is used to evaluate the counts. -\end{exercise} - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Homogeneous Poisson process} @@ -303,45 +119,6 @@ In an \entermde[Poisson process!inhomogeneous]{Poissonprozess!inhomogener}{inhom \eqnref{hompoissonprob} to generate the event times. 
\end{exercise} -\begin{figure}[t] - \includegraphics[width=1\textwidth]{poissonraster100hz} - \titlecaption{\label{hompoissonfig}Rasterplot of spikes of a - homogeneous Poisson process with a rate $\lambda=100$\,Hz.}{} -\end{figure} - -\begin{figure}[t] - \includegraphics[width=0.45\textwidth]{poissonisihexp20hz}\hfill - \includegraphics[width=0.45\textwidth]{poissonisihexp100hz} - \titlecaption{\label{hompoissonisihfig}Distribution of interspike - intervals of two Poisson processes.}{The processes differ in their - rate (left: $\lambda=20$\,Hz, right: $\lambda=100$\,Hz). The red - lines indicate the corresponding exponential interval distribution - \eqnref{poissonintervals}.} -\end{figure} - -The homogeneous Poisson process has the following properties: -\begin{itemize} -\item Intervals $T$ are exponentially distributed (\figref{hompoissonisihfig}): - \begin{equation} - \label{poissonintervals} - p(T) = \lambda e^{-\lambda T} \; . - \end{equation} -\item The average interval is $\mu_{ISI} = \frac{1}{\lambda}$ . -\item The variance of the intervals is $\sigma_{ISI}^2 = \frac{1}{\lambda^2}$ . -\item Thus, the coefficient of variation is always $CV_{ISI} = 1$ . -\item The serial correlation is $\rho_k =0$ for $k>0$, since the - occurrence of an event is independent of all previous events. Such a - process is also called a \enterm{renewal process} (\determ{Erneuerungsprozess}). -\item The number of events $k$ within a temporal window of duration - $W$ is Poisson distributed: -\begin{equation} - \label{poissoncounts} - P(k) = \frac{(\lambda W)^ke^{\lambda W}}{k!} -\end{equation} -(\figref{hompoissoncountfig}) -\item The Fano Faktor is always $F=1$ . -\end{itemize} - \begin{exercise}{hompoissonspikes.m}{} Implement a function \varcode{hompoissonspikes()} that uses a homogeneous Poisson process to generate spike events at a given rate @@ -353,16 +130,272 @@ The homogeneous Poisson process has the following properties: times. \end{exercise} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Interval statistics} + +The intervals $T_i=t_{i+1}-t_i$ between successive events are real +positive numbers. In the context of action potentials they are +referred to as \entermde[interspike +interval]{Interspikeintervall}{interspike intervals}, in short +\entermde[ISI|see{interspike + interval}]{ISI|see{Interspikeintervall}}{ISI}s. The statistics of +interspike intervals are described using common measures for +describing the statistics of real-valued variables: + \begin{figure}[t] - \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}\hfill - \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms} - \titlecaption{\label{hompoissoncountfig}Distribution of counts of a - Poisson spike train.}{The count statistics was generated for two - different windows of width $W=10$\,ms (left) and width $W=100$\,ms - (right). The red line illustrates the corresponding Poisson - distribution \eqnref{poissoncounts}.} + \includegraphics[width=0.96\textwidth]{isihexamples}\vspace{-2ex} + \titlecaption{\label{isihexamplesfig}Interspike-interval + histograms}{of the spike trains shown in \figref{rasterexamplesfig}.} \end{figure} +\begin{exercise}{isis.m}{} + Implement a function \varcode{isis()} that calculates the interspike + intervals from several spike trains. The function should return a + single vector of intervals. The spike times (in seconds) of each + trial are stored as vectors within a cell-array. 
+\end{exercise}
+
+\begin{itemize}
+\item Probability density $p(T)$ of the intervals $T$
+  (\figref{isihexamplesfig}). Normalized to $\int_0^{\infty} p(T) \;
+  dT = 1$. Commonly referred to as the \enterm[interspike
+  interval!histogram]{interspike interval histogram}. Its shape
+  reveals many interesting aspects like locking or bursting that
+  cannot be inferred from the mean or standard deviation. A particular
+  reference is the exponential distribution of intervals
+  \begin{equation}
+    \label{hompoissonexponential}
+    p_{exp}(T) = \lambda e^{-\lambda T}
+  \end{equation}
+  of a homogeneous Poisson spike train with rate $\lambda$.
+\item Mean interval: $\mu_{ISI} = \langle T \rangle =
+  \frac{1}{n}\sum\limits_{i=1}^n T_i$. The average time it takes from
+  one event to the next. The inverse of the mean interval is identical
+  with the mean rate $\lambda$ (number of events per time, see below)
+  of the process.
+\item Standard deviation of intervals: $\sigma_{ISI} = \sqrt{\langle
+  (T - \langle T \rangle)^2 \rangle}$. Periodically spiking neurons
+  have little variability in their intervals, whereas many cortical
+  neurons cover a wide range with their intervals. The standard
+  deviation of homogeneous Poisson spike trains, $\sigma_{ISI} =
+  \frac{1}{\lambda}$, also equals the inverse rate. Whether the
+  standard deviation of intervals is low or high, however, is better
+  quantified by the
+\item \entermde[coefficient of
+  variation]{Variationskoeffizient}{Coefficient of variation}, the
+  standard deviation of the ISIs relative to their mean:
+  \begin{equation}
+    \label{cvisi}
+    CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}
+  \end{equation}
+  Homogeneous Poisson spike trains have a CV of exactly one. The
+  lower the CV, the more regularly a neuron is firing. CVs
+  larger than one are also possible in spike trains with small
+  intervals separated by really long ones.
+%\item \entermde[diffusion coefficient]{Diffusionskoeffizient}{Diffusion coefficient}: $D_{ISI} =
+%  \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$.
+\end{itemize}
+
+\begin{exercise}{isihist.m}{}
+  Implement a function \varcode{isiHist()} that calculates the normalized
+  interspike interval histogram. The function should take two input
+  arguments; (i) a vector of interspike intervals and (ii) the width
+  of the bins used for the histogram. It further returns the
+  probability density as well as the centers of the bins.
+\end{exercise}
+
+\begin{exercise}{plotisihist.m}{}
+  Implement a function that takes the return values of
+  \varcode{isiHist()} as input arguments and then plots the data. The
+  plot should show the histogram with the x-axis scaled to
+  milliseconds and should be annotated with the average ISI, the
+  standard deviation and the coefficient of variation.
+\end{exercise}
+
+\subsection{Interval correlations}
+Intervals are not just numbers without an order, like weights of
+tigers. Intervals are temporally ordered and there could be temporal
+structure in the sequence of intervals. For example, short intervals
+could preferentially be followed by longer ones, and vice versa. Such
+dependencies in the sequence of intervals do not show up in the
+interval histogram. We need additional measures to also quantify the
+temporal structure of the sequence of intervals.
+
+We can use the same techniques we know for visualizing and quantifying
+correlations in bivariate data sets, i.e. scatter plots and
+correlation coefficients.
We form $(x,y)$ data pairs by taking the
+series of intervals $T_i$ as $x$-data values and pairing them with the
+$k$-th next intervals $T_{i+k}$ as $y$-data values. The parameter $k$
+is called \enterm{lag} (\determ{Verz\"ogerung}). For lag one we pair
+each interval with the next one. A \entermde[return map]{return
+  map}{Return map} illustrates dependencies between successive
+intervals by simply plotting the intervals $T_{i+k}$ against the
+intervals $T_i$ in a scatter plot (\figref{returnmapfig}). For Poisson
+spike trains there is no structure beyond the one expected from the
+exponential interspike interval distribution, hinting at neighboring
+interspike intervals being independent of each other. For the spike
+train based on an Ornstein-Uhlenbeck process the return map is more
+clustered along the diagonal, hinting at a positive correlation
+between succeeding intervals. That is, short intervals are more likely
+to be followed by short ones and long intervals more likely by long
+ones. This temporal structure was already clearly visible in the spike
+raster shown in \figref{rasterexamplesfig}.
+
 \begin{figure}[tp]
   \includegraphics[width=1\textwidth]{serialcorrexamples}
   \titlecaption{\label{returnmapfig}Interspike-interval
     correlations}{of the spike trains shown in
     \figref{rasterexamplesfig}. Upper panels show the return maps and
     lower panels the serial correlations of successive intervals
     separated by lag $k$. All the interspike intervals of the
     stationary spike trains are independent of each other --- this is
     a so called \enterm{renewal process}
     (\determ{Erneuerungsprozess}). In contrast, the ones of the
     non-stationary spike trains show positive correlations that decay
     for larger lags. The positive correlations in this example are
     caused by a common stimulus that slowly increases and decreases
     the mean firing rate of the spike trains.}
 \end{figure}
 
+Such dependencies can be further quantified by
+\entermde[correlation!serial]{Korrelation!serielle}{serial
+  correlations}. These are the correlation coefficients between the
+intervals $T_{i+k}$ and $T_i$ in dependence on lag $k$:
+\begin{equation}
+  \label{serialcorrelation}
+  \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)}
+= {\rm corr}(T_{i+k}, T_i)
+\end{equation}
+The serial correlations $\rho_k$ are usually plotted against the lag
+$k$ for a small range of lags
+(\figref{returnmapfig}). $\rho_0$ is the correlation of each
+interval with itself and always equals one.
+
+If the serial correlations all equal zero, $\rho_k =0$ for $k>0$, then
+the length of an interval is independent of all the previous
+ones. Such a process is a \enterm{renewal process}
+(\determ{Erneuerungsprozess}). Each event, each action potential,
+erases the history. The occurrence of the next event is independent of
+what happened before. To a first approximation an action potential
+erases all information about the past from the membrane voltage and
+thus spike trains may approximate renewal processes.
+
+However, other variables like the intracellular calcium concentration
+or the states of slowly switching ion channels may carry information
+from one interspike interval to the next and thus introducing
+correlations. Such non-renewal dynamics can then be described by the
+non-zero serial correlations (\figref{returnmapfig}).
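The serial correlations of the intervals translate almost directly into code. The following is a minimal Python sketch of this computation (the exercise below asks for a MATLAB implementation; the function name and the default maximum lag are illustrative, not part of the course code):

import numpy as np

def isi_serial_corr(isis, max_lag=10):
    # Illustrative sketch, not part of the course code.
    # Serial correlation coefficients rho_k of a sequence of interspike
    # intervals for lags k = 0, ..., max_lag.
    isis = np.asarray(isis)
    rho = np.zeros(max_lag + 1)
    rho[0] = 1.0  # every interval is perfectly correlated with itself
    for lag in range(1, max_lag + 1):
        # correlation between the interval sequence and itself shifted by lag:
        rho[lag] = np.corrcoef(isis[:-lag], isis[lag:])[0, 1]
    return rho

Plotting rho against the lags reproduces the lower panels of \figref{returnmapfig}.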
+
+\begin{exercise}{isiserialcorr.m}{}
+  Implement a function \varcode{isiserialcorr()} that takes a vector of
+  interspike intervals as input argument and calculates the serial
+  correlation. The function should further plot the serial
+  correlation.
+\end{exercise}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Count statistics}
+
+\begin{figure}[t]
+  \includegraphics{countexamples}
+  \titlecaption{\label{countstatsfig}Count statistics.}{Probability
+    distributions of counting $k$ events $k$ (blue) within windows of
+    20\,ms (left) or 200\,ms duration (right) of a homogeneous Poisson
+    spike train with a rate of 20\,Hz
+    (\figref{rasterexamplesfig}). For Poisson spike trains these
+    distributions are given by Poisson distributions (red).}
+\end{figure}
+
+The most commonly used measure for characterizing spike trains is the
+\enterm[firing rate!average]{average firing rate}. The firing rate $r$
+is the average number of spikes counted within some time interval $W$
+\begin{equation}
+  \label{firingrate}
+  r = \frac{\langle n \rangle}{W}
+\end{equation}
+and is measured in Hertz. The average of the spike counts is taken
+over trials. For stationary spike trains (no change in statistics, in
+particular the firing rate, over time), the firing rate based on the
+spike count equals the inverse average interspike interval
+$1/\mu_{ISI}$.
+
+The firing rate based on an averaged spike counts is one example of
+many statistics based on event counts. Stationary spike trains can be
+split into many segments $i$, each of duration $W$, and the number of
+events $n_i$ in each of the segments can be counted. The integer event
+counts can be quantified in the usual ways:
+\begin{itemize}
+\item Histogram of the counts $n_i$. For homogeneous Poisson spike
+  trains with rate $\lambda$ the resulting probability distributions
+  follow a Poisson distribution (\figref{countstatsfig}), where the
+  probability of counting $k$ events within a time window $W$ is given by
+  \begin{equation}
+    \label{poissondist}
+    P(k) = \frac{(\lambda W)^k e^{-\lambda W}}{k!}
+  \end{equation}
+\item Average number of counts: $\mu_n = \langle n \rangle$.
+\item Variance of counts:
+  $\sigma_n^2 = \langle (n - \langle n \rangle)^2 \rangle$.
+\end{itemize}
+Because spike counts are unitless and positive numbers, the
+\begin{itemize}
+\item \entermde{Fano Faktor}{Fano factor} (variance of counts divided
+  by average count)
+  \begin{equation}
+    \label{fano}
+    F = \frac{\sigma_n^2}{\mu_n}
+  \end{equation}
+  is a commonly used measure for quantifying the variability of event
+  counts relative to the mean number of events. In particular for
+  homogeneous Poisson processes the Fano factor equals one,
+  independently of the time window $W$.
+\end{itemize}
+is an additional measure quantifying event counts.
+
+Note that all of these statistics depend in general on the chosen
+window length $W$. The average spike count, for example, grows
+linearly with $W$ for sufficiently large time windows: $\langle n
+\rangle = r W$, \eqnref{firingrate}. Doubling the counting window
+doubles the spike count. As does the spike-count variance
+(\figref{fanofig}). At smaller time windows the statistics of the
+event counts might depend on the particular duration of the counting
+window. There might be an optimal time window for which the variance
+of the spike count is minimal. The Fano factor plotted as a function
+of the time window illustrates such properties of point processes
+(\figref{fanofig}).
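How the count statistics change with the counting window is easy to explore numerically. The following Python sketch, in the spirit of fanoexamples.py, computes the Fano factor for a range of window durations (the function name and the use of non-overlapping windows are assumptions of this sketch, not prescribed by the text):

import numpy as np

def fano_factors(spikes, windows):
    # Illustrative sketch, not part of the course code.
    # Fano factor of spike counts for each count-window duration in windows.
    # spikes: list of arrays of spike times in seconds, one array per trial
    # windows: array of window durations in seconds (shorter than the trials)
    fanos = np.zeros(len(windows))
    for i, w in enumerate(windows):
        counts = []
        for times in spikes:
            # count spikes in successive non-overlapping windows of duration w:
            edges = np.arange(0.0, times[-1], w)
            n, _ = np.histogram(times, edges)
            counts.extend(n)
        fanos[i] = np.var(counts) / np.mean(counts)
    return fanos

Plotted against the window duration, the result should reproduce the qualitative behavior of \figref{fanofig}: a constant value of one for a homogeneous Poisson process and a minimum for spike trains with positively correlated intervals.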
+ +This also has consequences for information transmission in neural +systems. The lower the variance in spike count relative to the +averaged count, the higher the signal-to-noise ratio at which +information encoded in the mean spike count is transmitted. + +\begin{figure}[t] + \includegraphics{fanoexamples} + \titlecaption{\label{fanofig} + Count variance and Fano factor.}{Variance of event counts as a + function of mean counts obtained by varying the duration of the + count window (left). Dividing the count variance by the respective + mean results in the Fano factor that can be plotted as a function + of the count window (right). For Poisson spike trains the variance + always equals the mean counts and consequently the Fano factor + equals one irrespective of the count window (top). A spike train + with positive correlations between interspike intervals (caused by + an Ornstein-Uhlenbeck process) has a minimum in the Fano factor, + that is an analysis window for which the relative count variance + is minimal somewhere close to the correlation time scale of the + interspike intervals (bottom).} +\end{figure} + +\begin{exercise}{counthist.m}{} + Implement a function \varcode{counthist()} that calculates and plots + the distribution of spike counts observed in a certain time + window. The function should take two input arguments: (i) a + cell-array of vectors containing the spike times in seconds observed + in a number of trials, and (ii) the duration of the time window that + is used to evaluate the counts. +\end{exercise} + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Time-dependent firing rate} From 20332e2013bdcb5781df804c73dc9c2bfac49068 Mon Sep 17 00:00:00 2001 From: Jan Benda Date: Mon, 18 Jan 2021 13:13:35 +0100 Subject: [PATCH 3/3] [pointprocesses] improved chapter --- pointprocesses/code/counthist.m | 25 +- pointprocesses/code/isihist.m | 2 +- pointprocesses/code/isis.m | 2 - pointprocesses/code/isiserialcorr.m | 16 +- pointprocesses/code/plotisiserialcorr.m | 16 ++ pointprocesses/code/rasterplot.m | 30 +- pointprocesses/lecture/fanoexamples.py | 40 ++- pointprocesses/lecture/pointprocesses.tex | 326 +++++++++++++--------- pointprocesses/lecture/rasterexamples.py | 2 +- scientificcomputing-script.tex | 3 +- 10 files changed, 270 insertions(+), 192 deletions(-) create mode 100644 pointprocesses/code/plotisiserialcorr.m diff --git a/pointprocesses/code/counthist.m b/pointprocesses/code/counthist.m index 67b4cae..2f5b746 100644 --- a/pointprocesses/code/counthist.m +++ b/pointprocesses/code/counthist.m @@ -1,16 +1,11 @@ -function [counts, bins] = counthist(spikes, w) -% Compute and plot histogram of spike counts. +function counthist(spikes, w) +% Plot histogram of spike counts. % -% [counts, bins] = counthist(spikes, w) +% counthist(spikes, w) % % Arguments: % spikes: a cell array of vectors of spike times in seconds -% w: observation window duration in seconds for computing the counts -% -% Returns: -% counts: the histogram of counts normalized to probabilities -% bins: the bin centers for the histogram - +% w: duration of window in seconds for computing the counts % collect spike counts: tmax = spikes{1}(end); n = []; @@ -21,12 +16,12 @@ function [counts, bins] = counthist(spikes, w) % for tk = 0:w:tmax-w % nn = sum((times >= tk) & (times < tk+w)); % %nn = length(find((times >= tk) & (times < tk+w))); -% n = [n nn]; +% n = [n, nn]; % end % alternative 2: use the hist() function to do that! 
tbins = 0.5*w:w:tmax-0.5*w; nn = hist(times, tbins); - n = [n nn]; + n = [n, nn]; end % histogram of spike counts: @@ -36,9 +31,7 @@ function [counts, bins] = counthist(spikes, w) counts = counts / sum(counts); % plot: - if nargout == 0 - bar(bins, counts); - xlabel('counts k'); - ylabel('P(k)'); - end + bar(bins, counts); + xlabel('counts k'); + ylabel('P(k)'); end diff --git a/pointprocesses/code/isihist.m b/pointprocesses/code/isihist.m index 00b02c3..a24989c 100644 --- a/pointprocesses/code/isihist.m +++ b/pointprocesses/code/isihist.m @@ -8,7 +8,7 @@ function [pdf, centers] = isihist(isis, binwidth) % binwidth: optional width in seconds to be used for the isi bins % % Returns: -% pdf: vector with probability density of interspike intervals in Hz +% pdf: vector with pdf of interspike intervals in Hertz % centers: vector with centers of interspikeintervalls in seconds if nargin < 2 diff --git a/pointprocesses/code/isis.m b/pointprocesses/code/isis.m index 65a346b..509dcc5 100644 --- a/pointprocesses/code/isis.m +++ b/pointprocesses/code/isis.m @@ -5,11 +5,9 @@ function isivec = isis(spikes) % % Arguments: % spikes: a cell array of vectors of spike times in seconds -% isivec: a column vector with all the interspike intervalls % % Returns: % isivec: a column vector with all the interspike intervalls - isivec = []; for k = 1:length(spikes) difftimes = diff(spikes{k}); diff --git a/pointprocesses/code/isiserialcorr.m b/pointprocesses/code/isiserialcorr.m index 21bb474..1b813fc 100644 --- a/pointprocesses/code/isiserialcorr.m +++ b/pointprocesses/code/isiserialcorr.m @@ -1,15 +1,15 @@ -function isicorr = isiserialcorr(isivec, maxlag) +function [isicorr, lags] = isiserialcorr(isivec, maxlag) % serial correlation of interspike intervals % % isicorr = isiserialcorr(isivec, maxlag) % % Arguments: % isivec: vector of interspike intervals in seconds -% maxlag: the maximum lag in seconds +% maxlag: the maximum lag % % Returns: % isicorr: vector with the serial correlations for lag 0 to maxlag - +% lags: vector with the lags corresponding to isicorr lags = 0:maxlag; isicorr = zeros(size(lags)); for k = 1:length(lags) @@ -21,14 +21,4 @@ function isicorr = isiserialcorr(isivec, maxlag) isicorr(k) = corr(isivec(1:end-lag), isivec(lag+1:end)); end end - - if nargout == 0 - % plot: - plot(lags, isicorr, '-b'); - hold on; - scatter(lags, isicorr, 50.0, 'b', 'filled'); - hold off; - xlabel('Lag k') - ylabel('\rho_k') - end end diff --git a/pointprocesses/code/plotisiserialcorr.m b/pointprocesses/code/plotisiserialcorr.m new file mode 100644 index 0000000..3f21418 --- /dev/null +++ b/pointprocesses/code/plotisiserialcorr.m @@ -0,0 +1,16 @@ +function isicorr = plotisiserialcorr(isivec, maxlag) +% plot serial correlation of interspike intervals +% +% plotisiserialcorr(isivec, maxlag) +% +% Arguments: +% isivec: vector of interspike intervals in seconds +% maxlag: the maximum lag + [isicorr, lags] = isiserialcorr(isivec, maxlag); + plot(lags, isicorr, '-b'); + hold on; + scatter(lags, isicorr, 20.0, 'b', 'filled'); + hold off; + xlabel('Lag k') + ylabel('\rho_k') +end diff --git a/pointprocesses/code/rasterplot.m b/pointprocesses/code/rasterplot.m index 7b28820..ccbfe60 100644 --- a/pointprocesses/code/rasterplot.m +++ b/pointprocesses/code/rasterplot.m @@ -2,21 +2,35 @@ function rasterplot(spikes, tmax) % Display a spike raster of the spike times given in spikes. 
% % rasterplot(spikes, tmax) +% +% Arguments: % spikes: a cell array of vectors of spike times in seconds -% tmax: plot spike raster upto tmax seconds - +% tmax: plot spike raster up to tmax seconds + in_msecs = tmax < 1.5 + spiketimes = []; + trials = []; ntrials = length(spikes); for k = 1:ntrials times = spikes{k}; times = times(times0$, then the length of an interval is independent of all the previous @@ -282,15 +325,21 @@ thus spike trains may approximate renewal processes. However, other variables like the intracellular calcium concentration or the states of slowly switching ion channels may carry information -from one interspike interval to the next and thus introducing -correlations. Such non-renewal dynamics can then be described by the -non-zero serial correlations (\figref{returnmapfig}). +from one interspike interval to the next and thus introduce +correlations between intervals. Such non-renewal dynamics is then +characterized by the non-zero serial correlations +(\figref{returnmapfig}). \begin{exercise}{isiserialcorr.m}{} - Implement a function \varcode{isiserialcorr()} that takes a vector of - interspike intervals as input argument and calculates the serial - correlation. The function should further plot the serial - correlation. + Implement a function \varcode{isiserialcorr()} that takes a vector + of interspike intervals as input argument and calculates the serial + correlations up to some maximum lag. +\end{exercise} + +\begin{exercise}{plotisiserialcorr.m}{} + Implement a function \varcode{plotisiserialcorr()} that takes a + vector of interspike intervals as input argument and generates a + plot of the serial correlations. \end{exercise} @@ -326,10 +375,11 @@ split into many segments $i$, each of duration $W$, and the number of events $n_i$ in each of the segments can be counted. The integer event counts can be quantified in the usual ways: \begin{itemize} -\item Histogram of the counts $n_i$. For homogeneous Poisson spike - trains with rate $\lambda$ the resulting probability distributions - follow a Poisson distribution (\figref{countstatsfig}), where the - probability of counting $k$ events within a time window $W$ is given by +\item Histogram of the counts $n_i$ appropriately normalized to + probability distributions. For homogeneous Poisson spike trains with + rate $\lambda$ the resulting probability distributions follow a + Poisson distribution (\figref{countstatsfig}), where the probability + of counting $k$ events within a time window $W$ is given by \begin{equation} \label{poissondist} P(k) = \frac{(\lambda W)^k e^{\lambda W}}{k!} @@ -338,7 +388,7 @@ counts can be quantified in the usual ways: \item Variance of counts: $\sigma_n^2 = \langle (n - \langle n \rangle)^2 \rangle$. \end{itemize} -Because spike counts are unitless and positive numbers, the +Because spike counts are unitless and positive numbers the \begin{itemize} \item \entermde{Fano Faktor}{Fano factor} (variance of counts divided by average count) @@ -351,7 +401,6 @@ Because spike counts are unitless and positive numbers, the homogeneous Poisson processes the Fano factor equals one, independently of the time window $W$. \end{itemize} -is an additional measure quantifying event counts. Note that all of these statistics depend in general on the chosen window length $W$. The average spike count, for example, grows @@ -372,33 +421,33 @@ information encoded in the mean spike count is transmitted. 
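As a sketch of what the \varcode{counthist()} exercise below asks for (written in Python rather than MATLAB; the function name and binning choices are illustrative assumptions), the spike-count distribution for a given window duration can be estimated and compared against the Poisson prediction:

import numpy as np
from scipy.stats import poisson

def count_distribution(spikes, w):
    # Illustrative sketch, not part of the course code.
    # Normalized distribution of spike counts in non-overlapping windows of
    # duration w (seconds), pooled over all trials.
    counts = []
    for times in spikes:
        edges = np.arange(0.0, times[-1], w)
        n, _ = np.histogram(times, edges)
        counts.extend(n)
    ks = np.arange(np.max(counts) + 2)
    pk, _ = np.histogram(counts, ks, density=True)
    return ks[:-1], pk

# For a homogeneous Poisson process with rate lam the resulting P(k) should
# match poisson.pmf(ks[:-1], lam*w), i.e. (lam*w)**k * exp(-lam*w) / k!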
\begin{figure}[t] \includegraphics{fanoexamples} - \titlecaption{\label{fanofig} - Count variance and Fano factor.}{Variance of event counts as a - function of mean counts obtained by varying the duration of the - count window (left). Dividing the count variance by the respective - mean results in the Fano factor that can be plotted as a function - of the count window (right). For Poisson spike trains the variance - always equals the mean counts and consequently the Fano factor - equals one irrespective of the count window (top). A spike train - with positive correlations between interspike intervals (caused by - an Ornstein-Uhlenbeck process) has a minimum in the Fano factor, - that is an analysis window for which the relative count variance - is minimal somewhere close to the correlation time scale of the - interspike intervals (bottom).} + \titlecaption{\label{fanofig} Fano factor.}{Counting events in time + windows of given duration and then dividing the variance of the + counts by their mean results in the Fano factor. Here, the Fano + factor is plotted as a function of the duration of the window used + to count events. For Poisson spike trains the variance always + equals the mean counts and consequently the Fano factor equals one + irrespective of the count window (left). A spike train with + positive correlations between interspike intervals (caused by an + Ornstein-Uhlenbeck process) has a minimum in the Fano factor, that + is an analysis window for which the relative count variance is + minimal somewhere close to the correlation time scale of the + interspike intervals (right).} \end{figure} \begin{exercise}{counthist.m}{} Implement a function \varcode{counthist()} that calculates and plots the distribution of spike counts observed in a certain time - window. The function should take two input arguments: (i) a - cell-array of vectors containing the spike times in seconds observed - in a number of trials, and (ii) the duration of the time window that - is used to evaluate the counts. + window. The function should take two input arguments: a cell-array + of vectors containing the spike times in seconds observed in a + number of trials, and the duration of the time window that is used + to evaluate the counts. \end{exercise} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Time-dependent firing rate} +\label{nonstationarysec} So far we have discussed stationary spike trains. The statistical properties of these did not change within the observation time (stationary point @@ -566,6 +615,7 @@ relevate time-scale. 
\end{exercise}

\section{Spike-triggered Average}
+\label{stasec}

The graphical representation of the neuronal activity alone is not
sufficient to investigate the relation between the neuronal response
diff --git a/pointprocesses/lecture/rasterexamples.py b/pointprocesses/lecture/rasterexamples.py
index 07b8628..7a9f4f0 100644
--- a/pointprocesses/lecture/rasterexamples.py
+++ b/pointprocesses/lecture/rasterexamples.py
@@ -85,7 +85,7 @@ def plot_inhomogeneous_spikes(ax):

 if __name__ == "__main__":
-    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=cm_size(figure_width, 0.5*figure_width))
+    fig, (ax1, ax2) = plt.subplots(1, 2)
     fig.subplots_adjust(**adjust_fs(fig, left=4.0, right=1.0, top=1.2))
     plot_homogeneous_spikes(ax1)
     plot_inhomogeneous_spikes(ax2)
diff --git a/scientificcomputing-script.tex b/scientificcomputing-script.tex
index 13ca240..016a6a4 100644
--- a/scientificcomputing-script.tex
+++ b/scientificcomputing-script.tex
@@ -116,9 +116,10 @@
   \includechapter{pointprocesses}

 % add a chapter on simulating point-processes/spike trains
-% (Poisson spike trains, integrate-and-fire models)
+% (Poisson spike trains, LMP models, integrate-and-fire models, full HH like models)

 % add chapter on information theory, mutual information, stimulus reconstruction, coherence
+% move STA here!

 % add a chapter on Bayesian inference (the Neuroscience of it and a
 % bit of application for statistical problems).