diff --git a/bootstrap/lecture/Makefile b/bootstrap/lecture/Makefile index f7f02ba..de4182d 100644 --- a/bootstrap/lecture/Makefile +++ b/bootstrap/lecture/Makefile @@ -1,22 +1,29 @@ BASENAME=bootstrap + PYFILES=$(wildcard *.py) PYPDFFILES=$(PYFILES:.py=.pdf) -pdf : $(BASENAME)-chapter.pdf $(PYPDFFILES) +all : pdf + +# script: +pdf : $(BASENAME)-chapter.pdf -$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex +$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex $(PYPDFFILES) pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true $(PYPDFFILES) : %.pdf : %.py python $< clean : - rm -f *~ $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out $(BASENAME).aux $(BASENAME).log + rm -f *~ + rm -f $(BASENAME).aux $(BASENAME).log + rm -f $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out + rm -f $(PYPDFFILES) $(GPTTEXFILES) cleanall : clean rm -f $(BASENAME)-chapter.pdf -watch : +watchpdf : while true; do ! make -q pdf && make pdf; sleep 0.5; done diff --git a/likelihood/lecture/Makefile b/likelihood/lecture/Makefile index 4e6367b..42b8e3d 100644 --- a/likelihood/lecture/Makefile +++ b/likelihood/lecture/Makefile @@ -1,22 +1,29 @@ BASENAME=likelihood + PYFILES=$(wildcard *.py) PYPDFFILES=$(PYFILES:.py=.pdf) -pdf : $(BASENAME)-chapter.pdf $(PYPDFFILES) +all : pdf + +# script: +pdf : $(BASENAME)-chapter.pdf -$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex +$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex $(PYPDFFILES) pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true $(PYPDFFILES) : %.pdf : %.py python $< clean : - rm -f *~ $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out $(BASENAME).aux $(BASENAME).log + rm -f *~ + rm -f $(BASENAME).aux $(BASENAME).log + rm -f $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out + rm -f $(PYPDFFILES) $(GPTTEXFILES) cleanall : clean rm -f $(BASENAME)-chapter.pdf -watch : +watchpdf : while true; do ! make -q pdf && make pdf; sleep 0.5; done diff --git a/likelihood/lecture/likelihood.tex b/likelihood/lecture/likelihood.tex index 752d659..b69c5ec 100644 --- a/likelihood/lecture/likelihood.tex +++ b/likelihood/lecture/likelihood.tex @@ -1,11 +1,11 @@ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\chapter{\tr{Maximum likelihood estimation}{Maximum-Likelihood Methode}} +\chapter{\tr{Maximum likelihood estimation}{Maximum-Likelihood-Sch\"atzer}} In vielen Situationen wollen wir einen oder mehrere Parameter $\theta$ einer Wahrscheinlichkeitsverteilung sch\"atzen, so dass die Verteilung -die Daten $x_1, x_2, \ldots x_n$ am besten beschreibt. Bei der -Maximum-Likelihood-Methode w\"ahlen wir die Parameter so, dass die +die Daten $x_1, x_2, \ldots x_n$ am besten beschreibt. +Maximum-Likelihood-Sch\"atzer w\"ahlen die Parameter so, dass die Wahrscheinlichkeit, dass die Daten aus der Verteilung stammen, am gr\"o{\ss}ten ist. @@ -16,10 +16,9 @@ $\theta$'') die Wahrscheinlichkeits(dichte)verteilung von $x$ mit dem Parameter(n) $\theta$. 
Das k\"onnte die Normalverteilung \begin{equation} \label{normpdfmean} - p(x|\theta) = \frac{1}{\sqrt{2\pi \sigma^2}}e^{-\frac{(x-\theta)^2}{2\sigma^2}} + p(x|\theta) = \frac{1}{\sqrt{2\pi \sigma^2}}e^{-\frac{(x-\mu)^2}{2\sigma^2}} \end{equation} -sein mit -fester Standardverteilung $\sigma$ und dem Mittelwert $\mu$ als +sein mit dem Mittelwert $\mu$ und der Standardabweichung $\sigma$ als Parameter $\theta$. Wenn nun den $n$ unabh\"angigen Beobachtungen $x_1, x_2, \ldots x_n$ @@ -59,9 +58,10 @@ das Maximum der logarithmierten Likelihood (``Log-Likelihood'') gesucht: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Beispiel: Das arithmetische Mittel} -Wenn die Me{\ss}daten $x_1, x_2, \ldots x_n$ der Normalverteilung \eqnref{normpdfmean} -entstammen, und wir den Mittelwert $\mu$ als einzigen Parameter der Verteilung betrachten, -welcher Wert von $\theta$ maximiert dessen Likelhood? +Wenn die Me{\ss}daten $x_1, x_2, \ldots x_n$ der Normalverteilung +\eqnref{normpdfmean} entstammen, und wir den Mittelwert $\mu=\theta$ als +einzigen Parameter der Verteilung betrachten, welcher Wert von +$\theta$ maximiert dessen Likelhood? \begin{figure}[t] \includegraphics[width=1\textwidth]{mlemean} @@ -89,7 +89,7 @@ nach dem Parameter $\theta$ und setzen diese gleich Null: \Leftrightarrow \quad n \theta & = & \sum_{i=1}^n x_i \\ \Leftrightarrow \quad \theta & = & \frac{1}{n} \sum_{i=1}^n x_i \end{eqnarray*} -Der Maximum-Likelihood-Estimator ist das arithmetische Mittel der Daten. D.h. +Der Maximum-Likelihood-Sch\"atzer ist das arithmetische Mittel der Daten. D.h. das arithmetische Mittel maximiert die Wahrscheinlichkeit, dass die Daten aus einer Normalverteilung mit diesem Mittelwert gezogen worden sind. @@ -101,12 +101,12 @@ Normalverteilung mit diesem Mittelwert gezogen worden sind. die Log-Likelihood (aus der Summe der logarithmierten Wahrscheinlichkeiten) f\"ur den Mittelwert als Parameter. Vergleiche die Position der Maxima mit den aus den Daten berechneten - Mittelwerte. + Mittelwert. \end{exercise} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Kurvenfit als Maximum Likelihood Estimation} +\section{Kurvenfit als Maximum-Likelihood Sch\"atzung} Beim Kurvenfit soll eine Funktion $f(x;\theta)$ mit den Parametern $\theta$ an die Datenpaare $(x_i|y_i)$ durch Anpassung der Parameter $\theta$ gefittet werden. Wenn wir annehmen, dass die $y_i$ um die @@ -125,25 +125,29 @@ gegeben sind. Der Parameter $\theta$ soll so gew\"ahlt werden, dass die Log-Likelihood maximal wird. Der erste Term der Summe ist unabh\"angig von $\theta$ und kann deshalb bei der Suche nach dem -Maximum weggelassen werden. +Maximum weggelassen werden: \begin{eqnarray*} & = & - \frac{1}{2} \sum_{i=1}^n \left( \frac{y_i-f(x_i;\theta)}{\sigma_i} \right)^2 \end{eqnarray*} Anstatt nach dem Maximum zu suchen, k\"onnen wir auch das Vorzeichen der Log-Likelihood -umdrehen und nach dem Minimum suchen. Dabei k\"onnen wir auch den Faktor $1/2$ vor der Summe vernachl\"assigen --- auch das \"andert nichts an der Position des Minimums. +umdrehen und nach dem Minimum suchen. 
Dabei k\"onnen wir auch den Faktor $1/2$ vor der Summe vernachl\"assigen --- auch das \"andert nichts an der Position des Minimums: \begin{equation} + \label{chisqmin} \theta_{mle} = \text{argmin}_{\theta} \; \sum_{i=1}^n \left( \frac{y_i-f(x_i;\theta)}{\sigma_i} \right)^2 \;\; = \;\; \text{argmin}_{\theta} \; \chi^2 \end{equation} -Die Summer der quadratischen Abst\"ande normiert auf die jeweiligen +Die Summe der quadratischen Abst\"ande normiert auf die jeweiligen Standardabweichungen wird auch mit $\chi^2$ bezeichnet. Der Wert des -Parameters $\theta$ welcher den quadratischen Abstand minimiert ist +Parameters $\theta$, welcher den quadratischen Abstand minimiert, ist also identisch mit der Maximierung der Wahrscheinlichkeit, dass die Daten tats\"achlich aus der Funktion stammen k\"onnen. Minimierung des -$\chi^2$ ist also ein Maximum-Likelihood Estimate. +$\chi^2$ ist also eine Maximum-Likelihood Sch\"atzung. Aber nur, wenn +die Daten normalverteilt um die Funktion streuen! Bei anderen +Verteilungen m\"usste man die Log-Likelihood entsprechend +\eqnref{loglikelihood} ausrechnen und maximieren. \begin{figure}[t] \includegraphics[width=1\textwidth]{mlepropline} - \caption{\label{mleproplinefig} Maximum Likelihood Estimation der + \caption{\label{mleproplinefig} Maximum-Likelihood Sch\"atzung der Steigung einer Ursprungsgeraden.} \end{figure} @@ -165,33 +169,34 @@ und setzen diese gleich Null: \end{eqnarray} Damit haben wir nun einen anlytischen Ausdruck f\"ur die Bestimmung der Steigung $\theta$ des Regressionsgeraden gewonnen. Ein -Gradientenabstieg ist f\"ur das Fitten der Geradensteigung also gar nicht -n\"otig. Das gilt allgemein f\"ur das Fitten von Koeffizienten von -linear kombinierten Basisfunktionen. Parameter die nichtlinear in -einer Funktion enthalten sind k\"onnen aber nicht analytisch aus den -Daten berechnet werden. Da bleibt dann nur auf numerische Verfahren -zur Optimierung der Kostenfunktion, wie z.B. der Gradientenabstieg, -zur\"uckzugreifen. +Gradientenabstieg ist f\"ur das Fitten der Geradensteigung also gar +nicht n\"otig. Das gilt allgemein f\"ur das Fitten von Koeffizienten +von linear kombinierten Basisfunktionen. Parameter, die nichtlinear in +einer Funktion enthalten sind, k\"onnen im Gegensatz dazu nicht +analytisch aus den Daten berechnet werden. F\"ur diesen Fall bleibt +dann nur auf numerische Verfahren zur Optimierung der Kostenfunktion, +wie z.B. der Gradientenabstieg, zur\"uckzugreifen. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fits von Wahrscheinlichkeitsverteilungen} Zum Abschluss betrachten wir noch den Fall, bei dem wir die Parameter einer Wahrscheinlichkeitsdichtefunktion (z.B. Mittelwert und -Standardabweichung der Normalverteilung) an ein Datenset fitten wolle. +Standardabweichung der Normalverteilung) an ein Datenset fitten wollen. Ein erster Gedanke k\"onnte sein, die Wahrscheinlichkeitsdichtefunktion durch Minimierung des quadratischen -Abstands an ein Histogram der Daten zu fitten. Das ist aber aus +Abstands an ein Histogramm der Daten zu fitten. Das ist aber aus folgenden Gr\"unden nicht die Methode der Wahl: (i) Wahrscheinlichkeitsdichten k\"onnen nur positiv sein. Darum k\"onnen insbesondere bei kleinen Werten die Daten nicht symmetrisch streuen, -wie es normalverteilte Daten machen sollten. (ii) Die Datenwerte sind -nicht unabh\"angig, da das normierte Histogram sich zu Eins -aufintegriert. 
Die beiden Annahmen normalverteilte und unabh\"angige Daten -die die Minimierung des quadratischen Abstands zu einem Maximum -Likelihood Estimator machen sind also verletzt. (iii) Das Histgramm -h\"angt von der Wahl der Klassenbreite ab. +wie es bei normalverteilten Daten der Fall ist. (ii) Die Datenwerte +sind nicht unabh\"angig, da das normierte Histogram sich zu Eins +aufintegriert. Die beiden Annahmen normalverteilte und unabh\"angige +Daten, die die Minimierung des quadratischen Abstands +\eqnref{chisqmin} zu einem Maximum-Likelihood Sch\"atzer machen, sind +also verletzt. (iii) Das Histogramm h\"angt von der Wahl der +Klassenbreite ab. Den direkten Weg, eine Wahrscheinlichkeitsdichtefunktion an ein Datenset zu fitten, haben wir oben schon bei dem Beispiel zur @@ -204,9 +209,10 @@ z.B. dem Gradientenabstieg, gel\"ost wird. \begin{figure}[t] \includegraphics[width=1\textwidth]{mlepdf} - \caption{\label{mlepdffig} Maximum Likelihood Estimation einer - Wahrscheinlichkeitsdichtefunktion. Links: die 100 Datenpunkte, die aus der Gammaverteilung - 2. Ordnung (rot) gezogen worden sind. Der Maximum-Likelihood-Fit ist orange dargestellt. - Rechts: das normierte Histogramm der Daten zusammen mit der \"uber Minimierung - des quadratischen Abstands zum Histogramm berechneten Fits ist potentiell schlechter.} + \caption{\label{mlepdffig} Maximum-Likelihood Sch\"atzung einer + Wahrscheinlichkeitsdichtefunktion. Links: die 100 Datenpunkte, die + aus der Gammaverteilung 2. Ordnung (rot) gezogen worden sind. Der + Maximum-Likelihood-Fit ist orange dargestellt. Rechts: das + normierte Histogramm der Daten zusammen mit dem \"uber Minimierung + des quadratischen Abstands zum Histogramm berechneten Fit.} \end{figure} diff --git a/pointprocesses/exercises/Makefile b/pointprocesses/exercises/Makefile new file mode 100644 index 0000000..0b63fb9 --- /dev/null +++ b/pointprocesses/exercises/Makefile @@ -0,0 +1,35 @@ +BASENAME=pointprocesses +TEXFILES=$(wildcard $(BASENAME)??.tex) +EXERCISES=$(TEXFILES:.tex=.pdf) +SOLUTIONS=$(EXERCISES:pointprocesses%=pointprocesses-solutions%) + +.PHONY: pdf exercises solutions watch watchexercises watchsolutions clean + +pdf : $(SOLUTIONS) $(EXERCISES) + +exercises : $(EXERCISES) + +solutions : $(SOLUTIONS) + +$(SOLUTIONS) : pointprocesses-solutions%.pdf : pointprocesses%.tex instructions.tex + { echo "\\documentclass[answers,12pt,a4paper,pdftex]{exam}"; sed -e '1d' $<; } > $(patsubst %.pdf,%.tex,$@) + pdflatex -interaction=scrollmode $(patsubst %.pdf,%.tex,$@) | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $(patsubst %.pdf,%.tex,$@) || true + rm $(patsubst %.pdf,%,$@).[!p]* + +$(EXERCISES) : %.pdf : %.tex instructions.tex + pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true + +watch : + while true; do ! make -q pdf && make pdf; sleep 0.5; done + +watchexercises : + while true; do ! make -q exercises && make exercises; sleep 0.5; done + +watchsolutions : + while true; do ! 
make -q solutions && make solutions; sleep 0.5; done + +clean : + rm -f *~ *.aux *.log *.out + +cleanup : clean + rm -f $(SOLUTIONS) $(EXERCISES) diff --git a/pointprocesses/exercises/instructions.tex b/pointprocesses/exercises/instructions.tex new file mode 100644 index 0000000..96ac4bc --- /dev/null +++ b/pointprocesses/exercises/instructions.tex @@ -0,0 +1,11 @@ +\vspace*{-6.5ex} +\begin{center} +\textbf{\Large Einf\"uhrung in die wissenschaftliche Datenverarbeitung}\\[1ex] +{\large Jan Grewe, Jan Benda}\\[-3ex] +Abteilung Neuroethologie \hfill --- \hfill Institut f\"ur Neurobiologie \hfill --- \hfill \includegraphics[width=0.28\textwidth]{UT_WBMW_Black_RGB} \\ +\end{center} + +\ifprintanswers% +\else + +\fi diff --git a/pointprocesses/exercises/pointprocesses01.tex b/pointprocesses/exercises/pointprocesses01.tex new file mode 100644 index 0000000..b4d927c --- /dev/null +++ b/pointprocesses/exercises/pointprocesses01.tex @@ -0,0 +1,202 @@ +\documentclass[12pt,a4paper,pdftex]{exam} + +\usepackage[german]{babel} +\usepackage{pslatex} +\usepackage[mediumspace,mediumqspace,Gray]{SIunits} % \ohm, \micro +\usepackage{xcolor} +\usepackage{graphicx} +\usepackage[breaklinks=true,bookmarks=true,bookmarksopen=true,pdfpagemode=UseNone,pdfstartview=FitH,colorlinks=true,citecolor=blue]{hyperref} + +%%%%% layout %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage[left=20mm,right=20mm,top=25mm,bottom=25mm]{geometry} +\pagestyle{headandfoot} +\ifprintanswers +\newcommand{\stitle}{: L\"osungen} +\else +\newcommand{\stitle}{} +\fi +\header{{\bfseries\large \"Ubung 6\stitle}}{{\bfseries\large Statistik}}{{\bfseries\large 27. Oktober, 2015}} +\firstpagefooter{Prof. Dr. Jan Benda}{Phone: 29 74573}{Email: +jan.benda@uni-tuebingen.de} +\runningfooter{}{\thepage}{} + +\setlength{\baselineskip}{15pt} +\setlength{\parindent}{0.0cm} +\setlength{\parskip}{0.3cm} +\renewcommand{\baselinestretch}{1.15} + +%%%%% listings %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{listings} +\lstset{ + language=Matlab, + basicstyle=\ttfamily\footnotesize, + numbers=left, + numberstyle=\tiny, + title=\lstname, + showstringspaces=false, + commentstyle=\itshape\color{darkgray}, + breaklines=true, + breakautoindent=true, + columns=flexible, + frame=single, + xleftmargin=1em, + xrightmargin=1em, + aboveskip=10pt +} + +%%%%% math stuff: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{bm} +\usepackage{dsfont} +\newcommand{\naZ}{\mathds{N}} +\newcommand{\gaZ}{\mathds{Z}} +\newcommand{\raZ}{\mathds{Q}} +\newcommand{\reZ}{\mathds{R}} +\newcommand{\reZp}{\mathds{R^+}} +\newcommand{\reZpN}{\mathds{R^+_0}} +\newcommand{\koZ}{\mathds{C}} + +%%%%% page breaks %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\newcommand{\continue}{\ifprintanswers% +\else +\vfill\hspace*{\fill}$\rightarrow$\newpage% +\fi} +\newcommand{\continuepage}{\ifprintanswers% +\newpage +\else +\vfill\hspace*{\fill}$\rightarrow$\newpage% +\fi} +\newcommand{\newsolutionpage}{\ifprintanswers% +\newpage% +\else +\fi} + +%%%%% new commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\newcommand{\qt}[1]{\textbf{#1}\\} +\newcommand{\pref}[1]{(\ref{#1})} +\newcommand{\extra}{--- Zusatzaufgabe ---\ \mbox{}} +\newcommand{\code}[1]{\texttt{#1}} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{document} + +\input{instructions} + + +\begin{questions} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\question 
\qt{Homogeneous Poisson process}
+We use the Poisson process to generate spike trains on which we can test and improve some
+standard analysis functions.
+
+A homogeneous Poisson process of rate $\lambda$ (measured in Hertz) is a point process
+where the probability of an event is independent of time $t$ and independent of previous events.
+The probability $P$ of an event within a bin of width $\Delta t$ is
+\[ P = \lambda \cdot \Delta t \]
+for sufficiently small $\Delta t$.
+\begin{parts}
+
+  \part Write a function that generates $n$ homogeneous Poisson spike trains of a given duration $T_{max}$
+  with rate $\lambda$.
+  \begin{solution}
+    \lstinputlisting{hompoissonspikes.m}
+  \end{solution}
+
+  \part Using this function, generate a few trials and display them in a raster plot.
+  \begin{solution}
+    \lstinputlisting{../code/spikeraster.m}
+    \begin{lstlisting}
+      spikes = hompoissonspikes( 10, 100.0, 0.5 );
+      spikeraster( spikes )
+    \end{lstlisting}
+    \mbox{}\\[-3ex]
+    \colorbox{white}{\includegraphics[width=0.7\textwidth]{poissonraster100hz}}
+  \end{solution}
+
+  \part Write a function that extracts a single vector of interspike intervals
+  from the spike times returned by the first function.
+  \begin{solution}
+    \lstinputlisting{../code/isis.m}
+  \end{solution}
+
+  \part Write a function that plots the interspike-interval histogram
+  from a vector of interspike intervals. The function should also
+  compute the mean, the standard deviation, and the CV of the intervals
+  and display the values in the plot.
+  \begin{solution}
+    \lstinputlisting{../code/isihist.m}
+  \end{solution}
+
+  \part Compute histograms for Poisson spike trains with rate
+  $\lambda=100$\,Hz. Play around with $T_{max}$ and $n$ and the bin width
+  (start with 1\,ms) of the histogram.
+  How many
+  interspike intervals do you approximately need to get a ``nice''
+  histogram? How long do you need to record from the neuron?
+  \begin{solution}
+    About 5000 intervals for 25 bins. This corresponds to a $5000 / 100\,\hertz = 50\,\second$ recording
+    of a neuron firing with 100\,\hertz.
+  \end{solution}
+
+  \part Compare the histogram with the true distribution of intervals $T$ of the Poisson process
+  \[ p(T) = \lambda e^{-\lambda T} \]
+  for various rates $\lambda$.
+  \begin{solution}
+    \lstinputlisting{hompoissonisih.m}
+    \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissonisih100hz}}
+    \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissonisih20hz}}
+  \end{solution}
+
+  \part What happens if you make the bin width of the histogram smaller than $\Delta t$
+  used for generating the Poisson spikes?
+  \begin{solution}
+    The bins between the discretization have zero entries. Therefore
+    the other ones become higher than they should be.
+  \end{solution}
+
+  \part Plot the mean interspike interval, the corresponding standard deviation, and the CV
+  as a function of the rate $\lambda$ of the Poisson process.
+  Compare the simulations with the theoretical expectations for the dependence on $\lambda$.
+  \begin{solution}
+    \lstinputlisting{hompoissonisistats.m}
+    \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonisistats}}
+  \end{solution}
+
+  \part Write a function that computes serial correlations for the interspike intervals
+  for a range of lags.
+  The serial correlations $\rho_k$ at lag $k$ are defined as
+  \[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} \]
+  Use this function to show that interspike intervals of Poisson spikes are independent.
+  \begin{solution}
+    \lstinputlisting{../code/isiserialcorr.m}
+    \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonserial100hz}}
+  \end{solution}
+
+  \part Write a function that generates from spike times
+  a histogram of spike counts in a count window of given duration $W$.
+  The function should also plot the Poisson distribution
+  \[ P(k) = \frac{(\lambda W)^ke^{-\lambda W}}{k!} \]
+  for the rate $\lambda$ determined from the spike trains.
+  \begin{solution}
+    \lstinputlisting{../code/counthist.m}
+    \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}}
+    \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms}}
+  \end{solution}
+
+  \part Write a function that computes mean count, variance of count and the corresponding Fano factor
+  for a range of count window durations. The function should generate two plots: one plotting
+  the count variance against the mean, the other one the Fano factor as a function of the window duration.
+  \begin{solution}
+    \lstinputlisting{../code/fano.m}
+    \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonfano100hz}}
+  \end{solution}
+
+\end{parts}
+
+\end{questions}
+
+\end{document}
\ No newline at end of file
diff --git a/pointprocesses/exercises/poisson.pdf b/pointprocesses/exercises/poisson.pdf
deleted file mode 100644
index aa1756e..0000000
Binary files a/pointprocesses/exercises/poisson.pdf and /dev/null differ
diff --git a/pointprocesses/exercises/poisson.tex b/pointprocesses/exercises/poisson.tex
deleted file mode 100644
index 4b8f0f0..0000000
--- a/pointprocesses/exercises/poisson.tex
+++ /dev/null
@@ -1,160 +0,0 @@
-\documentclass[addpoints,10pt]{exam}
-\usepackage{url}
-\usepackage{color}
-\usepackage{hyperref}
-\usepackage{graphicx}
-
-\pagestyle{headandfoot}
-\runningheadrule
-\firstpageheadrule
-
-\firstpageheader{Scientific Computing}{Homogeneous Poisson process}{Oct 27, 2014}
-%\runningheader{Homework 01}{Page \thepage\ of \numpages}{23. October 2014}
-\firstpagefooter{}{}{}
-\runningfooter{}{}{}
-\pointsinmargin
-\bracketedpoints
-
-%\printanswers
-\shadedsolutions
-
-\usepackage[mediumspace,mediumqspace,Gray]{SIunits} % \ohm, \micro
-
-%%%%% listings %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\usepackage{listings}
-\lstset{
-  basicstyle=\ttfamily,
-  numbers=left,
-  showstringspaces=false,
-  language=Matlab,
-  breaklines=true,
-  breakautoindent=true,
-  columns=flexible,
-  frame=single,
-  captionpos=t,
-  xleftmargin=2em,
-  xrightmargin=1em,
-  aboveskip=10pt,
-  %title=\lstname,
-  title={\protect\filename@parse{\lstname}\protect\filename@base.\protect\filename@ext}
-  }
-
-
-\begin{document}
-
-\sffamily
-%%%%%%%%%%%%%% Questions %%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{questions}
-  \question \textbf{Homogeneous Poisson process}
-  We use the Poisson process to generate spike trains on which we can test and imrpove some
-  standard analysis functions.
-
-  A homogeneous Poisson process of rate $\lambda$ (measured in Hertz) is a point process
-  where the probability of an event is independent of time $t$ and independent of previous events.
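The first part of the exercise above asks for a generator of homogeneous Poisson spike trains. The reference solutions are the Matlab files included via \lstinputlisting and are not part of this diff; purely as an illustration, a minimal Python sketch (function name and arguments are made up here) draws the exponentially distributed intervals of the process directly:

import numpy as np

# Illustrative sketch: n homogeneous Poisson spike trains of duration tmax (seconds)
# with rate lam (Hertz); the intervals are exponentially distributed with mean 1/lam.
def poisson_spike_trains(lam, n, tmax):
    trains = []
    for _ in range(n):
        times = []
        t = np.random.exponential(1.0/lam)
        while t < tmax:
            times.append(t)
            t += np.random.exponential(1.0/lam)
        trains.append(np.asarray(times))
    return trains

spikes = poisson_spike_trains(100.0, 10, 0.5)   # 10 trials, 100 Hz, 0.5 s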
- The probability $P$ of an event within a bin of width $\Delta t$ is - \[ P = \lambda \cdot \Delta t \] - for sufficiently small $\Delta t$. - \begin{parts} - - \part Write a function that generates $n$ homogeneous Poisson spike trains of a given duration $T_{max}$ - with rate $\lambda$. - \begin{solution} - \lstinputlisting{hompoissonspikes.m} - \end{solution} - - \part Using this function, generate a few trials and display them in a raster plot. - \begin{solution} - \lstinputlisting{simulations/spikeraster.m} - \begin{lstlisting} -spikes = hompoissonspikes( 10, 100.0, 0.5 ); -spikeraster( spikes ) - \end{lstlisting} - \mbox{}\\[-3ex] - \colorbox{white}{\includegraphics[width=0.7\textwidth]{poissonraster100hz}} - \end{solution} - - \part Write a function that extracts a single vector of interspike intervals - from the spike times returned by the first function. - \begin{solution} - \lstinputlisting{simulations/isis.m} - \end{solution} - - \part Write a function that plots the interspike-interval histogram - from a vector of interspike intervals. The function should also - compute the mean, the standard deviation, and the CV of the intervals - and display the values in the plot. - \begin{solution} - \lstinputlisting{simulations/isihist.m} - \end{solution} - - \part Compute histograms for Poisson spike trains with rate - $\lambda=100$\,Hz. Play around with $T_{max}$ and $n$ and the bin width - (start with 1\,ms) of the histogram. - How many - interspike intervals do you approximately need to get a ``nice'' - histogram? How long do you need to record from the neuron? - \begin{solution} - About 5000 intervals for 25 bins. This corresponds to a $5000 / 100\,\hertz = 50\,\second$ recording - of a neuron firing with 100\,\hertz. - \end{solution} - - \part Compare the histogram with the true distribution of intervals $T$ of the Poisson process - \[ p(T) = \lambda e^{-\lambda T} \] - for various rates $\lambda$. - \begin{solution} - \lstinputlisting{hompoissonisih.m} - \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissonisih100hz}} - \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissonisih20hz}} - \end{solution} - - \part What happens if you make the bin width of the histogram smaller than $\Delta t$ - used for generating the Poisson spikes? - \begin{solution} - The bins between the discretization have zero entries. Therefore - the other ones become higher than they should be. - \end{solution} - - \part Plot the mean interspike interval, the corresponding standard deviation, and the CV - as a function of the rate $\lambda$ of the Poisson process. - Compare the simulations with the theoretical expectations for the dependence on $\lambda$. - \begin{solution} - \lstinputlisting{hompoissonisistats.m} - \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonisistats}} - \end{solution} - - \part Write a function that computes serial correlations for the interspike intervals - for a range of lags. - The serial correlations $\rho_k$ at lag $k$ are defined as - \[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} \] - Use this function to show that interspike intervals of Poisson spikes are independent. 
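For the serial correlations defined by the formula above, here is a minimal, illustrative Python sketch; the function name is made up, and it uses np.corrcoef, which normalizes by the standard deviations of both lagged series rather than by var(T_i) alone, a negligible difference for long stationary interval sequences:

import numpy as np

# Illustrative sketch: serial correlation coefficients of interspike intervals
# up to a maximum lag; for a homogeneous Poisson process they are close to zero.
def serial_correlations(intervals, max_lag=10):
    intervals = np.asarray(intervals)
    rho = np.ones(max_lag+1)                    # rho_0 = 1 by definition
    for k in range(1, max_lag+1):
        rho[k] = np.corrcoef(intervals[k:], intervals[:-k])[0, 1]
    return rho

isis = np.random.exponential(0.01, 10000)       # Poisson-like ISIs at about 100 Hz
print(serial_correlations(isis, 5))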
- \begin{solution} - \lstinputlisting{simulations/isiserialcorr.m} - \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonserial100hz}} - \end{solution} - - \part Write a function that generates from spike times - a histogram of spike counts in a count window of given duration $W$. - The function should also plot the Poisson distribution - \[ P(k) = \frac{(\lambda W)^ke^{\lambda W}}{k!} \] - for the rate $\lambda$ determined from the spike trains. - \begin{solution} - \lstinputlisting{simulations/counthist.m} - \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}} - \colorbox{white}{\includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms}} - \end{solution} - - \part Write a function that computes mean count, variance of count and the corresponding Fano factor - for a range of count window durations. The function should generate tow plots: one plotting - the count variance against the mean, the other one the Fano factor as a function of the window duration. - \begin{solution} - \lstinputlisting{simulations/fano.m} - \colorbox{white}{\includegraphics[width=0.98\textwidth]{poissonfano100hz}} - \end{solution} - - \end{parts} - -\end{questions} - - -\end{document} diff --git a/pointprocesses/lecture/Makefile b/pointprocesses/lecture/Makefile index 7a243a5..4ac656e 100644 --- a/pointprocesses/lecture/Makefile +++ b/pointprocesses/lecture/Makefile @@ -1,142 +1,70 @@ BASENAME=pointprocesses -TEXFILE=$(BASENAME).tex -DVIFILE=$(BASENAME).dvi -PSFILE=$(BASENAME).ps -PDFFILE=$(BASENAME).pdf - -FOILSFILE=foils.pdf -THUMBNAILSFILE=thumbnails.pdf - -HTMLBASENAME=$(BASENAME)h -HTMLTEXFILE=$(BASENAME)h.tex -HTMLDIR=$(BASENAME)h +PYFILES=$(wildcard *.py) +PYPDFFILES=$(PYFILES:.py=.pdf) GPTFILES=$(wildcard *.gpt) GPTTEXFILES=$(GPTFILES:.gpt=.tex) -all: ps pdf talk again watchps watchpdf foils thumbs html html1 epsfigs clean cleanup cleanplots help -.PHONY: epsfigs +all: pdf slides thumbs + +# script: +pdf : $(BASENAME)-chapter.pdf +$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex $(GPTTEXFILES) $(PYPDFFILES) + pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true +# slides: +slides: $(BASENAME)-slides.pdf +$(BASENAME)-slides.pdf : $(BASENAME)-slides.tex $(GPTTEXFILES) $(PYPDFFILES) + pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true + # thumbnails: -thumbs: $(THUMBNAILSFILE) -$(THUMBNAILSFILE): $(TEXFILE) $(GPTTEXFILES) +thumbs: $(BASENAME)-handout.pdf +$(BASENAME)-handout.pdf: $(BASENAME)-slides.tex $(GPTTEXFILES) sed -e 's/setboolean{presentation}{true}/setboolean{presentation}{false}/; s/usepackage{crop}/usepackage[frame]{crop}/' $< > thumbsfoils.tex pdflatex thumbsfoils | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex thumbsfoils || true - pdfnup --nup 2x4 --no-landscape --paper a4paper --trim "-1cm -1cm -1cm -1cm" --outfile $@ thumbsfoils.pdf '1-19' + pdfnup --nup 2x4 --no-landscape --paper a4paper --trim "-1cm -1cm -1cm -1cm" --outfile $@ thumbsfoils.pdf # 1-19 rm thumbsfoils.* -# transparencies: -foils: $(FOILSFILE) -$(FOILSFILE): $(TEXFILE) $(GPTTEXFILES) - sed -e 's/setboolean{presentation}{true}/setboolean{presentation}{false}/' $< > tfoils.tex - pdflatex tfoils | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex tfoils || true - pdfnup --nup 1x2 --orient portrait --trim "-1mm 
-1mm -1mm -1mm" --frame true --delta "1cm 1cm" --paper a4paper --outfile tfoils2.pdf tfoils.pdf - pdfnup --nup 1x1 --orient portrait --trim "-2cm -2cm -2cm -2cm" --paper a4paper --outfile $@ tfoils2.pdf - rm tfoils.* tfoils2.pdf - -# talk: -talk: $(PDFFILE) -pdf: $(PDFFILE) -$(PDFFILE): $(TEXFILE) $(GPTTEXFILES) - pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true -# batchmode (no output, no stop on error) -# nonstopmode / scrollmode (no stop on error) -# errorstopmode (stop on error) - - -again : - pdflatex $(TEXFILE) - watchpdf : while true; do ! make -q pdf && make pdf; sleep 0.5; done -# html -html : $(HTMLTEXFILE) $(GPTTEXFILES) - rm -f $(HTMLDIR)/* - htlatex $< - mkdir -p $(HTMLDIR) - mv $(HTMLBASENAME).html $(HTMLDIR) - mv $(HTMLBASENAME)*.* $(HTMLDIR) - mv z*.gif $(HTMLDIR) - cd $(HTMLDIR); for i in *.gif; do convert -page +0+0 $$i tmp.gif; mv tmp.gif $$i; done; rmtex $(HTMLBASENAME) - -#$(HTMLTEXFILE) : $(TEXFILE) Makefile -# sed 's/setboolean{html}{false}/setboolean{html}{true}/; s/\\colorbox{white}{\(.*\)}/\1/g' $< > $@ - -html1 : $(HTMLTEXFILE) $(GPTTEXFILES) - latex2html -dir $(HTMLDIR) -mkdir -subdir -nonavigation -noinfo -image_type png -notransparent -white -split 0 $< - sed 's-Date:--' $(HTMLDIR)/$(HTMLDIR).html > tmp.html - cp tmp.html $(HTMLDIR)/index.html - mv tmp.html $(HTMLDIR)/$(HTMLDIR).html - -$(HTMLTEXFILE) : $(TEXFILE) - sed '/^%nohtml/,/^%endnohtml/d; s/\\colorbox{white}{\(.*\)}/\1/g' $< > $@ - - -# eps of all figures: -epsfigs: - mkdir -p epsfigs; \ - for i in $(GPTFILES); do \ - { sed -n -e '1,/\\begin{document}/p' $(TEXFILE); echo "\texpicture{$${i%%.*}}"; echo "\end{document}"; } > tmp.tex; \ - latex tmp.tex; \ - dvips tmp.dvi; \ - ps2eps tmp.ps; \ - mv tmp.eps epsfigs/$${i%%.*}.eps; \ - rm tmp.*; \ - done - - -# plots: -%.tex: %.gpt whitestyles.gp +watchslides : + while true; do ! 
make -q slides && make slides; sleep 0.5; done + +# python plots: +$(PYPDFFILES) : %.pdf: %.py + python $< + +# gnuplot plots: +$(GPTTEXFILES) : %.tex: %.gpt whitestyles.gp gnuplot whitestyles.gp $< epstopdf $*.eps clean : - rm -f *~ - rmtex $(BASENAME) - rm -f $(GPTTEXFILES) - -cleanup : - rm -f *~ - rmtex $(BASENAME) - rm -f $(PSFILE) $(PDFFILE) $(FOILSFILE) $(THUMBNAILSFILE) - rm -f $(GPTTEXFILES) - rm -f -r $(HTMLDIR) - - -cleanplots : - sed -n -e '/\\begin{document}/,/\\end{document}/p' $(TEXFILE) | fgrep '\input{' | grep -v '^%' | sed 's/.*input{\(.*\).tex}.*/\1.gpt/' > plot.fls - mkdir -p unusedplots - for i in *.gp*; do \ - grep -q $$i plot.fls || { grep -q $$i $$(> plot.fls - for i in $$( dat.fls - mkdir -p unuseddata - for i in *.dat; do \ - grep -q $$i dat.fls || mv $$i unuseddata; \ - done - rm dat.fls plot.fls + rm -f *~ + rm -f $(BASENAME).aux $(BASENAME).log + rm -f $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out + rm -f $(BASENAME)-slides.aux $(BASENAME)-slides.log $(BASENAME)-slides.out $(BASENAME)-slides.toc $(BASENAME)-slides.nav $(BASENAME)-slides.snm $(BASENAME)-slides.vrb + rm -f $(PYPDFFILES) $(GPTTEXFILES) + +cleanall : clean + rm -f $(BASENAME)-chapter.pdf $(BASENAME)-slides.pdf $(BASENAME)-handout.pdf help : @echo -e \ - "make pdf: make the pdf file of the talk.\n"\ - "make foils: make black&white postscript foils of the talk.\n"\ - "make thumbs: make color thumbnails of the talk.\n"\ - "make again: run latex and make the pdf file of the talk,\n"\ - " no matter whether you changed the .tex file or not.\n\n"\ - "make watchpdf: make the pdf file of the talk\n"\ + "make pdf: make the pdf file of the script.\n"\ + "make slides: make the pdf file of the slides.\n"\ + "make thumbs: make color thumbnails of the talk.\n"\ + "make watchpdf: make the pdf file of the script\n"\ + " whenever the tex file is modified.\n"\ + "make watchpdf: make the pdf file of the slides\n"\ " whenever the tex file is modified.\n"\ - "make html: make a html version of the paper (in $(HTMLDIR)).\n\n"\ "make clean: remove all intermediate files,\n"\ - " just leave the source files and the final .ps and .pdf files.\n"\ + " just leave the source files and the final .pdf files.\n"\ "make cleanup: remove all intermediate files as well as\n"\ - " the final .ps and .pdf files.\n"\ - "make cleanplots: move all unused .gpt and .dat files\n"\ - " into unusedplots/ and unuseddata/, respectively." 
+ " the final .pdf files.\n"\ diff --git a/pointprocesses/lecture/isihexamples.py b/pointprocesses/lecture/isihexamples.py new file mode 100644 index 0000000..9d50fbc --- /dev/null +++ b/pointprocesses/lecture/isihexamples.py @@ -0,0 +1,101 @@ +import numpy as np +import matplotlib.pyplot as plt + +def hompoisson(rate, trials, duration) : + spikes = [] + for k in range(trials) : + times = [] + t = 0.0 + while t < duration : + t += np.random.exponential(1/rate) + times.append( t ) + spikes.append( times ) + return spikes + +def inhompoisson(rate, trials, dt) : + spikes = [] + p = rate*dt + for k in range(trials) : + x = np.random.rand(len(rate)) + times = dt*np.nonzero(x= vthresh : + v = vreset + times.append(k*dt) + spikes.append( times ) + return spikes + +def isis( spikes ) : + isi = [] + for k in xrange(len(spikes)) : + isi.extend(np.diff(spikes[k])) + return isi + +def plotisih( ax, isis, binwidth=None ) : + if binwidth == None : + nperbin = 200.0 # average number of isis per bin + bins = len(isis)/nperbin # number of bins + binwidth = np.max(isis)/bins + if binwidth < 5e-4 : # half a millisecond + binwidth = 5e-4 + h, b = np.histogram(isis, np.arange(0.0, np.max(isis)+binwidth, binwidth), density=True) + ax.text(0.9, 0.85, 'rate={:.0f}Hz'.format(1.0/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.75, 'mean={:.0f}ms'.format(1000.0*np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.65, 'CV={:.2f}'.format(np.std(isis)/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.set_xlabel('ISI [ms]') + ax.set_ylabel('p(ISI) [1/s]') + ax.bar( 1000.0*b[:-1], h, 1000.0*np.diff(b) ) + +# parameter: +rate = 20.0 +drate = 50.0 +trials = 10 +duration = 100.0 +dt = 0.001 +tau = 0.1; + +# homogeneous spike trains: +homspikes = hompoisson(rate, trials, duration) + +# OU noise: +rng = np.random.RandomState(54637281) +time = np.arange(0.0, duration, dt) +x = np.zeros(time.shape)+rate +n = rng.randn(len(time))*drate*tau/np.sqrt(dt)+rate +for k in xrange(1,len(x)) : + x[k] = x[k-1] + (n[k]-x[k-1])*dt/tau +x[x<0.0] = 0.0 + +# pif spike trains: +inhspikes = pifspikes(x, trials, dt, D=0.3) + +fig = plt.figure( figsize=(9,4) ) +ax = fig.add_subplot(1, 2, 1) +ax.set_title('stationary') +ax.set_xlim(0.0, 200.0) +ax.set_ylim(0.0, 40.0) +plotisih(ax, isis(homspikes)) + +ax = fig.add_subplot(1, 2, 2) +ax.set_title('non-stationary') +ax.set_xlim(0.0, 200.0) +ax.set_ylim(0.0, 40.0) +plotisih(ax, isis(inhspikes)) + +plt.tight_layout() +plt.savefig('isihexamples.pdf') +plt.show() diff --git a/pointprocesses/lecture/pointprocesses-chapter.tex b/pointprocesses/lecture/pointprocesses-chapter.tex new file mode 100644 index 0000000..8ecbb10 --- /dev/null +++ b/pointprocesses/lecture/pointprocesses-chapter.tex @@ -0,0 +1,271 @@ +\documentclass[12pt]{report} + +%%%%% title %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\title{\tr{Introduction to Scientific Computing}{Einf\"uhrung in die wissenschaftliche Datenverarbeitung}} +\author{Jan Benda\\Abteilung Neuroethologie\\[2ex]\includegraphics[width=0.3\textwidth]{UT_WBMW_Rot_RGB}} +\date{WS 15/16} + +%%%% language %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% \newcommand{\tr}[2]{#1} % en +% \usepackage[english]{babel} +\newcommand{\tr}[2]{#2} % de +\usepackage[german]{babel} + +%%%%% packages %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{pslatex} % nice font for pdf file 
+\usepackage[breaklinks=true,bookmarks=true,bookmarksopen=true,pdfpagemode=UseNone,pdfstartview=FitH,colorlinks=true,citecolor=blue]{hyperref} + +%%%% layout %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage[left=25mm,right=25mm,top=20mm,bottom=30mm]{geometry} +\setcounter{tocdepth}{1} + +%%%%% section style %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage[sf,bf,it,big,clearempty]{titlesec} +\setcounter{secnumdepth}{1} + + +%%%%% units %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage[mediumspace,mediumqspace,Gray]{SIunits} % \ohm, \micro + + +%%%%% figures %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{graphicx} +\usepackage{xcolor} +\pagecolor{white} + +\newcommand{\ruler}{\par\noindent\setlength{\unitlength}{1mm}\begin{picture}(0,6)% + \put(0,4){\line(1,0){170}}% + \multiput(0,2)(10,0){18}{\line(0,1){4}}% + \multiput(0,3)(1,0){170}{\line(0,1){2}}% + \put(0,0){\makebox(0,0){{\tiny 0}}}% + \put(10,0){\makebox(0,0){{\tiny 1}}}% + \put(20,0){\makebox(0,0){{\tiny 2}}}% + \put(30,0){\makebox(0,0){{\tiny 3}}}% + \put(40,0){\makebox(0,0){{\tiny 4}}}% + \put(50,0){\makebox(0,0){{\tiny 5}}}% + \put(60,0){\makebox(0,0){{\tiny 6}}}% + \put(70,0){\makebox(0,0){{\tiny 7}}}% + \put(80,0){\makebox(0,0){{\tiny 8}}}% + \put(90,0){\makebox(0,0){{\tiny 9}}}% + \put(100,0){\makebox(0,0){{\tiny 10}}}% + \put(110,0){\makebox(0,0){{\tiny 11}}}% + \put(120,0){\makebox(0,0){{\tiny 12}}}% + \put(130,0){\makebox(0,0){{\tiny 13}}}% + \put(140,0){\makebox(0,0){{\tiny 14}}}% + \put(150,0){\makebox(0,0){{\tiny 15}}}% + \put(160,0){\makebox(0,0){{\tiny 16}}}% + \put(170,0){\makebox(0,0){{\tiny 17}}}% + \end{picture}\par} + +% figures: +\setlength{\fboxsep}{0pt} +\newcommand{\texpicture}[1]{{\sffamily\footnotesize\input{#1.tex}}} +%\newcommand{\texpicture}[1]{\fbox{\sffamily\footnotesize\input{#1.tex}}} +%\newcommand{\texpicture}[1]{\setlength{\fboxsep}{2mm}\fbox{#1}} +%\newcommand{\texpicture}[1]{} +\newcommand{\figlabel}[1]{\textsf{\textbf{\large \uppercase{#1}}}} + +% maximum number of floats: +\setcounter{topnumber}{2} +\setcounter{bottomnumber}{0} +\setcounter{totalnumber}{2} + +% float placement fractions: +\renewcommand{\textfraction}{0.2} +\renewcommand{\topfraction}{0.8} +\renewcommand{\bottomfraction}{0.0} +\renewcommand{\floatpagefraction}{0.5} + +% spacing for floats: +\setlength{\floatsep}{12pt plus 2pt minus 2pt} +\setlength{\textfloatsep}{20pt plus 4pt minus 2pt} +\setlength{\intextsep}{12pt plus 2pt minus 2pt} + +% spacing for a floating page: +\makeatletter + \setlength{\@fptop}{0pt} + \setlength{\@fpsep}{8pt plus 2.0fil} + \setlength{\@fpbot}{0pt plus 1.0fil} +\makeatother + +% rules for floats: +\newcommand{\topfigrule}{\vspace*{10pt}{\hrule height0.4pt}\vspace*{-10.4pt}} +\newcommand{\bottomfigrule}{\vspace*{-10.4pt}{\hrule height0.4pt}\vspace*{10pt}} + +% captions: +\usepackage[format=plain,singlelinecheck=off,labelfont=bf,font={small,sf}]{caption} + +% put caption on separate float: +\newcommand{\breakfloat}{\end{figure}\begin{figure}[t]} + +% references to panels of a figure within the caption: +\newcommand{\figitem}[1]{\textsf{\bfseries\uppercase{#1}}} +% references to figures: +\newcommand{\panel}[1]{\textsf{\uppercase{#1}}} +\newcommand{\fref}[1]{\textup{\ref{#1}}} +\newcommand{\subfref}[2]{\textup{\ref{#1}}\,\panel{#2}} +% references to figures in normal text: +\newcommand{\fig}{Fig.} +\newcommand{\Fig}{Figure} +\newcommand{\figs}{Figs.} +\newcommand{\Figs}{Figures} 
+\newcommand{\figref}[1]{\fig~\fref{#1}} +\newcommand{\Figref}[1]{\Fig~\fref{#1}} +\newcommand{\figsref}[1]{\figs~\fref{#1}} +\newcommand{\Figsref}[1]{\Figs~\fref{#1}} +\newcommand{\subfigref}[2]{\fig~\subfref{#1}{#2}} +\newcommand{\Subfigref}[2]{\Fig~\subfref{#1}{#2}} +\newcommand{\subfigsref}[2]{\figs~\subfref{#1}{#2}} +\newcommand{\Subfigsref}[2]{\Figs~\subfref{#1}{#2}} +% references to figures within bracketed text: +\newcommand{\figb}{Fig.} +\newcommand{\figsb}{Figs.} +\newcommand{\figrefb}[1]{\figb~\fref{#1}} +\newcommand{\figsrefb}[1]{\figsb~\fref{#1}} +\newcommand{\subfigrefb}[2]{\figb~\subfref{#1}{#2}} +\newcommand{\subfigsrefb}[2]{\figsb~\subfref{#1}{#2}} + +% references to tables: +\newcommand{\tref}[1]{\textup{\ref{#1}}} +% references to tables in normal text: +\newcommand{\tab}{Tab.} +\newcommand{\Tab}{Table} +\newcommand{\tabs}{Tabs.} +\newcommand{\Tabs}{Tables} +\newcommand{\tabref}[1]{\tab~\tref{#1}} +\newcommand{\Tabref}[1]{\Tab~\tref{#1}} +\newcommand{\tabsref}[1]{\tabs~\tref{#1}} +\newcommand{\Tabsref}[1]{\Tabs~\tref{#1}} +% references to tables within bracketed text: +\newcommand{\tabb}{Tab.} +\newcommand{\tabsb}{Tab.} +\newcommand{\tabrefb}[1]{\tabb~\tref{#1}} +\newcommand{\tabsrefb}[1]{\tabsb~\tref{#1}} + + +%%%%% equation references %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%\newcommand{\eqref}[1]{(\ref{#1})} +\newcommand{\eqn}{\tr{Eq}{Gl}.} +\newcommand{\Eqn}{\tr{Eq}{Gl}.} +\newcommand{\eqns}{\tr{Eqs}{Gln}.} +\newcommand{\Eqns}{\tr{Eqs}{Gln}.} +\newcommand{\eqnref}[1]{\eqn~\eqref{#1}} +\newcommand{\Eqnref}[1]{\Eqn~\eqref{#1}} +\newcommand{\eqnsref}[1]{\eqns~\eqref{#1}} +\newcommand{\Eqnsref}[1]{\Eqns~\eqref{#1}} + + +%%%%% listings %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{listings} +\lstset{ + inputpath=../code, + basicstyle=\ttfamily\footnotesize, + numbers=left, + showstringspaces=false, + language=Matlab, + commentstyle=\itshape\color{darkgray}, + keywordstyle=\color{blue}, + stringstyle=\color{green}, + backgroundcolor=\color{blue!10}, + breaklines=true, + breakautoindent=true, + columns=flexible, + frame=single, + caption={\protect\filename@parse{\lstname}\protect\filename@base}, + captionpos=t, + xleftmargin=1em, + xrightmargin=1em, + aboveskip=10pt +} + +%%%%% math stuff: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{amsmath} +\usepackage{bm} +\usepackage{dsfont} +\newcommand{\naZ}{\mathds{N}} +\newcommand{\gaZ}{\mathds{Z}} +\newcommand{\raZ}{\mathds{Q}} +\newcommand{\reZ}{\mathds{R}} +\newcommand{\reZp}{\mathds{R^+}} +\newcommand{\reZpN}{\mathds{R^+_0}} +\newcommand{\koZ}{\mathds{C}} + + +%%%%% structure: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{ifthen} + +\newcommand{\code}[1]{\texttt{#1}} + +\newcommand{\source}[1]{ + \begin{flushright} + \color{gray}\scriptsize \url{#1} + \end{flushright} +} + +\newenvironment{definition}[1][]{\medskip\noindent\textbf{Definition}\ifthenelse{\equal{#1}{}}{}{ #1}:\newline}% + {\medskip} + +\newcounter{maxexercise} +\setcounter{maxexercise}{9} % show listings up to exercise maxexercise +\newcounter{theexercise} +\setcounter{theexercise}{1} +\newenvironment{exercise}[1][]{\medskip\noindent\textbf{\tr{Exercise}{\"Ubung} + \arabic{theexercise}:}\newline \newcommand{\exercisesource}{#1}}% + {\ifthenelse{\equal{\exercisesource}{}}{}{\ifthenelse{\value{theexercise}>\value{maxexercise}}{}{\medskip\lstinputlisting{\exercisesource}}}\medskip\stepcounter{theexercise}} + +\graphicspath{{figures/}} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{document}
+
+\include{pointprocesses}
+
+\end{document}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{\tr{Homogeneous Poisson process}{Homogener Poisson Prozess}}
+
+\begin{figure}[t]
+  \includegraphics[width=1\textwidth]{poissonraster100hz}
+  \caption{\label{hompoissonfig}Rasterplot von Poisson-Spikes.}
+\end{figure}
+
+The probability $p(t)\delta t$ of an event occurring at time $t$
+is independent of $t$ and independent of any previous event
+(independent of event history).
+
+The probability $P$ for an event occurring within a time bin of width $\Delta t$
+is
+\[ P=\lambda \cdot \Delta t \]
+for a Poisson process with rate $\lambda$.
+
+\subsection{Statistics of homogeneous Poisson process}
+
+\begin{figure}[t]
+  \includegraphics[width=0.45\textwidth]{poissonisihexp20hz}\hfill
+  \includegraphics[width=0.45\textwidth]{poissonisihexp100hz}
+  \caption{\label{hompoissonisihfig}Interspike interval histograms of Poisson spike trains.}
+\end{figure}
+
+\begin{itemize}
+\item Exponential distribution of intervals $T$: $p(T) = \lambda e^{-\lambda T}$
+\item Mean interval $\mu_{ISI} = \frac{1}{\lambda}$
+\item Variance of intervals $\sigma_{ISI}^2 = \frac{1}{\lambda^2}$
+\item Coefficient of variation $CV_{ISI} = 1$
+\item Serial correlation $\rho_k =0$ for $k>0$ (renewal process!)
+\item Fano factor $F=1$
+\end{itemize}
+
+\subsection{Count statistics of Poisson process}
+
+\begin{figure}[t]
+  \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}\hfill
+  \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms}
+  \caption{\label{hompoissoncountfig}Count statistics of a Poisson spike train.}
+\end{figure}
+
+Poisson distribution:
+\[ P(k) = \frac{(\lambda W)^ke^{-\lambda W}}{k!} \]
\ No newline at end of file
diff --git a/pointprocesses/lecture/pointprocesses-slides.tex b/pointprocesses/lecture/pointprocesses-slides.tex
new file mode 100644
index 0000000..ff49cb2
--- /dev/null
+++ b/pointprocesses/lecture/pointprocesses-slides.tex
@@ -0,0 +1,412 @@
+\documentclass{beamer}
+
+%%%%% title %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\title[]{Scientific Computing --- Point Processes}
+\author[]{Jan Benda}
+\institute[]{Neuroethology}
+\date[]{WS 14/15}
+\titlegraphic{\includegraphics[width=0.3\textwidth]{UT_WBMW_Rot_RGB}}
+
+%%%%% beamer %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\mode<presentation>
+{
+  \usetheme{Singapore}
+  \setbeamercovered{opaque}
+  \usecolortheme{tuebingen}
+  \setbeamertemplate{navigation symbols}{}
+  \usefonttheme{default}
+  \useoutertheme{infolines}
+  % \useoutertheme{miniframes}
+}
+
+%\AtBeginSection[]
+%{
+%  \begin{frame}
+%    \begin{center}
+%      \Huge \insertsectionhead
+%    \end{center}
+%  \end{frame}
+%}
+
+\setbeamertemplate{blocks}[rounded][shadow=true]
+\setcounter{tocdepth}{1}
+
+%%%%% packages %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\usepackage[english]{babel}
+\usepackage{amsmath}
+\usepackage{bm}
+\usepackage{pslatex} % nice font for pdf file
+%\usepackage{multimedia}
+
+\usepackage{dsfont}
+\newcommand{\naZ}{\mathds{N}}
+\newcommand{\gaZ}{\mathds{Z}}
+\newcommand{\raZ}{\mathds{Q}}
+\newcommand{\reZ}{\mathds{R}}
+\newcommand{\reZp}{\mathds{R^+}}
+\newcommand{\reZpN}{\mathds{R^+_0}}
+\newcommand{\koZ}{\mathds{C}}
+
+%%%% graphics %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\usepackage{graphicx}
+\newcommand{\texpicture}[1]{{\sffamily\small\input{#1.tex}}}
+
+%%%%% listings
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{listings} +\lstset{ + basicstyle=\ttfamily, + numbers=left, + showstringspaces=false, + language=Matlab, + commentstyle=\itshape\color{darkgray}, + keywordstyle=\color{blue}, + stringstyle=\color{green}, + backgroundcolor=\color{blue!10}, + breaklines=true, + breakautoindent=true, + columns=flexible, + frame=single, + captionpos=b, + xleftmargin=1em, + xrightmargin=1em, + aboveskip=10pt + } + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{document} + +\begin{frame}[plain] + \frametitle{} + \vspace{-1cm} + \titlepage % erzeugt Titelseite +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{frame} + \frametitle{Content} + \tableofcontents +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Point processes} + +\begin{frame} + \frametitle{Point process} + \vspace{-3ex} + \texpicture{pointprocessscetchA} + + A point process is a stochastic (or random) process that generates a sequence of events + at times $\{t_i\}$, $t_i \in \reZ$. + + For each point process there is an underlying continuous-valued + process evolving in time. The associated point process occurs when + the underlying continuous process crosses a threshold. + Examples: + \begin{itemize} + \item Spikes/heartbeat: generated by the dynamics of the membrane potential of neurons/heart cells. + \item Earth quakes: generated by the pressure dynamics between the tectonic plates on either side of a geological fault line. + \item Onset of cricket/frogs/birds/... songs: generated by the dynamics of the state of a nervous system. + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{Point process} + \texpicture{pointprocessscetchB} +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Homogeneous Poisson process} + +\begin{frame} + \frametitle{Homogeneous Poisson process} + The probability $p(t)\delta t$ of an event occuring at time $t$ + is independent of $t$ and independent of any previous event + (independent of event history). + + The probability $P$ for an event occuring within a time bin of width $\Delta t$ + is + \[ P=\lambda \cdot \Delta t \] + for a Poisson process with rate $\lambda$. + \includegraphics[width=1\textwidth]{poissonraster100hz} +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Interval statistics} + +\begin{frame} + \frametitle{Rate} + Rate of events $r$ (``spikes per time'') measured in Hertz. + \begin{itemize} + \item Number of events $N$ per observation time $W$: $r = \frac{N}{W}$ + \item Without boundary effects: $r = \frac{N-1}{t_N-t_1}$ + \item Inverse interval: $r = \frac{1}{\mu_{ISI}}$ + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{(Interspike) interval statistics} + \begin{itemize} + \item Histogram $p(T)$ of intervals $T$. 
Normalized to $\int_0^{\infty} p(T) \; dT = 1$
+  \item Mean interval $\mu_{ISI} = \langle T \rangle = \frac{1}{n}\sum\limits_{i=1}^n T_i$
+  \item Variance of intervals $\sigma_{ISI}^2 = \langle (T - \langle T \rangle)^2 \rangle$\vspace{1ex}
+  \item Coefficient of variation $CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}$
+  \item Diffusion coefficient $D_{ISI} = \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$
+  \vfill
+  \end{itemize}
+  \includegraphics[width=0.45\textwidth]{poissonisih100hz}\hfill
+  \includegraphics[width=0.45\textwidth]{lifisih16}
+\end{frame}
+
+\begin{frame}
+  \frametitle{Interval statistics of homogeneous Poisson process}
+  \begin{itemize}
+  \item Exponential distribution of intervals $T$: $p(T) = \lambda e^{-\lambda T}$
+  \item Mean interval $\mu_{ISI} = \frac{1}{\lambda}$
+  \item Variance of intervals $\sigma_{ISI}^2 = \frac{1}{\lambda^2}$
+  \item Coefficient of variation $CV_{ISI} = 1$
+  \end{itemize}
+  \vfill
+  \includegraphics[width=0.45\textwidth]{poissonisihexp20hz}\hfill
+  \includegraphics[width=0.45\textwidth]{poissonisihexp100hz}
+\end{frame}
+
+\begin{frame}
+  \frametitle{Interval return maps}
+  Scatter plot between succeeding intervals separated by lag $k$.
+  \vfill
+  Poisson process $\lambda=100$\,Hz:
+  \includegraphics[width=1\textwidth]{poissonreturnmap100hz}\hfill
+\end{frame}
+
+\begin{frame}
+  \frametitle{Serial interval correlations}
+  Correlation coefficients between succeeding intervals separated by lag $k$:
+  \[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} \]
+  \begin{itemize}
+  \item $\rho_0=1$ (correlation of each interval with itself).
+  \item Poisson process: $\rho_k =0$ for $k>0$ (renewal process!)
+  \end{itemize}
+  \vfill
+  \includegraphics[width=0.7\textwidth]{poissonserial100hz}
+\end{frame}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Count statistics}
+
+\begin{frame}
+  \frametitle{Count statistics}
+  Histogram of number of events $N$ (counts) within observation window of duration $W$.
+
+  \vfill
+  \includegraphics[width=0.48\textwidth]{poissoncounthist100hz10ms}\hfill
+  \includegraphics[width=0.48\textwidth]{poissoncounthist100hz100ms}
+\end{frame}
+
+\begin{frame}
+  \frametitle{Count statistics of Poisson process}
+  Poisson distribution:
+  \[ P(k) = \frac{(\lambda W)^ke^{-\lambda W}}{k!} \]
+
+  \vfill
+  \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}\hfill
+  \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms}
+\end{frame}
+
+\begin{frame}
+  \frametitle{Count statistics --- Fano factor}
+  Statistics of number of events $N$ within observation window of duration $W$.
+  \begin{itemize}
+  \item Mean count: $\mu_N = \langle N \rangle$
+  \item Count variance: $\sigma_N^2 = \langle (N - \langle N \rangle)^2 \rangle$
+  \item Fano factor (variance divided by mean): $F = \frac{\sigma_N^2}{\mu_N}$
+  \item Poisson process: $F=1$
+  \end{itemize}
+  \vfill
+  Poisson process $\lambda=100$\,Hz:
+  \includegraphics[width=1\textwidth]{poissonfano100hz}
+\end{frame}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Integrate-and-fire models}
+
+\begin{frame}
+  \frametitle{Integrate-and-fire models}
+  Leaky integrate-and-fire model (LIF):
+  \[ \tau \frac{dV}{dt} = -V + RI + D\xi \]
+  Whenever membrane potential $V(t)$ crosses the firing threshold $\theta$, a spike is emitted and
+  $V(t)$ is reset to $V_{reset}$.
+ \begin{itemize} + \item $\tau$: membrane time constant (typically 10\,ms) + \item $R$: input resistance (here 1\,mV (!)) + \item $D\xi$: additive Gaussian white noise of strength $D$ + \item $\theta$: firing threshold (here 10\,mV) + \item $V_{reset}$: reset potential (here 0\,mV) + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{Integrate-and-fire models} + Discretization with time step $\Delta t$: $V(t) \rightarrow V_i,\;t_i = i \Delta t$.\\ + Euler integration: + \begin{eqnarray*} + \frac{dV}{dt} & \approx & \frac{V_{i+1} - V_i}{\Delta t} \\ + \Rightarrow \quad V_{i+1} & = & V_i + \Delta t \frac{-V_i+RI_i+\sqrt{2D\Delta t}N_i}{\tau} + \end{eqnarray*} + $N_i$ are normally distributed random numbers (Gaussian with zero mean and unit variance) + --- the $\sqrt{\Delta t}$ is for white noise. + + \includegraphics[width=0.82\textwidth]{lifraster16} +\end{frame} + +\begin{frame} + \frametitle{Interval statistics of LIF} + Interval distribution approaches Inverse Gaussian for large $I$: + \[ p(T) = \frac{1}{\sqrt{4\pi D T^3}}\exp\left[-\frac{(T-\langle T \rangle)^2}{4DT\langle T \rangle^2}\right] \] + where $\langle T \rangle$ is the mean interspike interval and $D$ + is the diffusion coefficient. + \vfill + \includegraphics[width=0.45\textwidth]{lifisihdistr08}\hfill + \includegraphics[width=0.45\textwidth]{lifisihdistr16} +\end{frame} + +\begin{frame} + \frametitle{Interval statistics of PIF} + For the perfect integrate-and-fire (PIF) + \[ \tau \frac{dV}{dt} = RI + D\xi \] + (the canonical model or supra-threshold firing on a limit cycle)\\ + the Inverse Gaussian describes exactly the interspike interval distribution. + \vfill + \includegraphics[width=0.45\textwidth]{pifisihdistr01}\hfill + \includegraphics[width=0.45\textwidth]{pifisihdistr10} +\end{frame} + +\begin{frame} + \frametitle{Interval return map of LIF} + LIF $I=15.7$: + \includegraphics[width=1\textwidth]{lifreturnmap16} +\end{frame} + +\begin{frame} + \frametitle{Serial correlations of LIF} + LIF $I=15.7$: + \includegraphics[width=1\textwidth]{lifserial16}\\ + Integrate-and-fire driven with white noise are still renewal processes! +\end{frame} + +\begin{frame} + \frametitle{Count statistics of LIF} + LIF $I=15.7$: + \includegraphics[width=1\textwidth]{liffano16}\\ + Fano factor is not one! +\end{frame} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{frame} + \frametitle{Interval statistics of LIF with OU noise} + \begin{eqnarray*} + \tau \frac{dV}{dt} & = & -V + RI + U \\ + \tau_{OU} \frac{dU}{dt} & = & - U + D\xi + \end{eqnarray*} + Ohrnstein-Uhlenbeck noise is lowpass filtered white noise. + \includegraphics[width=0.45\textwidth]{lifouisihdistr08-100ms}\hfill + \includegraphics[width=0.45\textwidth]{lifouisihdistr16-100ms}\\ + More peaky than the inverse Gaussian! +\end{frame} + +\begin{frame} + \frametitle{Interval return map of LIF with OU noise} + LIF $I=15.7$, $\tau_{OU}=100$\,ms: + \includegraphics[width=1\textwidth]{lifoureturnmap16-100ms} +\end{frame} + +\begin{frame} + \frametitle{Serial correlations of LIF with OU noise} + LIF $I=15.7$, $\tau_{OU}=100$\,ms: + \includegraphics[width=1\textwidth]{lifouserial16-100ms}\\ + OU-noise introduces positive interval correlations! +\end{frame} + +\begin{frame} + \frametitle{Count statistics of LIF with OU noise} + LIF $I=15.7$, $\tau_{OU}=100$\,ms: + \includegraphics[width=1\textwidth]{lifoufano16-100ms}\\ + Fano factor increases with count window duration. 
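The Euler scheme for the leaky integrate-and-fire model shown a few slides above translates almost line by line into code. A minimal Python sketch: I=15.7, tau=10 ms, theta=10 mV, V_reset=0 mV and R=1 follow the slide text, while the noise strength, time step and duration are arbitrary illustrative choices.

import numpy as np

# Illustrative Euler integration of the noisy leaky integrate-and-fire model
# with threshold-and-reset; the 1/sqrt(dt) factor scales the white noise.
def lif_spikes(I=15.7, D=1.0, tau=0.01, R=1.0, vthresh=10.0, vreset=0.0,
               dt=1e-4, tmax=10.0):
    times = []
    v = vreset
    for i in range(int(tmax/dt)):
        noise = np.sqrt(2.0*D/dt)*np.random.randn()
        v += dt*(-v + R*I + noise)/tau
        if v >= vthresh:
            v = vreset
            times.append(i*dt)
    return np.asarray(times)

spiketimes = lif_spikes()
print(len(spiketimes)/10.0, 'Hz')   # rough firing rate over the 10 s simulation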
+\end{frame} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{frame} + \frametitle{Interval statistics of LIF with adaptation} + \begin{eqnarray*} + \tau \frac{dV}{dt} & = & -V - A + RI + D\xi \\ + \tau_{adapt} \frac{dA}{dt} & = & - A + \end{eqnarray*} + Adaptation $A$ with time constant $\tau_{adapt}$ and increment $\Delta A$ at spike. + \includegraphics[width=0.45\textwidth]{lifadaptisihdistr08-100ms}\hfill + \includegraphics[width=0.45\textwidth]{lifadaptisihdistr65-100ms}\\ + Similar to LIF with white noise. +\end{frame} + +\begin{frame} + \frametitle{Interval return map of LIF with adaptation} + LIF $I=10$, $\tau_{adapt}=100$\,ms: + \includegraphics[width=1\textwidth]{lifadaptreturnmap10-100ms}\\ + Negative correlation at lag one. +\end{frame} + +\begin{frame} + \frametitle{Serial correlations of LIF with adaptation} + LIF $I=10$, $\tau_{adapt}=100$\,ms: + \includegraphics[width=1\textwidth]{lifadaptserial10-100ms}\\ + Adaptation with white noise introduces negative interval correlations! +\end{frame} + +\begin{frame} + \frametitle{Count statistics of LIF with adaptation} + LIF $I=10$, $\tau_{adapt}=100$\,ms: + \includegraphics[width=1\textwidth]{lifadaptfano10-100ms}\\ + Fano factor decreases with count window duration. +\end{frame} + + +\end{document} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Non stationary} +\subsection{Inhomogeneous Poisson process} +\subsection{Firing rate} +\subsection{Instantaneous rate} +\subsection{Autocorrelation} +\subsection{Crosscorrelation} +\subsection{Joint PSTH} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Renewal process} +\subsection{Superthreshold firing} +\subsection{Subthreshold firing} +\section{Non-renewal processes} +\subsection{Bursting} +\subsection{Resonator} + + +\subsection{Standard distributions} +\subsubsection{Gamma} +\subsubsection{How to read ISI histograms} +refractoriness, poisson tail, sub-, supra-threshold, missed spikes + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Correlation with stimulus} +\subsection{Tuning curve} +\subsection{Linear filter} +\subsection{Spatiotemporal receptive field} +\subsection{Generalized linear model} + +\begin{frame} +\end{frame} diff --git a/pointprocesses/lecture/pointprocesses.tex b/pointprocesses/lecture/pointprocesses.tex index ff49cb2..b654a20 100644 --- a/pointprocesses/lecture/pointprocesses.tex +++ b/pointprocesses/lecture/pointprocesses.tex @@ -1,412 +1,107 @@ -\documentclass{beamer} - -%%%%% title %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\title[]{Scientific Computing --- Point Processes} -\author[]{Jan Benda} -\institute[]{Neuroethology} -\date[]{WS 14/15} -\titlegraphic{\includegraphics[width=0.3\textwidth]{UT_WBMW_Rot_RGB}} - -%%%%% beamer %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\mode -{ - \usetheme{Singapore} - \setbeamercovered{opaque} - \usecolortheme{tuebingen} - \setbeamertemplate{navigation symbols}{} - \usefonttheme{default} - \useoutertheme{infolines} - % \useoutertheme{miniframes} -} - -%\AtBeginSection[] -%{ -% \begin{frame} -% \begin{center} -% \Huge \insertsectionhead -% \end{center} -% \end{frame} -%} - -\setbeamertemplate{blocks}[rounded][shadow=true] -\setcounter{tocdepth}{1} - -%%%%% packages %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\usepackage[english]{babel} -\usepackage{amsmath} -\usepackage{bm} -\usepackage{pslatex} % nice font for pdf file -%\usepackage{multimedia} - 
-\usepackage{dsfont} -\newcommand{\naZ}{\mathds{N}} -\newcommand{\gaZ}{\mathds{Z}} -\newcommand{\raZ}{\mathds{Q}} -\newcommand{\reZ}{\mathds{R}} -\newcommand{\reZp}{\mathds{R^+}} -\newcommand{\reZpN}{\mathds{R^+_0}} -\newcommand{\koZ}{\mathds{C}} - -%%%% graphics %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\usepackage{graphicx} -\newcommand{\texpicture}[1]{{\sffamily\small\input{#1.tex}}} - -%%%%% listings %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\usepackage{listings} -\lstset{ - basicstyle=\ttfamily, - numbers=left, - showstringspaces=false, - language=Matlab, - commentstyle=\itshape\color{darkgray}, - keywordstyle=\color{blue}, - stringstyle=\color{green}, - backgroundcolor=\color{blue!10}, - breaklines=true, - breakautoindent=true, - columns=flexible, - frame=single, - captionpos=b, - xleftmargin=1em, - xrightmargin=1em, - aboveskip=10pt - } - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{document} - -\begin{frame}[plain] - \frametitle{} - \vspace{-1cm} - \titlepage % erzeugt Titelseite -\end{frame} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{frame} - \frametitle{Content} - \tableofcontents -\end{frame} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Point processes} - -\begin{frame} - \frametitle{Point process} - \vspace{-3ex} - \texpicture{pointprocessscetchA} - - A point process is a stochastic (or random) process that generates a sequence of events - at times $\{t_i\}$, $t_i \in \reZ$. - - For each point process there is an underlying continuous-valued - process evolving in time. The associated point process occurs when - the underlying continuous process crosses a threshold. - Examples: - \begin{itemize} - \item Spikes/heartbeat: generated by the dynamics of the membrane potential of neurons/heart cells. - \item Earth quakes: generated by the pressure dynamics between the tectonic plates on either side of a geological fault line. - \item Onset of cricket/frogs/birds/... songs: generated by the dynamics of the state of a nervous system. - \end{itemize} -\end{frame} +\chapter{\tr{Point processes}{Punktprozesse}} -\begin{frame} - \frametitle{Point process} +\begin{figure}[t] \texpicture{pointprocessscetchB} -\end{frame} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Homogeneous Poisson process} - -\begin{frame} - \frametitle{Homogeneous Poisson process} - The probability $p(t)\delta t$ of an event occuring at time $t$ - is independent of $t$ and independent of any previous event - (independent of event history). - - The probability $P$ for an event occuring within a time bin of width $\Delta t$ - is - \[ P=\lambda \cdot \Delta t \] - for a Poisson process with rate $\lambda$. - \includegraphics[width=1\textwidth]{poissonraster100hz} -\end{frame} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Interval statistics} - -\begin{frame} - \frametitle{Rate} - Rate of events $r$ (``spikes per time'') measured in Hertz. - \begin{itemize} - \item Number of events $N$ per observation time $W$: $r = \frac{N}{W}$ - \item Without boundary effects: $r = \frac{N-1}{t_N-t_1}$ - \item Inverse interval: $r = \frac{1}{\mu_{ISI}}$ - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{(Interspike) interval statistics} - \begin{itemize} - \item Histogram $p(T)$ of intervals $T$. 
Normalized to $\int_0^{\infty} p(T) \; dT = 1$ - \item Mean interval $\mu_{ISI} = \langle T \rangle = \frac{1}{n}\sum\limits_{i=1}^n T_i$ - \item Variance of intervals $\sigma_{ISI}^2 = \langle (T - \langle T \rangle)^2 \rangle$\vspace{1ex} - \item Coefficient of variation $CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}$ - \item Diffusion coefficient $D_{ISI} = \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$ - \vfill - \end{itemize} - \includegraphics[width=0.45\textwidth]{poissonisih100hz}\hfill - \includegraphics[width=0.45\textwidth]{lifisih16} -\end{frame} - -\begin{frame} - \frametitle{Interval statistics of homogeneous Poisson process} - \begin{itemize} - \item Exponential distribution of intervals $T$: $p(T) = \lambda e^{-\lambda T}$ - \item Mean interval $\mu_{ISI} = \frac{1}{\lambda}$ - \item Variance of intervals $\sigma_{ISI}^2 = \frac{1}{\lambda^2}$ - \item Coefficient of variation $CV_{ISI} = 1$ - \end{itemize} - \vfill - \includegraphics[width=0.45\textwidth]{poissonisihexp20hz}\hfill - \includegraphics[width=0.45\textwidth]{poissonisihexp100hz} -\end{frame} - -\begin{frame} - \frametitle{Interval return maps} - Scatter plot between succeeding intervals separated by lag $k$. - \vfill - Poisson process $\lambda=100$\,Hz: - \includegraphics[width=1\textwidth]{poissonreturnmap100hz}\hfill -\end{frame} - -\begin{frame} - \frametitle{Serial interval correlations} - Correlation coefficients between succeeding intervals separated by lag $k$: - \[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} \] - \begin{itemize} - \item $\rho_0=1$ (correlation of each interval with itself). - \item Poisson process: $\rho_k =0$ for $k>0$ (renewal process!) - \end{itemize} - \vfill - \includegraphics[width=0.7\textwidth]{poissonserial100hz} -\end{frame} + \caption{\label{pointprocessscetchfig}Ein Punktprozess ist eine + Abfolge von Zeitpunkten $t_i$ die auch durch die Intervalle + $T_i=t_{i+1}-t_i$ oder die Anzahl der Ereignisse $n_i$ beschrieben + werden kann. } +\end{figure} + +Ein zeitlicher Punktprozess ist ein stochastischer Prozess, der eine +Abfolge von Ereignissen zu den Zeiten $\{t_i\}$, $t_i \in \reZ$, +generiert. + +Jeder Punktprozess wird durch einen sich in der Zeit kontinuierlich +entwickelnden Prozess generiert. Wann immer dieser Prozess eine +Schwelle \"uberschreitet wird ein Ereigniss des Punktprozesses +erzeugt. Zum Beispiel: +\begin{itemize} +\item Aktionspotentiale/Herzschlag: wird durch die Dynamik des + Membranpotentials eines Neurons/Herzzelle erzeugt. +\item Erdbeben: wird durch die Dynamik des Druckes zwischen + tektonischen Platten auf beiden Seiten einer geologischen Verwerfung + erzeugt. +\item Zeitpunkt eines Grillen/Frosch/Vogelgesangs: wird durch die + Dynamik des Nervensystems und des Muskelapparates erzeugt. 
+\end{itemize} + +\begin{figure}[t] + \includegraphics[width=1\textwidth]{rasterexamples} + \caption{\label{rasterexamplesfig}Raster-Plot von jeweils 10 + Realisierungen eines station\"arenen Punktprozesses (homogener + Poisson Prozess mit Rate $\lambda=20$\;Hz, links) und eines + nicht-station\"aren Punktprozesses (perfect integrate-and-fire + Neuron getrieben mit Ohrnstein-Uhlenbeck Rauschen mit + Zeitkonstante $\tau=100$\,ms, rechts).} +\end{figure} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Intervall Statistik} + +\begin{figure}[t] + \includegraphics[width=1\textwidth]{isihexamples}\hfill + \caption{\label{isihexamplesfig}Interspike-Intervall Histogramme der in + \figref{rasterexamplesfig} gezeigten Spikes.} +\end{figure} + +\subsection{(Interspike) Intervall Statistik erster Ordnung} +\begin{itemize} +\item Histogramm $p(T)$ der Intervalle $T$. Normiert auf $\int_0^{\infty} p(T) \; dT = 1$. +\item Mittleres Intervall $\mu_{ISI} = \langle T \rangle = \frac{1}{n}\sum\limits_{i=1}^n T_i$. +\item Varianz der Intervalle $\sigma_{ISI}^2 = \langle (T - \langle T \rangle)^2 \rangle$\vspace{1ex} +\item Variationskoeffizient (``Coefficient of variation'') $CV_{ISI} = \frac{\sigma_{ISI}}{\mu_{ISI}}$. +\item Diffusions Koeffizient $D_{ISI} = \frac{\sigma_{ISI}^2}{2\mu_{ISI}^3}$. +\end{itemize} + +\subsection{Interval return maps} +Scatter plot von aufeinander folgenden Intervallen $(T_{i+k}, T_i)$ getrennt durch das ``lag'' $k$. + +\begin{figure}[t] + \includegraphics[width=1\textwidth]{returnmapexamples} + \includegraphics[width=1\textwidth]{serialcorrexamples} + \caption{\label{returnmapfig}Interspike-Intervall return maps and serial correlations.} +\end{figure} + +\subsection{Serielle Korrelationen der Intervalle} +Korrelationskoeffizient zwischen aufeinander folgenden Intervallen getrennt durch ``lag'' $k$: +\[ \rho_k = \frac{\langle (T_{i+k} - \langle T \rangle)(T_i - \langle T \rangle) \rangle}{\langle (T_i - \langle T \rangle)^2\rangle} = \frac{{\rm cov}(T_{i+k}, T_i)}{{\rm var}(T_i)} \] +$\rho_0=1$ (Korrelation jedes Intervalls mit sich selber). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Count statistics} - -\begin{frame} - \frametitle{Count statistics} - Histogram of number of events $N$ (counts) within observation window of duration $W$. +\section{Z\"ahlstatistik} - \vfill +\begin{figure}[t] \includegraphics[width=0.48\textwidth]{poissoncounthist100hz10ms}\hfill \includegraphics[width=0.48\textwidth]{poissoncounthist100hz100ms} -\end{frame} - -\begin{frame} - \frametitle{Count statistics of Poisson process} - Poisson distribution: - \[ P(k) = \frac{(\lambda W)^ke^{\lambda W}}{k!} \] - - \vfill - \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz10ms}\hfill - \includegraphics[width=0.48\textwidth]{poissoncounthistdist100hz100ms} -\end{frame} - -\begin{frame} - \frametitle{Count statistics --- Fano factor} - Statistics of number of events $N$ within observation window of duration $W$. 
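The interval measures defined above translate directly into a few lines of Python, given a list of spike-time arrays as produced by the scripts further below (illustrative helper name; like the isis() helper in those scripts, trial boundaries are ignored when intervals are concatenated):

import numpy as np

def interval_stats(spikes):
    # spikes: list of arrays of spike times, one array per trial
    isis = np.hstack([np.diff(times) for times in spikes])
    mu = np.mean(isis)                    # mean interval mu_ISI
    sigma = np.std(isis)                  # standard deviation sigma_ISI
    cv = sigma/mu                         # coefficient of variation CV_ISI
    d = sigma**2/(2.0*mu**3)              # diffusion coefficient D_ISI
    rho1 = np.corrcoef(isis[:-1], isis[1:])[0, 1]  # serial correlation at lag 1
    return mu, sigma, cv, d, rho1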
- \begin{itemize} - \item Mean count: $\mu_N = \langle N \rangle$ - \item Count variance: $\sigma_N^2 = \langle (N - \langle N \rangle)^2 \rangle$ - \item Fano factor (variance divided by mean): $F = \frac{\sigma_N^2}{\mu_N}$ - \item Poisson process: $F=1$ - \end{itemize} - \vfill - Poisson process $\lambda=100$\,Hz: - \includegraphics[width=1\textwidth]{poissonfano100hz} -\end{frame} - + \caption{\label{countstatsfig}Count Statistik.} +\end{figure} + +Statistik der Anzahl der Ereignisse $N_i$ innerhalb von Beobachtungsfenstern $i$ der Breite $W$. +\begin{itemize} +\item Histogramm der counts $N_i$. +\item Mittlere Anzahl von Ereignissen: $\mu_N = \langle N \rangle$. +\item Varianz der Anzahl: $\sigma_N^2 = \langle (N - \langle N \rangle)^2 \rangle$. +\item Fano Faktor (Varianz geteilt durch Mittelwert): $F = \frac{\sigma_N^2}{\mu_N}$. +\end{itemize} + +Insbesondere ist die mittlere Rate der Ereignisse $r$ (``Spikes pro Zeit'', Feuerrate) gemessen in Hertz +\[ r = \frac{\langle N \rangle}{W} \; . \] + +\begin{figure}[t] + \begin{minipage}[t]{0.49\textwidth} + Poisson process $\lambda=100$\,Hz:\\ + \includegraphics[width=1\textwidth]{poissonfano100hz} + \end{minipage} + \hfill + \begin{minipage}[t]{0.49\textwidth} + LIF $I=10$, $\tau_{adapt}=100$\,ms:\\ + \includegraphics[width=1\textwidth]{lifadaptfano10-100ms} + \end{minipage} + \caption{\label{fanofig}Fano factor.} +\end{figure} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Integrate-and-fire models} - -\begin{frame} - \frametitle{Integrate-and-fire models} - Leaky integrate-and-fire model (LIF): - \[ \tau \frac{dV}{dt} = -V + RI + D\xi \] - Whenever membrane potential $V(t)$ crosses the firing threshold $\theta$, a spike is emitted and - $V(t)$ is reset to $V_{reset}$. - \begin{itemize} - \item $\tau$: membrane time constant (typically 10\,ms) - \item $R$: input resistance (here 1\,mV (!)) - \item $D\xi$: additive Gaussian white noise of strength $D$ - \item $\theta$: firing threshold (here 10\,mV) - \item $V_{reset}$: reset potential (here 0\,mV) - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Integrate-and-fire models} - Discretization with time step $\Delta t$: $V(t) \rightarrow V_i,\;t_i = i \Delta t$.\\ - Euler integration: - \begin{eqnarray*} - \frac{dV}{dt} & \approx & \frac{V_{i+1} - V_i}{\Delta t} \\ - \Rightarrow \quad V_{i+1} & = & V_i + \Delta t \frac{-V_i+RI_i+\sqrt{2D\Delta t}N_i}{\tau} - \end{eqnarray*} - $N_i$ are normally distributed random numbers (Gaussian with zero mean and unit variance) - --- the $\sqrt{\Delta t}$ is for white noise. - - \includegraphics[width=0.82\textwidth]{lifraster16} -\end{frame} - -\begin{frame} - \frametitle{Interval statistics of LIF} - Interval distribution approaches Inverse Gaussian for large $I$: - \[ p(T) = \frac{1}{\sqrt{4\pi D T^3}}\exp\left[-\frac{(T-\langle T \rangle)^2}{4DT\langle T \rangle^2}\right] \] - where $\langle T \rangle$ is the mean interspike interval and $D$ - is the diffusion coefficient. - \vfill - \includegraphics[width=0.45\textwidth]{lifisihdistr08}\hfill - \includegraphics[width=0.45\textwidth]{lifisihdistr16} -\end{frame} - -\begin{frame} - \frametitle{Interval statistics of PIF} - For the perfect integrate-and-fire (PIF) - \[ \tau \frac{dV}{dt} = RI + D\xi \] - (the canonical model or supra-threshold firing on a limit cycle)\\ - the Inverse Gaussian describes exactly the interspike interval distribution. 
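For comparison with measured interval histograms, the inverse Gaussian density from the previous slides can be evaluated directly (a sketch; invgauss_pdf is an illustrative name, parameterized by the mean interval and the diffusion coefficient as defined above):

import numpy as np

def invgauss_pdf(T, Tmean, D):
    # inverse Gaussian interval density with mean interval Tmean and
    # diffusion coefficient D, in the parameterization used above
    return np.exp(-(T - Tmean)**2/(4.0*D*T*Tmean**2))/np.sqrt(4.0*np.pi*D*T**3)

Estimating Tmean = np.mean(isis) and D = np.var(isis)/(2.0*np.mean(isis)**3) from the measured intervals gives a curve that can be overlaid on a normalized ISI histogram.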
- \vfill - \includegraphics[width=0.45\textwidth]{pifisihdistr01}\hfill - \includegraphics[width=0.45\textwidth]{pifisihdistr10} -\end{frame} - -\begin{frame} - \frametitle{Interval return map of LIF} - LIF $I=15.7$: - \includegraphics[width=1\textwidth]{lifreturnmap16} -\end{frame} - -\begin{frame} - \frametitle{Serial correlations of LIF} - LIF $I=15.7$: - \includegraphics[width=1\textwidth]{lifserial16}\\ - Integrate-and-fire driven with white noise are still renewal processes! -\end{frame} - -\begin{frame} - \frametitle{Count statistics of LIF} - LIF $I=15.7$: - \includegraphics[width=1\textwidth]{liffano16}\\ - Fano factor is not one! -\end{frame} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{frame} - \frametitle{Interval statistics of LIF with OU noise} - \begin{eqnarray*} - \tau \frac{dV}{dt} & = & -V + RI + U \\ - \tau_{OU} \frac{dU}{dt} & = & - U + D\xi - \end{eqnarray*} - Ohrnstein-Uhlenbeck noise is lowpass filtered white noise. - \includegraphics[width=0.45\textwidth]{lifouisihdistr08-100ms}\hfill - \includegraphics[width=0.45\textwidth]{lifouisihdistr16-100ms}\\ - More peaky than the inverse Gaussian! -\end{frame} - -\begin{frame} - \frametitle{Interval return map of LIF with OU noise} - LIF $I=15.7$, $\tau_{OU}=100$\,ms: - \includegraphics[width=1\textwidth]{lifoureturnmap16-100ms} -\end{frame} - -\begin{frame} - \frametitle{Serial correlations of LIF with OU noise} - LIF $I=15.7$, $\tau_{OU}=100$\,ms: - \includegraphics[width=1\textwidth]{lifouserial16-100ms}\\ - OU-noise introduces positive interval correlations! -\end{frame} - -\begin{frame} - \frametitle{Count statistics of LIF with OU noise} - LIF $I=15.7$, $\tau_{OU}=100$\,ms: - \includegraphics[width=1\textwidth]{lifoufano16-100ms}\\ - Fano factor increases with count window duration. -\end{frame} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{frame} - \frametitle{Interval statistics of LIF with adaptation} - \begin{eqnarray*} - \tau \frac{dV}{dt} & = & -V - A + RI + D\xi \\ - \tau_{adapt} \frac{dA}{dt} & = & - A - \end{eqnarray*} - Adaptation $A$ with time constant $\tau_{adapt}$ and increment $\Delta A$ at spike. - \includegraphics[width=0.45\textwidth]{lifadaptisihdistr08-100ms}\hfill - \includegraphics[width=0.45\textwidth]{lifadaptisihdistr65-100ms}\\ - Similar to LIF with white noise. -\end{frame} - -\begin{frame} - \frametitle{Interval return map of LIF with adaptation} - LIF $I=10$, $\tau_{adapt}=100$\,ms: - \includegraphics[width=1\textwidth]{lifadaptreturnmap10-100ms}\\ - Negative correlation at lag one. -\end{frame} - -\begin{frame} - \frametitle{Serial correlations of LIF with adaptation} - LIF $I=10$, $\tau_{adapt}=100$\,ms: - \includegraphics[width=1\textwidth]{lifadaptserial10-100ms}\\ - Adaptation with white noise introduces negative interval correlations! -\end{frame} - -\begin{frame} - \frametitle{Count statistics of LIF with adaptation} - LIF $I=10$, $\tau_{adapt}=100$\,ms: - \includegraphics[width=1\textwidth]{lifadaptfano10-100ms}\\ - Fano factor decreases with count window duration. 
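A minimal sketch of an adapting LIF following the equations above (parameter defaults are illustrative; the adaptation variable A decays with time constant tau_adapt and is incremented by dA at every spike):

import numpy as np

def lifadaptspikes(I, duration=10.0, dt=1e-4, tau=0.01, R=1.0, D=1.0,
                   tau_adapt=0.1, dA=1.0, vthresh=10.0, vreset=0.0):
    # LIF with an adaptation current A: tau*dV/dt = -V - A + R*I + D*xi,
    # tau_adapt*dA/dt = -A, and A -> A + dA at each spike.
    noise = np.sqrt(2.0*D)*np.random.randn(int(duration/dt))/np.sqrt(dt)
    times = []
    v = vreset
    a = 0.0
    for k in range(len(noise)):
        v += (-v - a + R*I + noise[k])*dt/tau
        a += -a*dt/tau_adapt
        if v >= vthresh:
            v = vreset
            a += dA
            times.append(k*dt)
    return np.asarray(times)

The negative interval correlations and the decrease of the Fano factor with window duration shown above stem from these spike-triggered adaptation increments.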
-\end{frame} - - -\end{document} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Non stationary} -\subsection{Inhomogeneous Poisson process} -\subsection{Firing rate} -\subsection{Instantaneous rate} -\subsection{Autocorrelation} -\subsection{Crosscorrelation} -\subsection{Joint PSTH} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Renewal process} -\subsection{Superthreshold firing} -\subsection{Subthreshold firing} -\section{Non-renewal processes} -\subsection{Bursting} -\subsection{Resonator} - - -\subsection{Standard distributions} -\subsubsection{Gamma} -\subsubsection{How to read ISI histograms} -refractoriness, poisson tail, sub-, supra-threshold, missed spikes - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Correlation with stimulus} -\subsection{Tuning curve} -\subsection{Linear filter} -\subsection{Spatiotemporal receptive field} -\subsection{Generalized linear model} -\begin{frame} -\end{frame} diff --git a/pointprocesses/lecture/pointprocessscetchA.eps b/pointprocesses/lecture/pointprocessscetchA.eps index 95a12ae..e799c6c 100644 --- a/pointprocesses/lecture/pointprocessscetchA.eps +++ b/pointprocesses/lecture/pointprocessscetchA.eps @@ -1,7 +1,7 @@ %!PS-Adobe-2.0 EPSF-2.0 %%Title: pointprocessscetchA.tex %%Creator: gnuplot 4.6 patchlevel 4 -%%CreationDate: Sun Oct 26 14:09:12 2014 +%%CreationDate: Mon Oct 26 09:31:15 2015 %%DocumentFonts: %%BoundingBox: 50 50 373 135 %%EndComments @@ -430,10 +430,10 @@ SDict begin [ /Title (pointprocessscetchA.tex) /Subject (gnuplot plot) /Creator (gnuplot 4.6 patchlevel 4) - /Author (jan) + /Author (benda) % /Producer (gnuplot) % /Keywords () - /CreationDate (Sun Oct 26 14:09:12 2014) + /CreationDate (Mon Oct 26 09:31:15 2015) /DOCINFO pdfmark end } ifelse diff --git a/pointprocesses/lecture/pointprocessscetchA.pdf b/pointprocesses/lecture/pointprocessscetchA.pdf index dcc5228..83c76bc 100644 Binary files a/pointprocesses/lecture/pointprocessscetchA.pdf and b/pointprocesses/lecture/pointprocessscetchA.pdf differ diff --git a/pointprocesses/lecture/pointprocessscetchB.eps b/pointprocesses/lecture/pointprocessscetchB.eps index d204109..0e1a2c1 100644 --- a/pointprocesses/lecture/pointprocessscetchB.eps +++ b/pointprocesses/lecture/pointprocessscetchB.eps @@ -1,7 +1,7 @@ %!PS-Adobe-2.0 EPSF-2.0 %%Title: pointprocessscetchB.tex %%Creator: gnuplot 4.6 patchlevel 4 -%%CreationDate: Sun Oct 26 17:34:18 2014 +%%CreationDate: Mon Oct 26 09:31:16 2015 %%DocumentFonts: %%BoundingBox: 50 50 373 237 %%EndComments @@ -430,10 +430,10 @@ SDict begin [ /Title (pointprocessscetchB.tex) /Subject (gnuplot plot) /Creator (gnuplot 4.6 patchlevel 4) - /Author (jan) + /Author (benda) % /Producer (gnuplot) % /Keywords () - /CreationDate (Sun Oct 26 17:34:18 2014) + /CreationDate (Mon Oct 26 09:31:16 2015) /DOCINFO pdfmark end } ifelse diff --git a/pointprocesses/lecture/pointprocessscetchB.pdf b/pointprocesses/lecture/pointprocessscetchB.pdf index a4c7e8c..09d11f8 100644 Binary files a/pointprocesses/lecture/pointprocessscetchB.pdf and b/pointprocesses/lecture/pointprocessscetchB.pdf differ diff --git a/pointprocesses/lecture/rasterexamples.py b/pointprocesses/lecture/rasterexamples.py new file mode 100644 index 0000000..c7c2433 --- /dev/null +++ b/pointprocesses/lecture/rasterexamples.py @@ -0,0 +1,86 @@ +import numpy as np +import matplotlib.pyplot as plt + +def hompoisson(rate, trials, duration) : + spikes = [] + for k in range(trials) 
: + times = [] + t = 0.0 + while t < duration : + t += np.random.exponential(1/rate) + times.append( t ) + spikes.append( times ) + return spikes + +def inhompoisson(rate, trials, dt) : + spikes = [] + p = rate*dt + for k in range(trials) : + x = np.random.rand(len(rate)) + times = dt*np.nonzero(x= vthresh : + v = vreset + times.append(k*dt) + spikes.append( times ) + return spikes + +# parameter: +rate = 20.0 +drate = 50.0 +trials = 10 +duration = 2.0 +dt = 0.001 +tau = 0.1; + +# homogeneous spike trains: +homspikes = hompoisson(rate, trials, duration) + +# OU noise: +rng = np.random.RandomState(54637281) +time = np.arange(0.0, duration, dt) +x = np.zeros(time.shape)+rate +n = rng.randn(len(time))*drate*tau/np.sqrt(dt)+rate +for k in xrange(1,len(x)) : + x[k] = x[k-1] + (n[k]-x[k-1])*dt/tau +x[x<0.0] = 0.0 + +# inhomogeneous spike trains: +#inhspikes = inhompoisson(x, trials, dt) +# pif spike trains: +inhspikes = pifspikes(x, trials, dt, D=0.3) + +fig = plt.figure( figsize=(9,4) ) +ax = fig.add_subplot(1, 2, 1) +ax.set_title('stationary') +ax.set_xlim(0.0, duration) +ax.set_ylim(-0.5, trials-0.5) +ax.set_xlabel('Time [s]') +ax.set_ylabel('Trials') +ax.eventplot(homspikes, colors=[[0, 0, 0]], linelength=0.8) + +ax = fig.add_subplot(1, 2, 2) +ax.set_title('non-stationary') +ax.set_xlim(0.0, duration) +ax.set_ylim(-0.5, trials-0.5) +ax.set_xlabel('Time [s]') +ax.set_ylabel('Trials') +ax.eventplot(inhspikes, colors=[[0, 0, 0]], linelength=0.8) + +plt.tight_layout() +plt.savefig('rasterexamples.pdf') +plt.show() diff --git a/pointprocesses/lecture/returnmapexamples.py b/pointprocesses/lecture/returnmapexamples.py new file mode 100644 index 0000000..96803ff --- /dev/null +++ b/pointprocesses/lecture/returnmapexamples.py @@ -0,0 +1,105 @@ +import numpy as np +import matplotlib.pyplot as plt + +def hompoisson(rate, trials, duration) : + spikes = [] + for k in range(trials) : + times = [] + t = 0.0 + while t < duration : + t += np.random.exponential(1/rate) + times.append( t ) + spikes.append( times ) + return spikes + +def inhompoisson(rate, trials, dt) : + spikes = [] + p = rate*dt + for k in range(trials) : + x = np.random.rand(len(rate)) + times = dt*np.nonzero(x= vthresh : + v = vreset + times.append(k*dt) + spikes.append( times ) + return spikes + +def isis( spikes ) : + isi = [] + for k in xrange(len(spikes)) : + isi.extend(np.diff(spikes[k])) + return np.array( isi ) + +def plotisih( ax, isis, binwidth=None ) : + if binwidth == None : + nperbin = 200.0 # average number of isis per bin + bins = len(isis)/nperbin # number of bins + binwidth = np.max(isis)/bins + if binwidth < 5e-4 : # half a millisecond + binwidth = 5e-4 + h, b = np.histogram(isis, np.arange(0.0, np.max(isis)+binwidth, binwidth), density=True) + ax.text(0.9, 0.85, 'rate={:.0f}Hz'.format(1.0/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.75, 'mean={:.0f}ms'.format(1000.0*np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.65, 'CV={:.2f}'.format(np.std(isis)/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.set_xlabel('ISI [ms]') + ax.set_ylabel('p(ISI) [1/s]') + ax.bar( 1000.0*b[:-1], h, 1000.0*np.diff(b) ) + +def plotreturnmap(ax, isis, lag=1, max=None) : + ax.set_xlabel(r'ISI$_i$ [ms]') + ax.set_ylabel(r'ISI$_{i+1}$ [ms]') + if max != None : + ax.set_xlim(0.0, 1000.0*max) + ax.set_ylim(0.0, 1000.0*max) + ax.scatter( 1000.0*isis[:-lag], 1000.0*isis[lag:] ) + +# parameter: +rate = 20.0 +drate = 50.0 +trials = 10 +duration = 10.0 +dt = 0.001 +tau = 0.1; + +# homogeneous spike 
trains: +homspikes = hompoisson(rate, trials, duration) + +# OU noise: +rng = np.random.RandomState(54637281) +time = np.arange(0.0, duration, dt) +x = np.zeros(time.shape)+rate +n = rng.randn(len(time))*drate*tau/np.sqrt(dt)+rate +for k in xrange(1,len(x)) : + x[k] = x[k-1] + (n[k]-x[k-1])*dt/tau +x[x<0.0] = 0.0 + +# pif spike trains: +inhspikes = pifspikes(x, trials, dt, D=0.3) + +fig = plt.figure( figsize=(9,4) ) +ax = fig.add_subplot(1, 2, 1) +ax.set_title('stationary') +plotreturnmap(ax, isis(homspikes), 1, 0.3) + +ax = fig.add_subplot(1, 2, 2) +ax.set_title('non-stationary') +plotreturnmap(ax, isis(inhspikes), 1, 0.3) + +plt.tight_layout() +plt.savefig('returnmapexamples.pdf') +#plt.show() diff --git a/pointprocesses/lecture/serialcorrexamples.py b/pointprocesses/lecture/serialcorrexamples.py new file mode 100644 index 0000000..e7fff9c --- /dev/null +++ b/pointprocesses/lecture/serialcorrexamples.py @@ -0,0 +1,117 @@ +import numpy as np +import matplotlib.pyplot as plt + +def hompoisson(rate, trials, duration) : + spikes = [] + for k in range(trials) : + times = [] + t = 0.0 + while t < duration : + t += np.random.exponential(1/rate) + times.append( t ) + spikes.append( times ) + return spikes + +def inhompoisson(rate, trials, dt) : + spikes = [] + p = rate*dt + for k in range(trials) : + x = np.random.rand(len(rate)) + times = dt*np.nonzero(x= vthresh : + v = vreset + times.append(k*dt) + spikes.append( times ) + return spikes + +def isis( spikes ) : + isi = [] + for k in xrange(len(spikes)) : + isi.extend(np.diff(spikes[k])) + return np.array( isi ) + +def plotisih( ax, isis, binwidth=None ) : + if binwidth == None : + nperbin = 200.0 # average number of isis per bin + bins = len(isis)/nperbin # number of bins + binwidth = np.max(isis)/bins + if binwidth < 5e-4 : # half a millisecond + binwidth = 5e-4 + h, b = np.histogram(isis, np.arange(0.0, np.max(isis)+binwidth, binwidth), density=True) + ax.text(0.9, 0.85, 'rate={:.0f}Hz'.format(1.0/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.75, 'mean={:.0f}ms'.format(1000.0*np.mean(isis)), ha='right', transform=ax.transAxes) + ax.text(0.9, 0.65, 'CV={:.2f}'.format(np.std(isis)/np.mean(isis)), ha='right', transform=ax.transAxes) + ax.set_xlabel('ISI [ms]') + ax.set_ylabel('p(ISI) [1/s]') + ax.bar( 1000.0*b[:-1], h, 1000.0*np.diff(b) ) + +def plotreturnmap(ax, isis, lag=1, max=None) : + ax.set_xlabel(r'ISI$_i$ [ms]') + ax.set_ylabel(r'ISI$_{i+1}$ [ms]') + if max != None : + ax.set_xlim(0.0, 1000.0*max) + ax.set_ylim(0.0, 1000.0*max) + ax.scatter( 1000.0*isis[:-lag], 1000.0*isis[lag:] ) + +def plotserialcorr(ax, isis, maxlag=10) : + lags = np.arange(maxlag+1) + corr = [1.0] + for lag in lags[1:] : + corr.append(np.corrcoef(isis[:-lag], isis[lag:])[0,1]) + ax.set_xlabel(r'lag $k$') + ax.set_ylabel(r'ISI correlation $\rho_k$') + ax.set_xlim(0.0, maxlag) + ax.set_ylim(-1.0, 1.0) + ax.plot(lags, corr, '.-', markersize=20) + +# parameter: +rate = 20.0 +drate = 50.0 +trials = 10 +duration = 500.0 +dt = 0.001 +tau = 0.1; + +# homogeneous spike trains: +homspikes = hompoisson(rate, trials, duration) + +# OU noise: +rng = np.random.RandomState(54637281) +time = np.arange(0.0, duration, dt) +x = np.zeros(time.shape)+rate +n = rng.randn(len(time))*drate*tau/np.sqrt(dt)+rate +for k in xrange(1,len(x)) : + x[k] = x[k-1] + (n[k]-x[k-1])*dt/tau +x[x<0.0] = 0.0 + +# pif spike trains: +inhspikes = pifspikes(x, trials, dt, D=0.3) + +fig = plt.figure( figsize=(9,3) ) + +ax = fig.add_subplot(1, 2, 1) +plotserialcorr(ax, isis(homspikes)) 
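+# restrict the y-range to resolve the small correlations at lags k > 0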
+ax.set_ylim(-0.2, 1.0) + +ax = fig.add_subplot(1, 2, 2) +plotserialcorr(ax, isis(inhspikes)) +ax.set_ylim(-0.2, 1.0) + +plt.tight_layout() +plt.savefig('serialcorrexamples.pdf') +#plt.show() diff --git a/programming/lectures/images/badbarleft.png b/programming/lectures/images/badbarleft.png new file mode 100644 index 0000000..667e029 Binary files /dev/null and b/programming/lectures/images/badbarleft.png differ diff --git a/programming/lectures/images/badbarplot.jpg b/programming/lectures/images/badbarplot.jpg new file mode 100644 index 0000000..f9327de Binary files /dev/null and b/programming/lectures/images/badbarplot.jpg differ diff --git a/programming/lectures/images/badbarright.png b/programming/lectures/images/badbarright.png new file mode 100644 index 0000000..22f32dc Binary files /dev/null and b/programming/lectures/images/badbarright.png differ diff --git a/programming/lectures/images/comparison_properly_improperly_graph.png b/programming/lectures/images/comparison_properly_improperly_graph.png new file mode 100644 index 0000000..eaae9c2 Binary files /dev/null and b/programming/lectures/images/comparison_properly_improperly_graph.png differ diff --git a/programming/lectures/images/histogram.png b/programming/lectures/images/histogram.png new file mode 100755 index 0000000..8eaaa88 Binary files /dev/null and b/programming/lectures/images/histogram.png differ diff --git a/programming/lectures/images/histogrambad.png b/programming/lectures/images/histogrambad.png new file mode 100755 index 0000000..3ce2c65 Binary files /dev/null and b/programming/lectures/images/histogrambad.png differ diff --git a/programming/lectures/images/histogrambad2.png b/programming/lectures/images/histogrambad2.png new file mode 100755 index 0000000..a11dc47 Binary files /dev/null and b/programming/lectures/images/histogrambad2.png differ diff --git a/programming/lectures/images/improperly_scaled_graph.png b/programming/lectures/images/improperly_scaled_graph.png new file mode 100644 index 0000000..ac99274 Binary files /dev/null and b/programming/lectures/images/improperly_scaled_graph.png differ diff --git a/programming/lectures/images/line_graph1.png b/programming/lectures/images/line_graph1.png new file mode 100644 index 0000000..afce7a1 Binary files /dev/null and b/programming/lectures/images/line_graph1.png differ diff --git a/programming/lectures/images/line_graph1_3.png b/programming/lectures/images/line_graph1_3.png new file mode 100644 index 0000000..3a4a528 Binary files /dev/null and b/programming/lectures/images/line_graph1_3.png differ diff --git a/programming/lectures/images/line_graph1_4.png b/programming/lectures/images/line_graph1_4.png new file mode 100644 index 0000000..d13e30f Binary files /dev/null and b/programming/lectures/images/line_graph1_4.png differ diff --git a/programming/lectures/images/misleading_pie.png b/programming/lectures/images/misleading_pie.png new file mode 100644 index 0000000..3dae0cd Binary files /dev/null and b/programming/lectures/images/misleading_pie.png differ diff --git a/programming/lectures/images/nobelbad.png b/programming/lectures/images/nobelbad.png new file mode 100644 index 0000000..4602b7c Binary files /dev/null and b/programming/lectures/images/nobelbad.png differ diff --git a/programming/lectures/images/one_d_problem_c.pdf b/programming/lectures/images/one_d_problem_c.pdf new file mode 100644 index 0000000..31974db Binary files /dev/null and b/programming/lectures/images/one_d_problem_c.pdf differ diff --git 
a/programming/lectures/images/properly_scaled_graph.png b/programming/lectures/images/properly_scaled_graph.png new file mode 100644 index 0000000..1a5de32 Binary files /dev/null and b/programming/lectures/images/properly_scaled_graph.png differ diff --git a/programming/lectures/images/sample_pie.png b/programming/lectures/images/sample_pie.png new file mode 100644 index 0000000..937a231 Binary files /dev/null and b/programming/lectures/images/sample_pie.png differ diff --git a/programming/lectures/plotting_spike_trains.tex b/programming/lectures/plotting_spike_trains.tex index cb85c51..13d9e06 100644 --- a/programming/lectures/plotting_spike_trains.tex +++ b/programming/lectures/plotting_spike_trains.tex @@ -86,6 +86,8 @@ \end{flushright} } +\newcommand{\code}[1]{\texttt{#1}} + \input{../../latex/environments.tex} \makeatother @@ -103,11 +105,259 @@ \begin{enumerate} \item Graphische Darstellung von Daten \item Spiketrain Analyse - \item \"Ubungen, \"Ubungen, \"Ubungen. \end{enumerate} \end{frame} +\begin{frame}[plain] + \huge{1. Graphische Darstellung von Daten}\pause + \begin{figure} + \includegraphics[width=0.9\columnwidth]{images/convincing} + \end{figure} +\end{frame} + +\begin{frame} + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Was soll ein Datenplot erreichen?} + + \begin{itemize} + \item Ist eine m\"oglichst neutrale Darstellung der Daten. + \item Soll dem Leser die Daten greifbar machen und die Aussagen der + Analyse darstellen. + \item Erlaubt dem Leser die gezeigten Effekte selbst zu beguachten + und zu validieren. + \item Muss vollst\"andig annotiert sein. + \item Folgt dem Prinzip der \textbf{ink minimization}. (Das + Verh\"altnis aus Tinte, die f\"ur die Darstellung der Daten + gebraucht wird und der Menge Tinte, die f\"ur die Graphik + ben\"otigt wird sollte m\"oglichst gro{\ss} sein ) + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Was sollte vermieden werden?} + + \begin{itemize} + \item Suggestive oder gar fehlleitende Darstellung. + \item Ablenkung durch unruhige oder \"uberm\"a{\ss}ige Effekte. + \item Comicartige Effekte... + \end{itemize}\pause + \begin{figure} + \includegraphics[width=0.35\columnwidth]{images/one_d_problem_c} + \end{figure}\pause + ... aus{\ss}er sie werden rein zur Illustration benutzt ohne einen + Anspruch auf Richtigkeit zu erheben. 
+\end{frame} + +\begin{frame} + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Suboptimale Beispiele} + \only <1> { + \begin{figure} + \includegraphics[width=0.5\columnwidth]{images/nobelbad} + \end{figure} + \vspace{0.25cm} + Aus Hafting et al., Nature, 2005 + } + \only <2> { + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/misleading_pie} + \end{figure} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <3> { + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/sample_pie} + \end{figure} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <4> { + \begin{figure} + \includegraphics[width=0.4\columnwidth]{images/badbarright} + \end{figure} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <5> { + \begin{figure} + \includegraphics[width=0.4\columnwidth]{images/badbarleft} + \end{figure} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <6> { + \begin{figure} + \includegraphics[width=0.8\columnwidth]{images/badbarplot} + \end{figure} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <7> { + Wahl der Zeichenfl\"ache kann den visuellen Eindruck beeinflu{\ss}en. + \begin{columns} + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/line_graph1} + \end{figure} + \end{column} + + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/line_graph1_3} + \end{figure} + \end{column} + + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/line_graph1_4} + \end{figure} + \end{column} + \end{columns} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } + \only <8> { + Vorsicht bei der Skalierung von Symbolen! + \begin{columns} + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/improperly_scaled_graph} + \end{figure} + \end{column} + + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/properly_scaled_graph} + \end{figure} + \end{column} + + \begin{column}{4.cm} + \begin{figure} + \includegraphics[width=0.7\columnwidth]{images/comparison_properly_improperly_graph} + \end{figure} + \end{column} + \end{columns} + \vspace{0.5cm} + \url{https://en.wikipedia.org/wiki/Misleading_graph} + } +\end{frame} + +\begin{frame}[fragile] + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Plotting Interfaces in Matlab} + Es gibt zwei Wege Graphen zu bearbeiten: + \begin{enumerate} + \item Interaktiv \"uber das \textit{graphische User Interface}\pause + \item Die Kommandozeile bzw. in Skripten und Funktionen.\pause + \end{enumerate} + Beides hat seine Berechtigung und seine eigenen Vor- und Nachteile. Welche? +\end{frame} + + +\begin{frame} + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Ver\"anderung des Graphen \"uber die Kommandozeile} + \begin{itemize} + \item Erstellt ein Skript, dass einen Plot erstellt. + \item Dieser soll zwei Sinus unterschiedlicher Frequenz darstellen. + \end{itemize} + Wir werden jetzt die Kommandozeil bzw. das Skript verbessern um den + Plot ``sch\"oner'' zu machen. +\end{frame} + + +\begin{frame} + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Ver\"anderung des Graphen \"uber die Kommandozeile} + \begin{enumerate} + \item Einstellungen der Linienplots: + \begin{itemize} + \item St\"arke und Farbe. + \item Linienstil, Marker. 
+ \end{itemize}\pause + \item Achsbeschriftung: + \begin{itemize} + \item \code{xlabel}, \code{ylabel}. + \item Schriftart und Gr\"o{\ss}e. + \end{itemize}\pause + \item Achsenskalierung und Ticks: + \begin{itemize} + \item Skalierung der Achsen (Minumum und Maxmimum, logarithmisch oder linear). + \item Manuelles Setzen der Ticks, ihrer Richtung und Beschriftung. + \item Grid or no Grid? + \end{itemize}\pause + \item Setzen von globalen Parametern: + \begin{itemize} + \item Einstellung der Papiergr\"o{\ss}e und plzieren der + Zeichenfl\"ache. + \item Box oder nicht? + \item Speichern der Abbildung als pdf. + \end{itemize} + \end{enumerate} +\end{frame} + + +\begin{frame} [fragile] + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Ver\"andern von Eigenschaften \"uber die Kommandozeile} + \vspace{-0.75em} + \scriptsize + \begin{lstlisting} +fig = figure(); +set(gcf, 'PaperUnits', 'centimeters', 'PaperSize', [11.7 9.0]); +set(gcf, 'PaperPosition',[0.0 0.0 11.7 9.0], 'Color', 'white') +hold on +plot(time, neuronal_data, 'color', [ 0.2 0.5 0.7], 'linewidth', 1.) +plot(spike_times, ones(size(spike_times))*threshold, 'ro', 'markersize', 4) +line([time(1) time(end)], [threshold threshold], 'linestyle', '--', + 'linewidth', 0.75, 'color', [0.9 0.9 0.9]) +ylim([0 35]) +xlim([0 2.25]) +box('off') +xlabel('time [s]', 'fontname', 'MyriadPro-Regular', 'fontsize', 10) +ylabel('potential [mV]', 'fontname', 'MyriadPro-Regular', 'fontsize', 10) +title('pyramidal cell', 'fontname', 'MyriadPro-Regular', 'fontsize', 12) +set(gca, 'TickDir','out', 'linewidth', 1.5, 'fontname', 'MyriadPro-Regular') +saveas(fig, 'spike_detection.pdf', 'pdf') +\end{lstlisting} +\end{frame} + + +\begin{frame} [fragile] + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Ver\"andern von Eigenschaften \"uber die Kommandozeile} + \begin{figure} + \centering + \includegraphics[width=0.75\columnwidth]{./images/spike_detection} + \end{figure} +\end{frame} + + +\begin{frame} [fragile] + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Welche Art Plot wof\"ur?} + \url{http://www.mathworks.de/discovery/gallery.html} +\end{frame} + + +\begin{frame} [fragile] + \frametitle{Graphische Darstellung von Daten} + \framesubtitle{Was macht einen guten Abbildung aus?} + \begin{enumerate} + \item Klarheit. + \item Vollstaendige Beschriftung. + \item Deutliche Unterscheidbarkeit von Kurven. + \item Keine suggestive Darstellung. + \item Ausgewogenheit von Linienst\"arken Schrift- und Plotgr\"o{\ss}e. + \item Vermeidung von Suggestiven Darstellungen. + \item Fehlerbalken, wenn sie angebracht sind. + \end{enumerate} +\end{frame} + + \begin{frame}[plain] \huge{2. 
Spiketrain Analyse I} \end{frame} diff --git a/scientificcomputing-script.tex b/scientificcomputing-script.tex index 306a84a..a053861 100644 --- a/scientificcomputing-script.tex +++ b/scientificcomputing-script.tex @@ -59,7 +59,8 @@ % figures: \setlength{\fboxsep}{0pt} -\newcommand{\texpicture}[1]{{\sffamily\footnotesize\input{#1.tex}}} +\newcommand{\texinputpath}{} +\newcommand{\texpicture}[1]{{\sffamily\footnotesize\input{\texinputpath#1.tex}}} %\newcommand{\texpicture}[1]{\fbox{\sffamily\footnotesize\input{#1.tex}}} %\newcommand{\texpicture}[1]{\setlength{\fboxsep}{2mm}\fbox{#1}} %\newcommand{\texpicture}[1]{} @@ -204,8 +205,9 @@ \newenvironment{definition}[1][]{\medskip\noindent\textbf{Definition}\ifthenelse{\equal{#1}{}}{}{ #1}:\newline}% {\medskip} +%%%%% exercises: %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcounter{maxexercise} -\setcounter{maxexercise}{9} % show listings up to exercise maxexercise +\setcounter{maxexercise}{10} % show listings up to exercise maxexercise \newcounter{theexercise} \setcounter{theexercise}{1} \newcommand{\codepath}{} @@ -213,7 +215,7 @@ \arabic{theexercise}:}\newline \newcommand{\exercisesource}{#1}}% {\ifthenelse{\equal{\exercisesource}{}}{}{\ifthenelse{\value{theexercise}>\value{maxexercise}}{}{\medskip\lstinputlisting{\codepath\exercisesource}}}\medskip\stepcounter{theexercise}} -\graphicspath{{statistics/lecture/}{statistics/lecture/figures/}{bootstrap/lecture/}{bootstrap/lecture/figures/}{likelihood/lecture/}{likelihood/lecture/figures/}} +\graphicspath{{statistics/lecture/}{statistics/lecture/figures/}{bootstrap/lecture/}{bootstrap/lecture/figures/}{likelihood/lecture/}{likelihood/lecture/figures/}{pointprocesses/lecture/}{pointprocesses/lecture/figures/}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -233,4 +235,8 @@ \renewcommand{\codepath}{likelihood/code/} \include{likelihood/lecture/likelihood} +\renewcommand{\codepath}{pointprocesses/code/} +\renewcommand{\texinputpath}{pointprocesses/lecture/} +%\include{pointprocesses/lecture/pointprocesses} + \end{document} diff --git a/statistics/code/boltzmann.m b/statistics/code/boltzmann.m new file mode 100644 index 0000000..8190460 --- /dev/null +++ b/statistics/code/boltzmann.m @@ -0,0 +1,7 @@ +function y = boltzmann(parameter, x) +% parameter 1: alpha +% parameter 2: k +% parameter 3: x_0 +% parameter 4: y_0 + +y = (parameter(1) ./ (1 + exp(-parameter(2) .* (x - parameter(3))))) + parameter(4); \ No newline at end of file diff --git a/statistics/code/create_linear_data.m b/statistics/code/create_linear_data.m new file mode 100644 index 0000000..f929f23 --- /dev/null +++ b/statistics/code/create_linear_data.m @@ -0,0 +1,7 @@ +function y = create_linear_data(x) + +m = 2.5; +n = -0.35; +d = 2.5; + +y = x .* m + n + randn(size(x)) .* d; diff --git a/statistics/code/estimate_regression.m b/statistics/code/estimate_regression.m new file mode 100644 index 0000000..fd3d2c4 --- /dev/null +++ b/statistics/code/estimate_regression.m @@ -0,0 +1,7 @@ +function param = estimate_regression(x,y,p_0) + +objective_function = @(p)lsq_error(p, x, y); +param = fminunc(objective_function, p_0); +disp(param) +param1 = fminsearch(objective_function, p_0); +disp(param1) \ No newline at end of file diff --git a/statistics/code/exponential.m b/statistics/code/exponential.m new file mode 100644 index 0000000..1bb2a04 --- /dev/null +++ b/statistics/code/exponential.m @@ -0,0 +1,5 @@ +function y = exponential(parameter, x) +% Function implements an exponential function with two parameters +% 
controlling the amplitude and the time constant. + +y = parameter(1) .* exp(x./parameter(2)); \ No newline at end of file diff --git a/statistics/code/lsq_gradient_sigmoid.m b/statistics/code/lsq_gradient_sigmoid.m new file mode 100644 index 0000000..05e9721 --- /dev/null +++ b/statistics/code/lsq_gradient_sigmoid.m @@ -0,0 +1,9 @@ +function gradient = lsq_gradient_sigmoid(parameter, x, y) +h = 1e-6; + +gradient = zeros(size(parameter)); +for i = 1:length(parameter) + parameter_h = parameter; + parameter_h(i) = parameter_h(i) + h; + gradient(i) = (lsq_sigmoid_error(parameter_h, x, y) - lsq_sigmoid_error(parameter, x, y)) / h; +end \ No newline at end of file diff --git a/statistics/code/lsq_sigmoid_error.m b/statistics/code/lsq_sigmoid_error.m new file mode 100644 index 0000000..5f05f21 --- /dev/null +++ b/statistics/code/lsq_sigmoid_error.m @@ -0,0 +1,8 @@ +function error = lsq_sigmoid_error(parameter, x, y) +% p(1) the amplitude +% p(2) the slope +% p(3) the x-shift +% p(4) the y-shift + +y_est = parameter(1)./(1+ exp(-parameter(2) .* (x - parameter(3)))) + parameter(4); +error = mean((y_est - y).^2); \ No newline at end of file diff --git a/statistics/code/sigmoidal_gradient_descent.m b/statistics/code/sigmoidal_gradient_descent.m new file mode 100644 index 0000000..9763f5a --- /dev/null +++ b/statistics/code/sigmoidal_gradient_descent.m @@ -0,0 +1,44 @@ + + +%% fit the sigmoid + +clear +close all + +load('iv_curve.mat') + +figure() +plot(voltage, current, 'o') +xlabel('voltate [mV]') +ylabel('current [pA]') + +% amplitude, slope, x-shift, y-shift +%parameter = [10 0.25 -50, 2.5]; +parameter = [20 0.5 -50, 2.5]; + +eps = 0.1; +% do the descent +gradient = []; +steps = 0; +error = []; + +while isempty(gradient) || norm(gradient) > 0.01 + steps = steps + 1; + gradient = lsq_gradient_sigmoid(parameter, voltage, current); + error(steps) = lsq_sigmoid_error(parameter, voltage, current); + parameter = parameter - eps .* gradient; +end +plot(1:steps, error) + +disp('gradient descent done!') +disp(strcat('final position: ', num2str(parameter))) +disp(strcat('final error: ', num2str(error(end)))) + +%% use fminsearch +parameter = [10 0.5 -50, 2.5]; + +objective_function = @(p)lsq_sigmoid_error(p, voltage, current); +param = fminunc(objective_function, parameter); +disp(param) +param1 = fminsearch(objective_function, parameter); +disp(param1) diff --git a/statistics/exercises/mlestd.m b/statistics/exercises/mlestd.m index 61f4b22..de37a56 100644 --- a/statistics/exercises/mlestd.m +++ b/statistics/exercises/mlestd.m @@ -27,4 +27,4 @@ subplot(1, 2, 2); plot(psigs, loglm); xlabel('standard deviation') ylabel('log likelihood') -savefigpdf(gcf, 'mlestd.pdf', 12, 5); +savefigpdf(gcf, 'mlestd.pdf', 15, 5); diff --git a/statistics/exercises/mlestd.pdf b/statistics/exercises/mlestd.pdf index dad420c..f01f927 100644 Binary files a/statistics/exercises/mlestd.pdf and b/statistics/exercises/mlestd.pdf differ diff --git a/statistics/exercises/statistics04.tex b/statistics/exercises/statistics04.tex index 276f7ea..1c3dcdf 100644 --- a/statistics/exercises/statistics04.tex +++ b/statistics/exercises/statistics04.tex @@ -113,8 +113,10 @@ Absch\"atzung der Standardabweichung verdeutlichen. \continue %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \question \qt{Maximum-Likelihood-Sch\"atzer einer Ursprungsgeraden} -In der Vorlesung haben wir eine Gleichung f\"ur die Maximum-Likelihood -Absch\"atzung der Steigung einer Ursprungsgeraden hergeleitet. 
+In der Vorlesung haben wir folgende Formel f\"ur die Maximum-Likelihood +Absch\"atzung der Steigung $\theta$ einer Ursprungsgeraden durch $n$ Datenpunkte $(x_i|y_i)$ mit Standardabweichung $\sigma_i$ hergeleitet: +\[\theta = \frac{\sum_{i=1}^n \frac{x_iy_i}{\sigma_i^2}}{ \sum_{i=1}^n + \frac{x_i^2}{\sigma_i^2}} \] \begin{parts} \part \label{mleslopefunc} Schreibe eine Funktion, die in einem $x$ und einem $y$ Vektor die Datenpaare \"uberreicht bekommt und die Steigung der @@ -146,13 +148,12 @@ nicht so einfach wie der Mittelwert und die Standardabweichung einer Normalverteilung direkt aus den Daten berechnet werden k\"onnen. Solche Parameter m\"ussen dann aus den Daten mit der Maximum-Likelihood-Methode gefittet werden. -Um dies zu veranschaulichen ziehen wir uns diesmal Zufallszahlen, die nicht einer -Normalverteilung entstammen, sonder aus der Gamma-Verteilung. +Um dies zu veranschaulichen ziehen wir uns diesmal nicht normalverteilte Zufallszahlen, sondern Zufallszahlen aus der Gamma-Verteilung. \begin{parts} \part - Finde heraus welche Funktion die Wahrscheinlichkeitsdichtefunktion - (probability density function) der Gamma-Verteilung in \code{matlab} - berechnet. + Finde heraus welche \code{matlab} Funktion die + Wahrscheinlichkeitsdichtefunktion (probability density function) der + Gamma-Verteilung berechnet. \part Plotte mit Hilfe dieser Funktion die Wahrscheinlichkeitsdichtefunktion @@ -169,17 +170,17 @@ Normalverteilung entstammen, sonder aus der Gamma-Verteilung. \part Finde heraus mit welcher \code{matlab}-Funktion eine beliebige - Verteilung (``distribution'') und die Gammaverteilung an die - Zufallszahlen nach der Maximum-Likelihood Methode gefittet werden - kann. + Verteilung (``distribution'') an die Zufallszahlen nach der + Maximum-Likelihood Methode gefittet werden kann. Wie wird diese + Funktion benutzt, um die Gammaverteilung an die Daten zu fitten? \part - Bestimme mit dieser Funktion die Parameter der - Gammaverteilung aus den Zufallszahlen. + Bestimme mit dieser Funktion die Parameter der Gammaverteilung aus + den Zufallszahlen. \part - Plotte anschlie{\ss}end - die Gammaverteilung mit den gefitteten Parametern. + Plotte anschlie{\ss}end die Gammaverteilung mit den gefitteten + Parametern. \end{parts} \begin{solution} \lstinputlisting{mlepdffit.m} diff --git a/statistics/lecture/Makefile b/statistics/lecture/Makefile index 445913e..ec84405 100644 --- a/statistics/lecture/Makefile +++ b/statistics/lecture/Makefile @@ -1,22 +1,29 @@ BASENAME=statistics + PYFILES=$(wildcard *.py) PYPDFFILES=$(PYFILES:.py=.pdf) -pdf : $(BASENAME)-chapter.pdf $(PYPDFFILES) +all : pdf + +# script: +pdf : $(BASENAME)-chapter.pdf -$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex +$(BASENAME)-chapter.pdf : $(BASENAME)-chapter.tex $(BASENAME).tex $(PYPDFFILES) pdflatex -interaction=scrollmode $< | tee /dev/stderr | fgrep -q "Rerun to get cross-references right" && pdflatex -interaction=scrollmode $< || true $(PYPDFFILES) : %.pdf : %.py python $< clean : - rm -f *~ $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out $(BASENAME).aux $(BASENAME).log + rm -f *~ + rm -f $(BASENAME).aux $(BASENAME).log + rm -f $(BASENAME)-chapter.aux $(BASENAME)-chapter.log $(BASENAME)-chapter.out + rm -f $(PYPDFFILES) $(GPTTEXFILES) cleanall : clean rm -f $(BASENAME)-chapter.pdf -watch : +watchpdf : while true; do ! 
make -q pdf && make pdf; sleep 0.5; done diff --git a/statistics/lecture/boxwhisker.py b/statistics/lecture/boxwhisker.py index 1209f7e..38a0dcb 100644 --- a/statistics/lecture/boxwhisker.py +++ b/statistics/lecture/boxwhisker.py @@ -43,5 +43,5 @@ ax.annotate('maximum', ax.boxplot( x, whis=100.0 ) plt.tight_layout() plt.savefig('boxwhisker.pdf') -plt.show() +#plt.show() diff --git a/statistics/lecture/correlation.py b/statistics/lecture/correlation.py index db317c7..2fa85d4 100644 --- a/statistics/lecture/correlation.py +++ b/statistics/lecture/correlation.py @@ -5,7 +5,6 @@ plt.xkcd() fig = plt.figure( figsize=(6,5) ) n = 200 for k, r in enumerate( [ 1.0, 0.6, 0.0, -0.9 ] ) : - print r x = np.random.randn( n ) y = r*x + np.sqrt(1.0-r*r)*np.random.randn( n ) ax = fig.add_subplot( 2, 2, k+1 ) @@ -30,5 +29,4 @@ for k, r in enumerate( [ 1.0, 0.6, 0.0, -0.9 ] ) : plt.tight_layout() plt.savefig('correlation.pdf') -plt.show() - +#plt.show() diff --git a/statistics/lecture/diehistograms.py b/statistics/lecture/diehistograms.py index d5e0380..a8832e0 100644 --- a/statistics/lecture/diehistograms.py +++ b/statistics/lecture/diehistograms.py @@ -28,5 +28,4 @@ ax.set_ylabel( 'Probability' ) ax.hist([x2, x1], bins, normed=True, color=['#FFCC00', '#FFFF66' ]) plt.tight_layout() fig.savefig( 'diehistograms.pdf' ) -plt.show() - +#plt.show() diff --git a/statistics/lecture/median.py b/statistics/lecture/median.py index 2bf420c..a1231f9 100644 --- a/statistics/lecture/median.py +++ b/statistics/lecture/median.py @@ -29,5 +29,4 @@ ax.plot(x,g, 'b', lw=4) ax.plot([0.0, 0.0], [0.0, 0.45], 'k', lw=2 ) plt.tight_layout() fig.savefig( 'median.pdf' ) -plt.show() - +#plt.show() diff --git a/statistics/lecture/nonlincorrelation.py b/statistics/lecture/nonlincorrelation.py index c0ca723..498d985 100644 --- a/statistics/lecture/nonlincorrelation.py +++ b/statistics/lecture/nonlincorrelation.py @@ -39,4 +39,4 @@ ax.scatter( x, z ) plt.tight_layout() plt.savefig('nonlincorrelation.pdf') -plt.show() +#plt.show() diff --git a/statistics/lecture/pdfhistogram.py b/statistics/lecture/pdfhistogram.py index 039b524..a61460e 100644 --- a/statistics/lecture/pdfhistogram.py +++ b/statistics/lecture/pdfhistogram.py @@ -35,5 +35,5 @@ ax.hist(r, 20, normed=True, color='#FFCC00') plt.tight_layout() fig.savefig( 'pdfhistogram.pdf' ) -plt.show() +#plt.show() diff --git a/statistics/lecture/pdfprobabilities.py b/statistics/lecture/pdfprobabilities.py index 6481da1..bcbaa09 100644 --- a/statistics/lecture/pdfprobabilities.py +++ b/statistics/lecture/pdfprobabilities.py @@ -32,5 +32,4 @@ ax.fill_between( x[(x>x1)&(xx1)&(x +{ + \usetheme{Singapore} + \setbeamercovered{opaque} + \usecolortheme{tuebingen} + \setbeamertemplate{navigation symbols}{} + \usefonttheme{default} + \useoutertheme{infolines} + % \useoutertheme{miniframes} +} + +%\AtBeginSection[] +%{ +% \begin{frame} +% \begin{center} +% \Huge \insertsectionhead +% \end{center} +% \end{frame} +%} + +\setbeamertemplate{blocks}[rounded][shadow=true] +\setcounter{tocdepth}{1} + +%%%%% packages %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage[english]{babel} +\usepackage{amsmath} +\usepackage{bm} +\usepackage{pslatex} % nice font for pdf file +%\usepackage{multimedia} + +\usepackage{dsfont} +\newcommand{\naZ}{\mathds{N}} +\newcommand{\gaZ}{\mathds{Z}} +\newcommand{\raZ}{\mathds{Q}} +\newcommand{\reZ}{\mathds{R}} +\newcommand{\reZp}{\mathds{R^+}} +\newcommand{\reZpN}{\mathds{R^+_0}} +\newcommand{\koZ}{\mathds{C}} + +%%%% graphics 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{graphicx} +\newcommand{\texpicture}[1]{{\sffamily\small\input{#1.tex}}} + +%%%%% listings %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\usepackage{listings} +\lstset{ + basicstyle=\ttfamily, + numbers=left, + showstringspaces=false, + language=Matlab, + commentstyle=\itshape\color{darkgray}, + keywordstyle=\color{blue}, + stringstyle=\color{green}, + backgroundcolor=\color{blue!10}, + breaklines=true, + breakautoindent=true, + columns=flexible, + frame=single, + captionpos=b, + xleftmargin=1em, + xrightmargin=1em, + aboveskip=10pt + } + +\graphicspath{{figures/}} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{document} + +\begin{frame}[plain] + \frametitle{} + \vspace{-1cm} + \titlepage % erzeugt Titelseite +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{frame} + \frametitle{Content} + \tableofcontents +\end{frame} + + +\subsection{What is inferential statistics?} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{frame} + \frametitle{sources of error in an experiment} + \begin{task}{Think about it for 2 min} + If you repeat a scientific experiment, why do you not get the same + result every time you repeat it? + \end{task} + \pause + \begin{itemize} + \item sampling error (a finite subset of the population of interest + is selected in each experiment) + \item nonsampling errors (e.g. noise, uncontrolled factors) + \end{itemize} +\end{frame} + +% ---------------------------------------------------------- +\begin{frame}[fragile] +\frametitle{statisticians are lazy} +\Large +\only<1>{ + \begin{center} + \includegraphics[width=.8\linewidth]{2012-10-29_16-26-05_771.jpg} + \end{center} + \mycite{Larry Gonick, The Cartoon Guide to Statistics} +}\pause +\only<2>{ + \begin{center} + \includegraphics[width=.8\linewidth]{2012-10-29_16-41-39_523.jpg} + \end{center} + \mycite{Larry Gonick, The Cartoon Guide to Statistics} +}\pause +\only<3>{ + \begin{center} + \includegraphics[width=.8\linewidth]{2012-10-29_16-29-35_312.jpg} + \end{center} + \mycite{Larry Gonick, The Cartoon Guide to Statistics} +} +\end{frame}
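The sampling error mentioned above is easy to demonstrate with a quick simulation (a sketch, not part of the lecture code): repeat the same simulated experiment several times and watch the estimated mean change from repetition to repetition.

import numpy as np

rng = np.random.RandomState(42)
for trial in range(5):
    # one "experiment": 20 noisy measurements of a quantity with true mean 5.0
    sample = 5.0 + rng.randn(20)
    print('repetition %d: sample mean = %.2f' % (trial + 1, np.mean(sample)))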