diff --git a/simulations/lecture/simulations.tex b/simulations/lecture/simulations.tex
index b3c2b38..e68447f 100644
--- a/simulations/lecture/simulations.tex
+++ b/simulations/lecture/simulations.tex
@@ -21,6 +21,40 @@ field of nonlinear dynamical systems forward. Conceptually, many kinds
 of simulations are very simple and are implemented in a few lines of
 code.
 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Random numbers}
+At the heart of many simulations are random numbers, which are
+provided by pseudo random number generators. These are numerical
+algorithms that return sequences of numbers that appear to be as
+random as possible. If we draw random numbers using, for example, the
+\code{rand()} function, then these numbers are indeed uniformly
+distributed and have a mean of one half. Subsequent numbers are also
+independent of each other, i.e. the autocorrelation function is zero
+everywhere except at lag zero. However, numerical random number
+generators have a period, after which they repeat the exact same
+sequence. This differentiates them from truly random numbers and
+hence they are called \enterm{pseudo random number generators}. In
+rare cases this periodicity can induce problems in your simulations.
+Luckily, nowadays the periods of random number generators are very
+large, $2^{64}$, $2^{128}$, or even larger.
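+
+As a minimal sketch, these properties can be checked by drawing many
+random numbers with \code{rand()} and computing their mean and the
+correlation between successive numbers:
+\begin{lstlisting}
+x = rand(10000, 1);              % uniform random numbers in [0, 1)
+mean(x)                          % close to one half
+r = corrcoef(x(1:end-1), x(2:end));
+r(1, 2)                          % lag-one correlation, close to zero
+\end{lstlisting}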
+
+An advantage of pseudo random numbers is that they can be exactly
+repeated given a defined state or seed of the random number
+generator. After defining the state of the generator or setting a
+\term{seed} with the \code{rng()} function, the exact same sequence of
+random numbers is generated by subsequent calls of the random number
+generator. This is particularly useful for plots that involve some
+random numbers but should look the same whenever the script is run.
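+
+As a minimal sketch, setting the same (arbitrarily chosen) seed twice
+reproduces the exact same sequence:
+\begin{lstlisting}
+rng(42);                         % set the seed
+a = rand(1, 5);                  % five uniform random numbers
+rng(42);                         % same seed again ...
+b = rand(1, 5);                  % ... yields the exact same numbers
+all(a == b)                      % 1, i.e. true
+\end{lstlisting}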
+
+% TODO Figure XXX: three sequences - initial one, second different one
+% with seed, third with same seed. Fourth panel with autocorrelation
+% function.
+
+\begin{exercise}{}{}
+  Generate the same sequence of 20 uniformly distributed random
+  numbers three times using the \code{rand()} and \code{rng()}
+  functions.
+\end{exercise}
+
+
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \section{Univariate data}
 The most basic type of simulation is to draw random numbers from a
@@ -77,8 +111,9 @@ gamma
 \subsection{Random integers}
 \code{randi()}
 
+
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\section{Static nonlinearities}
+\section{Bivariate data and static nonlinearities}
 
 \begin{figure}[t]
   \includegraphics[width=1\textwidth]{staticnonlinearity}
diff --git a/simulations/lecture/staticnonlinearity.py b/simulations/lecture/staticnonlinearity.py
index b290ac7..bf7929e 100644
--- a/simulations/lecture/staticnonlinearity.py
+++ b/simulations/lecture/staticnonlinearity.py
@@ -8,14 +8,14 @@ def boltzmann(x, x0, k):
     return 1.0/(1.0+np.exp(-k*(x-x0)))
 
 if __name__ == "__main__":
-    n = 70
+    n = 50
     xmin = -18.0
     xmax = 18.0
     x0 = 2.0
-    k = 0.3
+    k = 0.25
     sigma = 0.08
-    rng = np.random.RandomState(38281)
-    x = (xmax-xmin)*rng.rand(n) + xmin
+    rng = np.random.RandomState(15281)
+    x = np.linspace(xmin, xmax, n)
     y = boltzmann(x, x0, k) + sigma*rng.randn(len(x))
     xx = np.linspace(xmin, xmax, 200)
     yy = boltzmann(xx, x0, k)
@@ -30,9 +30,9 @@ if __name__ == "__main__":
     ax1.set_xlabel('Hair deflection / nm')
     ax1.set_ylabel('Open probability')
     ax1.set_xlim(-20, 20)
-    ax1.set_ylim(-0.17, 1.17)
+    ax1.set_ylim(-0.2, 1.17)
     ax1.set_xticks(np.arange(-20.0, 21.0, 10.0))
-    ax1.set_yticks(np.arange(0, 1.1, 0.2))
+    ax1.set_yticks(np.arange(-0.2, 1.1, 0.2))
     
     ax2 = fig.add_subplot(spec[0, 1])
     show_spines(ax2, 'lb')