diff --git a/Fitter.py b/Fitter.py
index 82b6b6d..ea1b17f 100644
--- a/Fitter.py
+++ b/Fitter.py
@@ -19,7 +19,7 @@ def main():
     run_with_real_data()
 
 
-def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_strength_list=None, dend_tau_list=None):
+def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_strength_list=None, dend_tau_list=None, tau_a_list=None, delta_a_list=None):
     # mem_tau, input_scaling, noise_strength, dend_tau,
     # expand by tau_a, delta_a ?
     if mem_tau_list is None:
@@ -27,9 +27,13 @@ def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_stre
     if input_scaling_list is None:
         input_scaling_list = [40, 60, 80]
     if noise_strength_list is None:
-        noise_strength_list = [0.02, 0.06]
+        noise_strength_list = [0.03]  # [0.02, 0.06]
     if dend_tau_list is None:
         dend_tau_list = [0.001, 0.002]
+    # if tau_a_list is None:
+    #     tau_a_list =
+    # if delta_a_list is None:
+    #     delta_a_list =
 
     for mem_tau in mem_tau_list:
         for input_scaling in input_scaling_list:
@@ -44,7 +48,8 @@ def run_with_real_data():
         start_par_count = 0
         for start_parameters in iget_start_parameters():
             start_par_count += 1
-            if start_par_count <= 4:
+            print("START PARAMETERS:", start_par_count)
+            if start_par_count <= 0:
                 continue
             print("cell:", cell_data.get_data_path())
             trace = cell_data.get_base_traces(trace_type=cell_data.V1)
@@ -141,7 +146,6 @@ class Fitter:
         self.f_inf_values = []
         self.f_inf_slope = 0
 
-
         self.f_zero_values = []
         self.f_zero_slope = 0
         self.f_zero_fit = []
@@ -175,7 +179,7 @@ class Fitter:
 
         # print("delta_a: {:.3f}".format(self.delta_a), "tau_a: {:.3f}".format(self.tau_a))
 
-        return self.fit_routine_4(data, start_parameters)
+        return self.fit_routine_5(data, start_parameters)
         # return self.fit_model(fit_adaption=False)
 
     def fit_routine_1(self, cell_data=None):
@@ -252,55 +256,76 @@ class Fitter:
         if start_parameters is None:
             x0 = np.array([0.02, 70, 0.001])
         else:
-            x0 = np.array([start_parameters["mem_tau"], start_parameters["input_scaling"], start_parameters["dend_tau"]])
+            x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
+                           start_parameters["input_scaling"], start_parameters["dend_tau"]])
         initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 1, 5, 1, 2, 0, 0)
-        fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau_no_noise,
+        error_weights = (0, 5, 15, 1, 2, 1, 0)
+        fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
                               args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
-                              options={"initial_simplex": initial_simplex, "xatol": 0.001})
+                              options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
         res_parameters = fmin.x
 
-        print_comparision_cell_model(cell_data, self.base_model.get_parameters())
+        # print_comparision_cell_model(cell_data, self.base_model.get_parameters())
 
         self.counter = 0
-        x0 = np.array([res_parameters[0], res_parameters[1], self.tau_a,
-                       self.delta_a, res_parameters[2]])
+        x0 = np.array([self.tau_a,
+                       self.delta_a, res_parameters[0]])
         initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 0, 0, 0, 0, 4, 2)
-        fmin = minimize(fun=self.cost_function_all_without_noise,
+        error_weights = (0, 1, 1, 2, 2, 4, 2)
+        fmin = minimize(fun=self.cost_function_only_adaption,
                         args=(error_weights,), x0=x0, method="Nelder-Mead",
                         options={"initial_simplex": initial_simplex, "xatol": 0.001})
         res_parameters = fmin.x
         print(fmin)
         print_comparision_cell_model(cell_data, self.base_model.get_parameters())
 
-        self.counter = 0
-        x0 = np.array([res_parameters[0],
-                       res_parameters[1], self.tau_a,
-                       self.delta_a, res_parameters[2]])
-        initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 0, 1, 0, 0, 5, 2)
-        fmin = minimize(fun=self.cost_function_all_without_noise,
-                        args=(error_weights,), x0=x0, method="Nelder-Mead",
-                        options={"initial_simplex": initial_simplex, "xatol": 0.001})
-        res_parameters = self.base_model.get_parameters()
-
-        print_comparision_cell_model(cell_data, self.base_model.get_parameters())
-
-        # noise_strength = 0.03
+        #
+        # # self.counter = 0
+        # # x0 = np.array([res_parameters[0],
+        # #                res_parameters[1], self.tau_a,
+        # #                self.delta_a, res_parameters[2]])
+        # # initial_simplex = create_init_simples(x0, search_scale=2)
+        # # error_weights = (1, 3, 1, 2, 1, 3, 2)
+        # # fmin = minimize(fun=self.cost_function_all_without_noise,
+        # #                 args=(error_weights,), x0=x0, method="Nelder-Mead",
+        # #                 options={"initial_simplex": initial_simplex, "xatol": 0.001})
+        # # res_parameters = self.base_model.get_parameters()
+        # #
+        # # print_comparision_cell_model(cell_data, self.base_model.get_parameters())
+        #
         # self.counter = 0
-        # x0 = np.array([res_parameters["mem_tau"], noise_strength,
-        #                res_parameters["input_scaling"], res_parameters["tau_a"],
-        #                res_parameters["delta_a"], res_parameters["dend_tau"]])
+        # x0 = np.array([res_parameters[0], start_parameters["noise_strength"],
+        #                res_parameters[1], res_parameters[2],
+        #                res_parameters[3], res_parameters[4]])
         # initial_simplex = create_init_simples(x0, search_scale=2)
-        # error_weights = (0, 2, 2, 1, 1, 3, 2)
+        # error_weights = (0, 1, 2, 1, 1, 3, 2)
         # fmin = minimize(fun=self.cost_function_all,
         #                 args=(error_weights,), x0=x0, method="Nelder-Mead",
-        #                 options={"initial_simplex": initial_simplex, "xatol": 0.001})
+        #                 options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxiter": 599})
         # res_parameters = self.base_model.get_parameters()
 
         return fmin, self.base_model.get_parameters()
 
+    def fit_routine_5(self, cell_data=None, start_parameters=None):
+        global SAVE_PATH_PREFIX
+        SAVE_PATH_PREFIX = "fit_routine_5_"
+        # errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
+        self.counter = 0
+        # fit only v_offset, mem_tau, input_scaling, dend_tau
+        if start_parameters is None:
+            x0 = np.array([0.02, 70, self.tau_a, self.delta_a, 0.001])
+        else:
+            x0 = np.array([start_parameters["mem_tau"], start_parameters["input_scaling"],
+                           self.tau_a, self.delta_a, start_parameters["dend_tau"]])
+        initial_simplex = create_init_simples(x0, search_scale=2)
+        error_weights = (0, 1, 1, 1, 1, 2, 1)
+        fmin = minimize(fun=self.cost_function_all_without_noise,
+                        args=(error_weights,), x0=x0, method="Nelder-Mead",
+                        options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
+        res_parameters = fmin.x
+
+        return fmin, self.base_model.get_parameters()
+
     def fit_model(self, x0=None, initial_simplex=None, fit_adaption=False):
         self.counter = 0
 
@@ -361,9 +386,10 @@ class Fitter:
 
         return sum(error_list)
 
-    def cost_function_only_adaption_and_v_offset(self, X, error_weights=None):
+    def cost_function_only_adaption(self, X, error_weights=None):
         self.base_model.set_variable("tau_a", X[0])
         self.base_model.set_variable("delta_a", X[1])
+        self.base_model.set_variable("mem_tau", X[2])
 
         base_stimulus = SinusoidalStepStimulus(self.eod_freq, 0)
         # find right v-offset
@@ -376,14 +402,6 @@ class Fitter:
 
         return sum(error_list)
 
-    def cost_function_only_adaption(self, X, error_weights=None):
-        self.base_model.set_variable("tau_a", X[0])
-        self.base_model.set_variable("delta_a", X[1])
-
-        error_list = self.calculate_errors(error_weights)
-
-        return sum(error_list)
-
     def cost_function_with_fixed_adaption(self, X, tau_a, delta_a, error_weights=None):
         # set model parameters:
         model = self.base_model
@@ -453,10 +471,20 @@ class Fitter:
 
         # f_infinities, f_infinities_slope = self.base_model.calculate_fi_markers(self.fi_contrasts, self.eod_freq)
         f_baselines, f_zeros, f_infinities = self.base_model.calculate_fi_curve(self.fi_contrasts, self.eod_freq)
-        f_infinities_fit = hF.fit_clipped_line(self.fi_contrasts, f_infinities)
-        f_infinities_slope = f_infinities_fit[0]
+        try:
+            f_infinities_fit = hF.fit_clipped_line(self.fi_contrasts, f_infinities)
+        except Exception as e:
+            print("EXCEPTION IN FIT LINE!")
+            print(e)
+            f_infinities_fit = [0, 0]
 
-        f_zeros_fit = hF.fit_boltzmann(self.fi_contrasts, f_zeros)
+        f_infinities_slope = f_infinities_fit[0]
+        try:
+            f_zeros_fit = hF.fit_boltzmann(self.fi_contrasts, f_zeros)
+        except Exception as e:
+            print("EXCEPTION IN FIT BOLTZMANN!")
+            print(e)
+            f_zeros_fit = [0, 0, 0, 0]
         f_zero_slope = fu.full_boltzmann_straight_slope(f_zeros_fit[0], f_zeros_fit[1], f_zeros_fit[2], f_zeros_fit[3])
         # print("fi-curve features calculated!")
         # calculate errors with reference values
@@ -478,7 +506,7 @@ class Fitter:
         error = sum(error_list)
 
         self.counter += 1
-        if self.counter % 200 == 0 and False: # TODO currently shut off!
+        if self.counter % 200 == 0:  # and False: # TODO currently shut off!
             print("\nCost function run times: {:}\n".format(self.counter),
                   "Total weighted error: {:.4f}\n".format(error),
                   "Baseline frequency - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
diff --git a/helperFunctions.py b/helperFunctions.py
index 15f6a12..da217b2 100644
--- a/helperFunctions.py
+++ b/helperFunctions.py
@@ -3,6 +3,7 @@ from warnings import warn
 from thunderfish.eventdetection import detect_peaks, threshold_crossing_times, threshold_crossings
 from scipy.optimize import curve_fit
 import functions as fu
+from numba import jit
 
 
 def fit_clipped_line(x, y):
@@ -27,6 +28,11 @@ def fit_boltzmann(x, y):
     return popt
 
 
+@jit(nopython=True)
+def rectify_stimulus_array(stimulus_array: np.ndarray):
+    return np.maximum(stimulus_array, 0)
+
+
 def merge_similar_intensities(intensities, spiketimes, trans_amplitudes):
     i = 0
 
@@ -151,7 +157,8 @@ def calculate_isi_frequency_trace(spiketimes, sampling_interval, time_in_ms=Fals
 
 def calculate_time_and_frequency_trace(spiketimes, sampling_interval, time_in_ms=False):
     if len(spiketimes) < 2:
-        pass # raise ValueError("Cannot compute a time and frequency vector with fewer than 2 spikes")
+        return [0], [0]
+        # raise ValueError("Cannot compute a time and frequency vector with fewer than 2 spikes")
 
     frequency = calculate_isi_frequency_trace(spiketimes, sampling_interval, time_in_ms)
 
@@ -161,7 +168,6 @@ def calculate_time_and_frequency_trace(spiketimes, sampling_interval, time_in_ms
         if len(time) > len(frequency):
             time = time[:len(frequency)]
 
-
     return time, frequency
 
 
@@ -430,6 +436,7 @@ def detect_f_zero_in_frequency_trace(time, frequency, stimulus_start, sampling_i
 
     if len(freq_before) < 3:
         print("mäh")
+        return 0
 
     min_before = min(freq_before)
     max_before = max(freq_before)
diff --git a/introduction/test.py b/introduction/test.py
index 73110b1..f3edf7b 100644
--- a/introduction/test.py
+++ b/introduction/test.py
@@ -2,27 +2,10 @@ import numpy as np
 import matplotlib.pyplot as plt
 import helperFunctions as hF
 import time
+from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus
 
-def main():
-
-
-    time = np.arange(-1, 30, 0.0001)
-    eod = np.sin(2*np.pi * 600 * time)
-
-
-    signs = np.sign(eod[:-1]) != np.sign(eod[1:])
-    delta = eod[:-1] < eod[1:]
-
-    sign_changes = np.where(signs & delta)[0]
-
-    plt.plot(time, eod)
-    plt.plot([time[i] for i in sign_changes], [0]*len(sign_changes), 'o')
-    plt.show()
-
-
-    print(sign_changes)
-    quit()
 
+def main():
 
     for freq in [700, 50, 100, 500, 1000]:
         reps = 1000
diff --git a/main.py b/main.py
index 98f905a..fb5dc44 100644
--- a/main.py
+++ b/main.py
@@ -1,10 +1,6 @@
-
-from FiCurve import FICurve
 from CellData import icelldata_of_dir
-import os
-import helperFunctions as hf
-from AdaptionCurrent import Adaption
-from functionalityTests import *
+
+
 # TODO command line interface needed/nice ?
 
 
diff --git a/models/LIFACnoise.py b/models/LIFACnoise.py
index f77716e..224de90 100644
--- a/models/LIFACnoise.py
+++ b/models/LIFACnoise.py
@@ -32,6 +32,7 @@ class LifacNoiseModel(AbstractModel):
         if self.parameters["step_size"] > 0.0001:
             warn("LifacNoiseModel: The step size is quite big simulation could fail.")
         self.voltage_trace = []
+        self.input_voltage = []
         self.adaption_trace = []
         self.spiketimes = []
         self.stimulus = None
@@ -43,18 +44,19 @@ class LifacNoiseModel(AbstractModel):
         time = np.arange(0, total_time_s, self.parameters["step_size"])
         output_voltage = np.zeros(len(time), dtype='float64')
         adaption = np.zeros(len(time), dtype='float64')
+        input_voltage = np.zeros(len(time), dtype='float64')
         spiketimes = []
 
         current_v = self.parameters["v_zero"]
         current_a = self.parameters["a_zero"]
+        input_voltage[0] = fu.rectify(stimulus.value_at_time_in_s(time[0]))
         output_voltage[0] = current_v
         adaption[0] = current_a
 
         for i in range(1, len(time), 1):
             time_point = time[i]
             # rectified input:
-            stimulus_strength = fu.rectify(stimulus.value_at_time_in_s(time_point)) * self.parameters["input_scaling"]
-
+            stimulus_strength = self._calculate_input_voltage_step(input_voltage[i-1], fu.rectify(stimulus.value_at_time_in_s(time_point)))
             v_next = self._calculate_voltage_step(current_v, stimulus_strength - current_a)
             a_next = self._calculate_adaption_step(current_a)
 
@@ -65,6 +67,7 @@ class LifacNoiseModel(AbstractModel):
 
             output_voltage[i] = v_next
             adaption[i] = a_next
+            input_voltage[i] = stimulus_strength
 
             current_v = v_next
             current_a = a_next
@@ -72,6 +75,7 @@ class LifacNoiseModel(AbstractModel):
         self.voltage_trace = output_voltage
         self.adaption_trace = adaption
         self.spiketimes = spiketimes
+        self.input_voltage = input_voltage
 
         return output_voltage, spiketimes
 
@@ -91,6 +95,10 @@ class LifacNoiseModel(AbstractModel):
         step_size = self.parameters["step_size"]
         return current_a + (step_size * (-current_a)) / self.parameters["tau_a"]
 
+    def _calculate_input_voltage_step(self, current_i, rectified_input):
+        # input_voltage[i] = input_voltage[i - 1] + (-input_voltage[i - 1] + rectified_stimulus_array[i] * input_scaling) / dend_tau
+        return current_i + ((-current_i + rectified_input * self.parameters["input_scaling"]) / self.parameters["dend_tau"]) * self.parameters["step_size"]
+
     def simulate_fast(self, stimulus: AbstractStimulus, total_time_s, time_start=0):
 
         v_zero = self.parameters["v_zero"]
@@ -109,9 +117,10 @@ class LifacNoiseModel(AbstractModel):
         rectified_stimulus = rectify_stimulus_array(stimulus.as_array(time_start, total_time_s, step_size))
         parameters = np.array([v_zero, a_zero, step_size, threshold, v_base, delta_a, tau_a, v_offset, mem_tau, noise_strength, time_start, input_scaling, dend_tau])
 
-        voltage_trace, adaption, spiketimes = simulate_fast(rectified_stimulus, total_time_s, parameters)
+        voltage_trace, adaption, spiketimes, input_voltage = simulate_fast(rectified_stimulus, total_time_s, parameters)
 
         self.stimulus = stimulus
+        self.input_voltage = input_voltage
         self.voltage_trace = voltage_trace
         self.adaption_trace = adaption
         self.spiketimes = spiketimes
@@ -207,32 +216,36 @@ class LifacNoiseModel(AbstractModel):
 
     def calculate_fi_curve(self, contrasts, stimulus_freq):
 
-        max_time_constant = max([self.parameters["tau_a"], self.parameters["mem_tau"]])
-        factor_to_equilibrium = 5
-        stim_duration = max_time_constant * factor_to_equilibrium
-        stim_start = max_time_constant * factor_to_equilibrium
-        total_simulation_time = max_time_constant * factor_to_equilibrium * 3
+
+        stim_duration = 0.5
+        stim_start = 0.5
+        total_simulation_time = stim_duration + 2 * stim_start
         # print("Total simulation time (vs 2.5) {:.2f}".format(total_simulation_time))
 
         sampling_interval = self.get_sampling_interval()
         f_infinities = []
         f_zeros = []
         f_baselines = []
-        import matplotlib.pyplot as plt
         for c in contrasts:
             stimulus = SinusoidalStepStimulus(stimulus_freq, c, stim_start, stim_duration)
             _, spiketimes = self.simulate_fast(stimulus, total_simulation_time)
 
+            # if len(spiketimes) > 0:
+            #     print("min:", min(spiketimes), "max:", max(spiketimes), "len:", len(spiketimes))
+            # else:
+            #     print("spiketimes empty")
             time, frequency = hF.calculate_time_and_frequency_trace(spiketimes, sampling_interval)
             # if c == contrasts[0] or c == contrasts[-1]:
             #    plt.plot(frequency)
             #    plt.show()
 
-            if len(time) == 0 or time[0] >= stim_start or len(spiketimes) < 5:
+            if len(spiketimes) < 10 or len(time) == 0 or min(time) > stim_start or max(time) < stim_start+stim_duration:
+                print("Too few spikes to calculate f_inf, f_0 and f_base")
                 f_infinities.append(0)
                 f_zeros.append(0)
                 f_baselines.append(0)
                 continue
+
             f_inf = hF.detect_f_infinity_in_freq_trace(time, frequency, stim_start, stim_duration, sampling_interval)
             f_infinities.append(f_inf)
 
@@ -242,7 +255,7 @@ class LifacNoiseModel(AbstractModel):
             f_baseline = hF.detect_f_baseline_in_freq_trace(time, frequency, stim_start, sampling_interval)
             f_baselines.append(f_baseline)
 
-
+            # import matplotlib.pyplot as plt
             # fig, axes = plt.subplots(2, 1, sharex="all")
             # stim_time = np.arange(0,3.5, sampling_interval)
             # axes[0].set_title("Contrast: " + str(c))
@@ -359,8 +372,8 @@ def simulate_fast(rectified_stimulus_array, total_time_s, parameters: np.ndarray
         noise_value = np.random.normal()
         noise = noise_strength * noise_value / np.sqrt(step_size)
 
-        input_voltage[i] = input_voltage[i - 1] + (-input_voltage[i - 1] + rectified_stimulus_array[i] * input_scaling) / dend_tau
-        output_voltage[i] = output_voltage[i-1] + ((v_base - output_voltage[i-1] + v_offset + (rectified_stimulus_array[i] * input_scaling) - adaption[i-1] + noise) / mem_tau) * step_size
+        input_voltage[i] = input_voltage[i - 1] + ((-input_voltage[i - 1] + rectified_stimulus_array[i]) / dend_tau) * step_size
+        output_voltage[i] = output_voltage[i-1] + ((v_base - output_voltage[i-1] + v_offset + (input_voltage[i] * input_scaling) - adaption[i-1] + noise) / mem_tau) * step_size
         adaption[i] = adaption[i-1] + ((-adaption[i-1]) / tau_a) * step_size
 
         if output_voltage[i] > threshold:
@@ -368,7 +381,7 @@ def simulate_fast(rectified_stimulus_array, total_time_s, parameters: np.ndarray
             spiketimes.append(i*step_size)
             adaption[i] += delta_a / tau_a
 
-    return output_voltage, adaption, spiketimes
+    return output_voltage, adaption, spiketimes, input_voltage
 
 
 
diff --git a/tests/ModelTests.py b/tests/ModelTests.py
index b79aad9..2984922 100644
--- a/tests/ModelTests.py
+++ b/tests/ModelTests.py
@@ -5,7 +5,7 @@ import helperFunctions as hf
 from models.FirerateModel import FirerateModel
 from models.LIFACnoise import LifacNoiseModel
 from stimuli.StepStimulus import StepStimulus
-from stimuli.SinusAmplitudeModulation import SinusAmplitudeModulationStimulus
+from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus
 import functions as fu
 
 
@@ -52,16 +52,16 @@ def test_lifac_noise():
 
     model = LifacNoiseModel()
 
-    start = 0.2
-    duration = 30
+    start = 1
+    duration = 3
     total_time = duration + 2*start
-    step_size = model.get_parameters()["step_size"]/1000
+    step_size = model.get_parameters()["step_size"]
     time = np.arange(0, total_time, step_size)
-    stimulus = SinusAmplitudeModulationStimulus(700, 0.2, 10, 20, start, duration)
+    stimulus = SinusoidalStepStimulus(700, 0.2, start, duration)
 
-    model.simulate(stimulus, total_time)
+    model.simulate_fast(stimulus, total_time)
 
-    fig, axes = plt.subplots(nrows=3, sharex="col")
+    fig, axes = plt.subplots(nrows=4, sharex="col")
     sparse_time = np.arange(0, total_time, 1/5000)
     axes[0].plot(sparse_time, [stimulus.value_at_time_in_s(x) for x in sparse_time], label="given stimulus")
     axes[0].plot(sparse_time, [fu.rectify(stimulus.value_at_time_in_s(x)) for x in sparse_time], label="seen stimulus")
@@ -69,54 +69,63 @@ def test_lifac_noise():
     axes[0].set_ylabel("stimulus strength")
     axes[0].legend()
 
-    axes[1].plot(time, model.get_voltage_trace())
-    axes[1].set_title("Voltage trace")
-    axes[1].set_ylabel("voltage")
+    input_voltage = model.input_voltage
+    axes[1].plot(time, input_voltage)
+    axes[1].set_title("Stimulus after dendritic filter")
+    axes[1].set_ylabel("stimulus strength")
 
-    t, f = hf.calculate_isi_frequency_trace(model.get_spiketimes(), 0, step_size)
-    axes[2].plot(t, f)
-    axes[2].set_title("ISI frequency trace")
-    axes[2].set_ylabel("Frequency")
+    voltage_trace = model.get_voltage_trace()
+    axes[2].plot(time, voltage_trace)
+    axes[2].set_title("Voltage trace")
+    axes[2].set_ylabel("voltage")
 
-    spiketimes_small_step = model.get_spiketimes()
+    spiketimes = model.get_spiketimes()
+    t, f = hf.calculate_time_and_frequency_trace(spiketimes, step_size)
+    axes[3].plot(t, f)
+    axes[3].set_title("ISI frequency trace")
+    axes[3].set_ylabel("Frequency")
 
-    model.set_variable("step_size", 0.02)
-    model.simulate(stimulus, total_time)
-    print(model.get_adaption_trace()[int(0.1/(0.01/1000))])
-    step_size = model.get_parameters()["step_size"] / 1000
-    time = np.arange(0, total_time, step_size)
-    t, f = hf.calculate_isi_frequency_trace(model.get_spiketimes(), 0, step_size)
-
-    axes[1].plot(time, model.get_voltage_trace())
-    axes[2].plot(t, f)
-
-    spiketimes_big_step = model.get_spiketimes()
-
-    print("CV:")
-    print("small step:", hf.calculate_coefficient_of_variation(spiketimes_small_step))
-    print("big   step:", hf.calculate_coefficient_of_variation(spiketimes_big_step))
-
-    plt.show()
-    plt.close()
-
-    max_lag = 5
-    x = np.arange(1, max_lag+1, 1)
-    serial_cor_small = hf.calculate_serial_correlation(spiketimes_small_step, max_lag)
-    serial_cor_big = hf.calculate_serial_correlation(spiketimes_big_step, max_lag)
-
-    print(serial_cor_small)
-    print(serial_cor_big)
-    plt.plot(x, serial_cor_small, 'o', label='small step',)
-    plt.plot(x, serial_cor_big, 'o', label='big step')
-    plt.ylim(-1, 1)
-    plt.legend()
     plt.show()
-    plt.close()
 
-    bins = np.arange(0, max(np.diff(spiketimes_small_step)), 0.0001)
-    plt.hist(np.diff(spiketimes_small_step), bins=bins, alpha=0.5)
-    plt.hist(np.diff(spiketimes_big_step), bins=bins, alpha=0.5)
-    plt.show()
+    # spiketimes_small_step = model.get_spiketimes()
+    #
+    # model.set_variable("step_size", 0.02)
+    # model.simulate(stimulus, total_time)
+    # print(model.get_adaption_trace()[int(0.1/(0.01/1000))])
+    # step_size = model.get_parameters()["step_size"] / 1000
+    # time = np.arange(0, total_time, step_size)
+    # t, f = hf.calculate_time_and_frequency_trace(model.get_spiketimes(), 0.02)
+    #
+    # axes[1].plot(time, model.get_voltage_trace())
+    # axes[2].plot(t, f)
+    #
+    # spiketimes_big_step = model.get_spiketimes()
+    #
+    # print("CV:")
+    # print("small step:", hf.calculate_coefficient_of_variation(spiketimes_small_step))
+    # print("big   step:", hf.calculate_coefficient_of_variation(spiketimes_big_step))
+
+    # plt.show()
+    # plt.close()
+    #
+    # max_lag = 5
+    # x = np.arange(1, max_lag+1, 1)
+    # serial_cor_small = hf.calculate_serial_correlation(spiketimes_small_step, max_lag)
+    # serial_cor_big = hf.calculate_serial_correlation(spiketimes_big_step, max_lag)
+    #
+    # print(serial_cor_small)
+    # print(serial_cor_big)
+    # plt.plot(x, serial_cor_small, 'o', label='small step',)
+    # plt.plot(x, serial_cor_big, 'o', label='big step')
+    # plt.ylim(-1, 1)
+    # plt.legend()
+    # plt.show()
+    # plt.close()
+    #
+    # bins = np.arange(0, max(np.diff(spiketimes_small_step)), 0.0001)
+    # plt.hist(np.diff(spiketimes_small_step), bins=bins, alpha=0.5)
+    # plt.hist(np.diff(spiketimes_big_step), bins=bins, alpha=0.5)
+    # plt.show()
 
 
 if __name__ == '__main__':
diff --git a/functionalityTests.py b/tests/functionalityTests.py
similarity index 100%
rename from functionalityTests.py
rename to tests/functionalityTests.py
diff --git a/generalTests.py b/tests/generalTests.py
similarity index 97%
rename from generalTests.py
rename to tests/generalTests.py
index 0a58126..4190ce2 100644
--- a/generalTests.py
+++ b/tests/generalTests.py
@@ -28,7 +28,7 @@ def time_test_function():
 
 
 def test_cell_data():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
         #if "2012-12-20-ad" not in cell_data.get_data_path():
         #    continue
         print()
@@ -47,7 +47,7 @@ def test_cell_data():
 
 
 def test_peak_detection():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
         print()
         print(cell_data.get_data_path())
         times = cell_data.get_base_traces(cell_data.TIME)
@@ -102,7 +102,7 @@ def test_simulation_speed():
 
 
 def test_fi_curve_class():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
         fi_curve = FICurve(cell_data)
         fi_curve.get_f_zero_and_f_inf_intersection()
         fi_curve.plot_fi_curve()
@@ -113,7 +113,7 @@ def test_fi_curve_class():
 
 
 def test_adaption_class():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
         print()
         print(cell_data.get_data_path())
         fi_curve = FICurve(cell_data)
diff --git a/tests/stimuli/TestStepStimulus.py b/unittests/TestStepStimulus.py
similarity index 100%
rename from tests/stimuli/TestStepStimulus.py
rename to unittests/TestStepStimulus.py