commit all..

alexanderott 2020-05-10 13:55:48 +02:00
parent 0855596623
commit 2d04311790
9 changed files with 177 additions and 141 deletions

Fitter.py
View File

@@ -19,7 +19,7 @@ def main():
    run_with_real_data()

-def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_strength_list=None, dend_tau_list=None):
+def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_strength_list=None, dend_tau_list=None, tau_a_list=None, delta_a_list=None):
    # mem_tau, input_scaling, noise_strength, dend_tau,
    # expand by tau_a, delta_a ?
    if mem_tau_list is None:
@@ -27,9 +27,13 @@ def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_stre
    if input_scaling_list is None:
        input_scaling_list = [40, 60, 80]
    if noise_strength_list is None:
-        noise_strength_list = [0.02, 0.06]
+        noise_strength_list = [0.03]  # [0.02, 0.06]
    if dend_tau_list is None:
        dend_tau_list = [0.001, 0.002]
+    # if tau_a_list is None:
+    #     tau_a_list =
+    # if delta_a_list is None:
+    #     delta_a_list =

    for mem_tau in mem_tau_list:
        for input_scaling in input_scaling_list:
@@ -44,7 +48,8 @@ def run_with_real_data():
        start_par_count = 0
        for start_parameters in iget_start_parameters():
            start_par_count += 1
-            if start_par_count <= 4:
+            print("START PARAMETERS:", start_par_count)
+            if start_par_count <= 0:
                continue
            print("cell:", cell_data.get_data_path())
            trace = cell_data.get_base_traces(trace_type=cell_data.V1)
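Reviewer note: for reference, a minimal sketch of what iget_start_parameters presumably yields — one dict per grid point, carrying the keys that the fit routines below index (mem_tau, noise_strength, input_scaling, dend_tau). The wrapper name, default grid values, and loop nesting are assumptions; the real generator body is not shown in this hunk.

def iget_start_parameters_sketch(mem_tau_list=(0.01,), input_scaling_list=(40, 60, 80),
                                 noise_strength_list=(0.03,), dend_tau_list=(0.001, 0.002)):
    # hypothetical stand-in: nested loops over candidate values,
    # yielding one start-parameter dict per combination
    for mem_tau in mem_tau_list:
        for input_scaling in input_scaling_list:
            for noise_strength in noise_strength_list:
                for dend_tau in dend_tau_list:
                    yield {"mem_tau": mem_tau, "input_scaling": input_scaling,
                           "noise_strength": noise_strength, "dend_tau": dend_tau}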
@@ -141,7 +146,6 @@ class Fitter:
        self.f_inf_values = []
        self.f_inf_slope = 0

        self.f_zero_values = []
        self.f_zero_slope = 0
        self.f_zero_fit = []
@@ -175,7 +179,7 @@ class Fitter:
        # print("delta_a: {:.3f}".format(self.delta_a), "tau_a: {:.3f}".format(self.tau_a))

-        return self.fit_routine_4(data, start_parameters)
+        return self.fit_routine_5(data, start_parameters)
        # return self.fit_model(fit_adaption=False)

    def fit_routine_1(self, cell_data=None):
@@ -252,55 +256,76 @@ class Fitter:
        if start_parameters is None:
            x0 = np.array([0.02, 70, 0.001])
        else:
-            x0 = np.array([start_parameters["mem_tau"], start_parameters["input_scaling"], start_parameters["dend_tau"]])
+            x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
+                           start_parameters["input_scaling"], start_parameters["dend_tau"]])
        initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 1, 5, 1, 2, 0, 0)
-        fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau_no_noise,
+        error_weights = (0, 5, 15, 1, 2, 1, 0)
+        fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
                        args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
-                        options={"initial_simplex": initial_simplex, "xatol": 0.001})
+                        options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
        res_parameters = fmin.x

-        print_comparision_cell_model(cell_data, self.base_model.get_parameters())
+        # print_comparision_cell_model(cell_data, self.base_model.get_parameters())

        self.counter = 0
-        x0 = np.array([res_parameters[0], res_parameters[1], self.tau_a,
-                       self.delta_a, res_parameters[2]])
+        x0 = np.array([self.tau_a,
+                       self.delta_a, res_parameters[0]])
        initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 0, 0, 0, 0, 4, 2)
-        fmin = minimize(fun=self.cost_function_all_without_noise,
+        error_weights = (0, 1, 1, 2, 2, 4, 2)
+        fmin = minimize(fun=self.cost_function_only_adaption,
                        args=(error_weights,), x0=x0, method="Nelder-Mead",
                        options={"initial_simplex": initial_simplex, "xatol": 0.001})
        res_parameters = fmin.x
        print(fmin)

        print_comparision_cell_model(cell_data, self.base_model.get_parameters())

-        self.counter = 0
-        x0 = np.array([res_parameters[0],
-                       res_parameters[1], self.tau_a,
-                       self.delta_a, res_parameters[2]])
-        initial_simplex = create_init_simples(x0, search_scale=2)
-        error_weights = (0, 0, 1, 0, 0, 5, 2)
-        fmin = minimize(fun=self.cost_function_all_without_noise,
-                        args=(error_weights,), x0=x0, method="Nelder-Mead",
-                        options={"initial_simplex": initial_simplex, "xatol": 0.001})
-        res_parameters = self.base_model.get_parameters()
-
-        print_comparision_cell_model(cell_data, self.base_model.get_parameters())
-
-        # noise_strength = 0.03
-        # self.counter = 0
-        # x0 = np.array([res_parameters["mem_tau"], noise_strength,
-        #                res_parameters["input_scaling"], res_parameters["tau_a"],
-        #                res_parameters["delta_a"], res_parameters["dend_tau"]])
-        # initial_simplex = create_init_simples(x0, search_scale=2)
-        # error_weights = (0, 2, 2, 1, 1, 3, 2)
-        # fmin = minimize(fun=self.cost_function_all,
-        #                 args=(error_weights,), x0=x0, method="Nelder-Mead",
-        #                 options={"initial_simplex": initial_simplex, "xatol": 0.001})
-        # res_parameters = self.base_model.get_parameters()
+        #
+        # # self.counter = 0
+        # # x0 = np.array([res_parameters[0],
+        # #                res_parameters[1], self.tau_a,
+        # #                self.delta_a, res_parameters[2]])
+        # # initial_simplex = create_init_simples(x0, search_scale=2)
+        # # error_weights = (1, 3, 1, 2, 1, 3, 2)
+        # # fmin = minimize(fun=self.cost_function_all_without_noise,
+        # #                 args=(error_weights,), x0=x0, method="Nelder-Mead",
+        # #                 options={"initial_simplex": initial_simplex, "xatol": 0.001})
+        # # res_parameters = self.base_model.get_parameters()
+        # #
+        # # print_comparision_cell_model(cell_data, self.base_model.get_parameters())
+        #
+        # self.counter = 0
+        # x0 = np.array([res_parameters[0], start_parameters["noise_strength"],
+        #                res_parameters[1], res_parameters[2],
+        #                res_parameters[3], res_parameters[4]])
+        # initial_simplex = create_init_simples(x0, search_scale=2)
+        # error_weights = (0, 1, 2, 1, 1, 3, 2)
+        # fmin = minimize(fun=self.cost_function_all,
+        #                 args=(error_weights,), x0=x0, method="Nelder-Mead",
+        #                 options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxiter": 599})
+        # res_parameters = self.base_model.get_parameters()

        return fmin, self.base_model.get_parameters()

+    def fit_routine_5(self, cell_data=None, start_parameters=None):
+        global SAVE_PATH_PREFIX
+        SAVE_PATH_PREFIX = "fit_routine_5_"
+        # errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
+        self.counter = 0
+        # fit only v_offset, mem_tau, input_scaling, dend_tau
+        if start_parameters is None:
+            x0 = np.array([0.02, 70, 0.001])
+        else:
+            x0 = np.array([start_parameters["mem_tau"], start_parameters["input_scaling"],
+                           self.tau_a, self.delta_a, start_parameters["dend_tau"]])
+        initial_simplex = create_init_simples(x0, search_scale=2)
+        error_weights = (0, 1, 1, 1, 1, 2, 1)
+        fmin = minimize(fun=self.cost_function_all_without_noise,
+                        args=(error_weights,), x0=x0, method="Nelder-Mead",
+                        options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
+        res_parameters = fmin.x
+
+        return fmin, self.base_model.get_parameters()

    def fit_model(self, x0=None, initial_simplex=None, fit_adaption=False):
        self.counter = 0
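Reviewer note: for context, a self-contained sketch of the Nelder-Mead setup these routines rely on — an explicit start simplex plus xatol/maxfev/maxiter caps passed to scipy.optimize.minimize. The toy cost function and the simplex construction below are assumptions; the repo's create_init_simples and cost functions are not reproduced here.

import numpy as np
from scipy.optimize import minimize

def toy_cost(x):
    # hypothetical stand-in for a Fitter cost function (weighted error sum)
    return float(np.sum((x - np.array([0.02, 70.0, 0.001])) ** 2))

x0 = np.array([0.05, 50.0, 0.002])
# one extra vertex per parameter, scaled around x0 (roughly what a helper like
# create_init_simples with search_scale=2 might produce)
simplex = np.vstack([x0] + [x0 + np.eye(len(x0))[i] * x0[i] for i in range(len(x0))])
fmin = minimize(fun=toy_cost, x0=x0, method="Nelder-Mead",
                options={"initial_simplex": simplex, "xatol": 0.001,
                         "maxfev": 400, "maxiter": 400})
print(fmin.x, fmin.success)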
@@ -361,9 +386,10 @@ class Fitter:
        return sum(error_list)

-    def cost_function_only_adaption_and_v_offset(self, X, error_weights=None):
+    def cost_function_only_adaption(self, X, error_weights=None):
        self.base_model.set_variable("tau_a", X[0])
        self.base_model.set_variable("delta_a", X[1])
+        self.base_model.set_variable("mem_tau", X[2])

        base_stimulus = SinusoidalStepStimulus(self.eod_freq, 0)
        # find right v-offset
@@ -376,14 +402,6 @@
        return sum(error_list)

-    def cost_function_only_adaption(self, X, error_weights=None):
-        self.base_model.set_variable("tau_a", X[0])
-        self.base_model.set_variable("delta_a", X[1])
-
-        error_list = self.calculate_errors(error_weights)
-        return sum(error_list)
-
    def cost_function_with_fixed_adaption(self, X, tau_a, delta_a, error_weights=None):
        # set model parameters:
        model = self.base_model
@@ -453,10 +471,20 @@
        # f_infinities, f_infinities_slope = self.base_model.calculate_fi_markers(self.fi_contrasts, self.eod_freq)
        f_baselines, f_zeros, f_infinities = self.base_model.calculate_fi_curve(self.fi_contrasts, self.eod_freq)
-        f_infinities_fit = hF.fit_clipped_line(self.fi_contrasts, f_infinities)
-        f_infinities_slope = f_infinities_fit[0]
+        try:
+            f_infinities_fit = hF.fit_clipped_line(self.fi_contrasts, f_infinities)
+        except Exception as e:
+            print("EXCEPTION IN FIT LINE!")
+            print(e)
+            f_infinities_fit = [0, 0]
+        f_infinities_slope = f_infinities_fit[0]

-        f_zeros_fit = hF.fit_boltzmann(self.fi_contrasts, f_zeros)
+        try:
+            f_zeros_fit = hF.fit_boltzmann(self.fi_contrasts, f_zeros)
+        except Exception as e:
+            print("EXCEPTION IN FIT BOLTZMANN!")
+            print(e)
+            f_zeros_fit = [0, 0, 0, 0]
        f_zero_slope = fu.full_boltzmann_straight_slope(f_zeros_fit[0], f_zeros_fit[1], f_zeros_fit[2], f_zeros_fit[3])
        # print("fi-curve features calculated!")
        # calculate errors with reference values
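Reviewer note: the try/except fallbacks above guard against curve fits that fail to converge on degenerate FI data. A minimal sketch of the same pattern, written against an assumed Boltzmann parameterisation — the repo's fit_boltzmann and full_boltzmann_straight_slope may use a different form, and fit_boltzmann_safe is a hypothetical name.

import numpy as np
from scipy.optimize import curve_fit

def boltzmann(x, f_max, f_min, k, x_zero):
    # assumed sigmoid form; its slope at the inflection x_zero is k * (f_max - f_min) / 4
    return f_min + (f_max - f_min) / (1 + np.exp(-k * (x - x_zero)))

def fit_boltzmann_safe(x, y):
    try:
        popt, _ = curve_fit(boltzmann, x, y, maxfev=10000)
        return popt
    except (RuntimeError, ValueError) as e:
        print("EXCEPTION IN FIT BOLTZMANN!")
        print(e)
        return np.array([0.0, 0.0, 0.0, 0.0])  # fallback, mirrors the diff's [0, 0, 0, 0]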
@@ -478,7 +506,7 @@
        error = sum(error_list)

        self.counter += 1
-        if self.counter % 200 == 0 and False:  # TODO currently shut off!
+        if self.counter % 200 == 0:  # and False:  # TODO currently shut off!
            print("\nCost function run times: {:}\n".format(self.counter),
                  "Total weighted error: {:.4f}\n".format(error),
                  "Baseline frequency - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(

View File

@@ -3,6 +3,7 @@ from warnings import warn
from thunderfish.eventdetection import detect_peaks, threshold_crossing_times, threshold_crossings
from scipy.optimize import curve_fit
import functions as fu
+from numba import jit

def fit_clipped_line(x, y):
@@ -27,6 +28,11 @@ def fit_boltzmann(x, y):
    return popt

+@jit(nopython=True)
+def rectify_stimulus_array(stimulus_array: np.ndarray):
+    return np.array([x if x > 0 else 0 for x in stimulus_array])

def merge_similar_intensities(intensities, spiketimes, trans_amplitudes):
    i = 0
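Reviewer note: the jit-compiled rectification added above is an element-wise clamp; a sketch of an equivalent plain-NumPy version, without the numba dependency (the function name here is hypothetical):

import numpy as np

def rectify_stimulus_array_np(stimulus_array: np.ndarray) -> np.ndarray:
    # half-wave rectification: negative stimulus values are clamped to zero
    return np.maximum(stimulus_array, 0.0)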
@@ -151,7 +157,8 @@ def calculate_isi_frequency_trace(spiketimes, sampling_interval, time_in_ms=Fals
def calculate_time_and_frequency_trace(spiketimes, sampling_interval, time_in_ms=False):
    if len(spiketimes) < 2:
-        pass  # raise ValueError("Cannot compute a time and frequency vector with fewer than 2 spikes")
+        return [0], [0]
+        # raise ValueError("Cannot compute a time and frequency vector with fewer than 2 spikes")

    frequency = calculate_isi_frequency_trace(spiketimes, sampling_interval, time_in_ms)
@@ -161,7 +168,6 @@ def calculate_time_and_frequency_trace(spiketimes, sampling_interval, time_in_ms
    if len(time) > len(frequency):
        time = time[:len(frequency)]

    return time, frequency
@@ -430,6 +436,7 @@ def detect_f_zero_in_frequency_trace(time, frequency, stimulus_start, sampling_i
    if len(freq_before) < 3:
        print("mäh")
+        return 0

    min_before = min(freq_before)
    max_before = max(freq_before)

View File

@@ -2,27 +2,10 @@ import numpy as np
import matplotlib.pyplot as plt
import helperFunctions as hF
import time
-from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus

-def main():
-    time = np.arange(-1, 30, 0.0001)
-    eod = np.sin(2*np.pi * 600 * time)
-    signs = np.sign(eod[:-1]) != np.sign(eod[1:])
-    delta = eod[:-1] < eod[1:]
-    sign_changes = np.where(signs & delta)[0]
-
-    plt.plot(time, eod)
-    plt.plot([time[i] for i in sign_changes], [0]*len(sign_changes), 'o')
-    plt.show()
-
-    print(sign_changes)
-    quit()

def main():
    for freq in [700, 50, 100, 500, 1000]:
        reps = 1000

View File

@@ -1,10 +1,6 @@
-from FiCurve import FICurve
from CellData import icelldata_of_dir
-import os
-import helperFunctions as hf
-from AdaptionCurrent import Adaption
-from functionalityTests import *

# TODO command line interface needed/nice ?

View File

@@ -32,6 +32,7 @@ class LifacNoiseModel(AbstractModel):
        if self.parameters["step_size"] > 0.0001:
            warn("LifacNoiseModel: The step size is quite big simulation could fail.")
        self.voltage_trace = []
+        self.input_voltage = []
        self.adaption_trace = []
        self.spiketimes = []
        self.stimulus = None
@@ -43,18 +44,19 @@ class LifacNoiseModel(AbstractModel):
        time = np.arange(0, total_time_s, self.parameters["step_size"])
        output_voltage = np.zeros(len(time), dtype='float64')
        adaption = np.zeros(len(time), dtype='float64')
+        input_voltage = np.zeros(len(time), dtype='float64')
        spiketimes = []

        current_v = self.parameters["v_zero"]
        current_a = self.parameters["a_zero"]
+        input_voltage[0] = fu.rectify(stimulus.value_at_time_in_s(time[0]))
        output_voltage[0] = current_v
        adaption[0] = current_a

        for i in range(1, len(time), 1):
            time_point = time[i]
            # rectified input:
-            stimulus_strength = fu.rectify(stimulus.value_at_time_in_s(time_point)) * self.parameters["input_scaling"]
+            stimulus_strength = self._calculate_input_voltage_step(input_voltage[i-1], fu.rectify(stimulus.value_at_time_in_s(time_point)))
            v_next = self._calculate_voltage_step(current_v, stimulus_strength - current_a)
            a_next = self._calculate_adaption_step(current_a)
@@ -65,6 +67,7 @@ class LifacNoiseModel(AbstractModel):
            output_voltage[i] = v_next
            adaption[i] = a_next
+            input_voltage[i] = stimulus_strength

            current_v = v_next
            current_a = a_next
@@ -72,6 +75,7 @@ class LifacNoiseModel(AbstractModel):
        self.voltage_trace = output_voltage
        self.adaption_trace = adaption
        self.spiketimes = spiketimes
+        self.input_voltage = input_voltage

        return output_voltage, spiketimes
@@ -91,6 +95,10 @@ class LifacNoiseModel(AbstractModel):
        step_size = self.parameters["step_size"]
        return current_a + (step_size * (-current_a)) / self.parameters["tau_a"]

+    def _calculate_input_voltage_step(self, current_i, rectified_input):
+        # input_voltage[i] = input_voltage[i - 1] + (-input_voltage[i - 1] + rectified_stimulus_array[i] * input_scaling) / dend_tau
+        return current_i + ((-current_i + rectified_input * self.parameters["input_scaling"]) / self.parameters["dend_tau"]) * self.parameters["step_size"]

    def simulate_fast(self, stimulus: AbstractStimulus, total_time_s, time_start=0):
        v_zero = self.parameters["v_zero"]
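Reviewer note: the new _calculate_input_voltage_step reads as a forward-Euler step of a leaky (low-pass) dendritic filter, dend_tau * dI/dt = -I + s(t) * input_scaling, driven by the rectified stimulus. A standalone sketch of the same update, with the parameters passed explicitly (the free-function wrapper is hypothetical):

def input_voltage_step(current_i, rectified_input, input_scaling, dend_tau, step_size):
    # one Euler step of the dendritic low-pass filter
    return current_i + ((-current_i + rectified_input * input_scaling) / dend_tau) * step_size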
@@ -109,9 +117,10 @@
        rectified_stimulus = rectify_stimulus_array(stimulus.as_array(time_start, total_time_s, step_size))
        parameters = np.array([v_zero, a_zero, step_size, threshold, v_base, delta_a, tau_a, v_offset, mem_tau, noise_strength, time_start, input_scaling, dend_tau])
-        voltage_trace, adaption, spiketimes = simulate_fast(rectified_stimulus, total_time_s, parameters)
+        voltage_trace, adaption, spiketimes, input_voltage = simulate_fast(rectified_stimulus, total_time_s, parameters)

        self.stimulus = stimulus
+        self.input_voltage = input_voltage
        self.voltage_trace = voltage_trace
        self.adaption_trace = adaption
        self.spiketimes = spiketimes
@@ -207,32 +216,36 @@ class LifacNoiseModel(AbstractModel):
    def calculate_fi_curve(self, contrasts, stimulus_freq):
-        max_time_constant = max([self.parameters["tau_a"], self.parameters["mem_tau"]])
-        factor_to_equilibrium = 5
-        stim_duration = max_time_constant * factor_to_equilibrium
-        stim_start = max_time_constant * factor_to_equilibrium
-        total_simulation_time = max_time_constant * factor_to_equilibrium * 3
+        stim_duration = 0.5
+        stim_start = 0.5
+        total_simulation_time = stim_duration + 2 * stim_start
        # print("Total simulation time (vs 2.5) {:.2f}".format(total_simulation_time))
        sampling_interval = self.get_sampling_interval()

        f_infinities = []
        f_zeros = []
        f_baselines = []
-        import matplotlib.pyplot as plt
        for c in contrasts:
            stimulus = SinusoidalStepStimulus(stimulus_freq, c, stim_start, stim_duration)

            _, spiketimes = self.simulate_fast(stimulus, total_simulation_time)
+            # if len(spiketimes) > 0:
+            #     print("min:", min(spiketimes), "max:", max(spiketimes), "len:", len(spiketimes))
+            # else:
+            #     print("spiketimes empty")
            time, frequency = hF.calculate_time_and_frequency_trace(spiketimes, sampling_interval)
            # if c == contrasts[0] or c == contrasts[-1]:
            #     plt.plot(frequency)
            #     plt.show()

-            if len(time) == 0 or time[0] >= stim_start or len(spiketimes) < 5:
+            if len(spiketimes) < 10 or len(time) == 0 or min(time) > stim_start or max(time) < stim_start+stim_duration:
+                print("Too few spikes to calculate f_inf, f_0 and f_base")
                f_infinities.append(0)
                f_zeros.append(0)
                f_baselines.append(0)
                continue

            f_inf = hF.detect_f_infinity_in_freq_trace(time, frequency, stim_start, stim_duration, sampling_interval)
            f_infinities.append(f_inf)
@@ -242,7 +255,7 @@ class LifacNoiseModel(AbstractModel):
            f_baseline = hF.detect_f_baseline_in_freq_trace(time, frequency, stim_start, sampling_interval)
            f_baselines.append(f_baseline)

+            # import matplotlib.pyplot as plt
            # fig, axes = plt.subplots(2, 1, sharex="all")
            # stim_time = np.arange(0,3.5, sampling_interval)
            # axes[0].set_title("Contrast: " + str(c))
@@ -359,8 +372,8 @@ def simulate_fast(rectified_stimulus_array, total_time_s, parameters: np.ndarray
        noise_value = np.random.normal()
        noise = noise_strength * noise_value / np.sqrt(step_size)

-        input_voltage[i] = input_voltage[i - 1] + (-input_voltage[i - 1] + rectified_stimulus_array[i] * input_scaling) / dend_tau
-        output_voltage[i] = output_voltage[i-1] + ((v_base - output_voltage[i-1] + v_offset + (rectified_stimulus_array[i] * input_scaling) - adaption[i-1] + noise) / mem_tau) * step_size
+        input_voltage[i] = input_voltage[i - 1] + ((-input_voltage[i - 1] + rectified_stimulus_array[i]) / dend_tau) * step_size
+        output_voltage[i] = output_voltage[i-1] + ((v_base - output_voltage[i-1] + v_offset + (input_voltage[i] * input_scaling) - adaption[i-1] + noise) / mem_tau) * step_size
        adaption[i] = adaption[i-1] + ((-adaption[i-1]) / tau_a) * step_size

        if output_voltage[i] > threshold:
@@ -368,7 +381,7 @@ def simulate_fast(rectified_stimulus_array, total_time_s, parameters: np.ndarray
            spiketimes.append(i*step_size)
            adaption[i] += delta_a / tau_a

-    return output_voltage, adaption, spiketimes
+    return output_voltage, adaption, spiketimes, input_voltage
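Reviewer note: taken together, the changed lines in simulate_fast integrate three state variables per step — the dendritic input (now filtering the raw rectified stimulus and multiplied by step_size), the membrane voltage (now reading the filtered input scaled by input_scaling), and the adaption current. A hedged, self-contained sketch of one such step; the wrapper function and the parameter dict are hypothetical, only the update equations mirror the diff:

def euler_step(v_prev, a_prev, i_prev, rectified_stim, noise, p):
    # p: dict of model parameters (dend_tau, mem_tau, tau_a, v_base, v_offset, input_scaling, step_size)
    i_new = i_prev + ((-i_prev + rectified_stim) / p["dend_tau"]) * p["step_size"]
    v_new = v_prev + ((p["v_base"] - v_prev + p["v_offset"] + i_new * p["input_scaling"]
                       - a_prev + noise) / p["mem_tau"]) * p["step_size"]
    a_new = a_prev + (-a_prev / p["tau_a"]) * p["step_size"]
    return v_new, a_new, i_new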

View File

@@ -5,7 +5,7 @@ import helperFunctions as hf
from models.FirerateModel import FirerateModel
from models.LIFACnoise import LifacNoiseModel
from stimuli.StepStimulus import StepStimulus
-from stimuli.SinusAmplitudeModulation import SinusAmplitudeModulationStimulus
+from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus
import functions as fu
@@ -52,16 +52,16 @@ def test_lifac_noise():
    model = LifacNoiseModel()
-    start = 0.2
-    duration = 30
+    start = 1
+    duration = 3
    total_time = duration + 2*start
-    step_size = model.get_parameters()["step_size"]/1000
+    step_size = model.get_parameters()["step_size"]
    time = np.arange(0, total_time, step_size)
-    stimulus = SinusAmplitudeModulationStimulus(700, 0.2, 10, 20, start, duration)
+    stimulus = SinusoidalStepStimulus(700, 0.2, start, duration)

-    model.simulate(stimulus, total_time)
+    model.simulate_fast(stimulus, total_time)

-    fig, axes = plt.subplots(nrows=3, sharex="col")
+    fig, axes = plt.subplots(nrows=4, sharex="col")
    sparse_time = np.arange(0, total_time, 1/5000)
    axes[0].plot(sparse_time, [stimulus.value_at_time_in_s(x) for x in sparse_time], label="given stimulus")
    axes[0].plot(sparse_time, [fu.rectify(stimulus.value_at_time_in_s(x)) for x in sparse_time], label="seen stimulus")
@@ -69,54 +69,63 @@ def test_lifac_noise():
    axes[0].set_ylabel("stimulus strength")
    axes[0].legend()

-    axes[1].plot(time, model.get_voltage_trace())
-    axes[1].set_title("Voltage trace")
-    axes[1].set_ylabel("voltage")
-
-    t, f = hf.calculate_isi_frequency_trace(model.get_spiketimes(), 0, step_size)
-    axes[2].plot(t, f)
-    axes[2].set_title("ISI frequency trace")
-    axes[2].set_ylabel("Frequency")
-
-    spiketimes_small_step = model.get_spiketimes()
-
-    model.set_variable("step_size", 0.02)
-    model.simulate(stimulus, total_time)
-    print(model.get_adaption_trace()[int(0.1/(0.01/1000))])
-    step_size = model.get_parameters()["step_size"] / 1000
-    time = np.arange(0, total_time, step_size)
-    t, f = hf.calculate_isi_frequency_trace(model.get_spiketimes(), 0, step_size)
-
-    axes[1].plot(time, model.get_voltage_trace())
-    axes[2].plot(t, f)
-
-    spiketimes_big_step = model.get_spiketimes()
-
-    print("CV:")
-    print("small step:", hf.calculate_coefficient_of_variation(spiketimes_small_step))
-    print("big step:", hf.calculate_coefficient_of_variation(spiketimes_big_step))
-    plt.show()
-    plt.close()
-
-    max_lag = 5
-    x = np.arange(1, max_lag+1, 1)
-    serial_cor_small = hf.calculate_serial_correlation(spiketimes_small_step, max_lag)
-    serial_cor_big = hf.calculate_serial_correlation(spiketimes_big_step, max_lag)
-
-    print(serial_cor_small)
-    print(serial_cor_big)
-    plt.plot(x, serial_cor_small, 'o', label='small step',)
-    plt.plot(x, serial_cor_big, 'o', label='big step')
-    plt.ylim(-1, 1)
-    plt.legend()
+    input_voltage = model.input_voltage
+    axes[1].plot(time, input_voltage)
+    axes[1].set_title("Stimulus after dendtritic filter")
+    axes[1].set_ylabel("stimulus strength")
+
+    voltage_trace = model.get_voltage_trace()
+    axes[2].plot(time, voltage_trace)
+    axes[2].set_title("Voltage trace")
+    axes[2].set_ylabel("voltage")
+
+    spiketimes = model.get_spiketimes()
+    t, f = hf.calculate_time_and_frequency_trace(spiketimes, step_size)
+    axes[3].plot(t, f)
+    axes[3].set_title("ISI frequency trace")
+    axes[3].set_ylabel("Frequency")

    plt.show()
-    plt.close()
-
-    bins = np.arange(0, max(np.diff(spiketimes_small_step)), 0.0001)
-    plt.hist(np.diff(spiketimes_small_step), bins=bins, alpha=0.5)
-    plt.hist(np.diff(spiketimes_big_step), bins=bins, alpha=0.5)
-    plt.show()
+
+    # spiketimes_small_step = model.get_spiketimes()
+    #
+    # model.set_variable("step_size", 0.02)
+    # model.simulate(stimulus, total_time)
+    # print(model.get_adaption_trace()[int(0.1/(0.01/1000))])
+    # step_size = model.get_parameters()["step_size"] / 1000
+    # time = np.arange(0, total_time, step_size)
+    # t, f = hf.calculate_time_and_frequency_trace(model.get_spiketimes(), 0.02)
+    #
+    # axes[1].plot(time, model.get_voltage_trace())
+    # axes[2].plot(t, f)
+    #
+    # spiketimes_big_step = model.get_spiketimes()
+    #
+    # print("CV:")
+    # print("small step:", hf.calculate_coefficient_of_variation(spiketimes_small_step))
+    # print("big step:", hf.calculate_coefficient_of_variation(spiketimes_big_step))
+    # plt.show()
+    # plt.close()
+    #
+    # max_lag = 5
+    # x = np.arange(1, max_lag+1, 1)
+    # serial_cor_small = hf.calculate_serial_correlation(spiketimes_small_step, max_lag)
+    # serial_cor_big = hf.calculate_serial_correlation(spiketimes_big_step, max_lag)
+    #
+    # print(serial_cor_small)
+    # print(serial_cor_big)
+    # plt.plot(x, serial_cor_small, 'o', label='small step',)
+    # plt.plot(x, serial_cor_big, 'o', label='big step')
+    # plt.ylim(-1, 1)
+    # plt.legend()
+    # plt.show()
+    # plt.close()
+    #
+    # bins = np.arange(0, max(np.diff(spiketimes_small_step)), 0.0001)
+    # plt.hist(np.diff(spiketimes_small_step), bins=bins, alpha=0.5)
+    # plt.hist(np.diff(spiketimes_big_step), bins=bins, alpha=0.5)
+    # plt.show()


if __name__ == '__main__':

View File

@@ -28,7 +28,7 @@ def time_test_function():
def test_cell_data():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
        #if "2012-12-20-ad" not in cell_data.get_data_path():
        #    continue
        print()
@@ -47,7 +47,7 @@ def test_cell_data():
def test_peak_detection():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
        print()
        print(cell_data.get_data_path())
        times = cell_data.get_base_traces(cell_data.TIME)
@@ -102,7 +102,7 @@ def test_simulation_speed():
def test_fi_curve_class():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
        fi_curve = FICurve(cell_data)
        fi_curve.get_f_zero_and_f_inf_intersection()
        fi_curve.plot_fi_curve()
@@ -113,7 +113,7 @@ def test_fi_curve_class():
def test_adaption_class():
-    for cell_data in icelldata_of_dir("./data/"):
+    for cell_data in icelldata_of_dir("../data/"):
        print()
        print(cell_data.get_data_path())
        fi_curve = FICurve(cell_data)