cv clean up

a.ott 2020-05-11 11:35:20 +02:00
parent b37db0ea36
commit 9485492f4d
5 changed files with 169 additions and 156 deletions


@@ -62,6 +62,15 @@ class CellData:
return self.base_spikes
def get_base_isis(self):
spikestimes = self.get_base_spikes()
isis = []
for spikes in spikestimes:
isis.extend(np.diff(spikes))
return isis
def get_fi_traces(self):
raise NotImplementedError("CellData:get_fi_traces():\n" +
"Getting the Fi-Traces currently overflows the RAM and causes swapping! Reimplement if really needed!")

View File

@@ -1,8 +1,6 @@
from CellData import CellData
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import linregress
import matplotlib.pyplot as plt
from warnings import warn
import functions as fu

Fitter.py

@@ -20,21 +20,14 @@ def main():
run_with_real_data()
def iget_start_parameters(mem_tau_list=None, input_scaling_list=None, noise_strength_list=None, dend_tau_list=None, tau_a_list=None, delta_a_list=None):
def iget_start_parameters():
# mem_tau, input_scaling, noise_strength, dend_tau,
# expand by tau_a, delta_a ?
if mem_tau_list is None:
mem_tau_list = [0.01]
if input_scaling_list is None:
input_scaling_list = [40, 60, 80]
if noise_strength_list is None:
noise_strength_list = [0.03] # [0.02, 0.06]
if dend_tau_list is None:
dend_tau_list = [0.001, 0.002]
# if tau_a_list is None:
# tau_a_list =
# if delta_a_list is None:
# delta_a_list =
mem_tau_list = [0.01]
input_scaling_list = [40, 60, 80]
noise_strength_list = [0.03] # [0.02, 0.06]
dend_tau_list = [0.001, 0.002]
for mem_tau in mem_tau_list:
for input_scaling in input_scaling_list:
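The simplified iget_start_parameters() hard-codes the candidate lists and loops over every combination; the exact object it yields is not visible in this hunk, so the dict below is an assumption based on the keys that run_with_real_data reads (mem_tau, noise_strength, input_scaling, dend_tau). A hypothetical equivalent using itertools.product:

from itertools import product

def iget_start_parameters():
    mem_tau_list = [0.01]
    input_scaling_list = [40, 60, 80]
    noise_strength_list = [0.03]
    dend_tau_list = [0.001, 0.002]
    # One candidate start-parameter set per combination of the four lists.
    for mem_tau, input_scaling, noise_strength, dend_tau in product(
            mem_tau_list, input_scaling_list, noise_strength_list, dend_tau_list):
        yield {"mem_tau": mem_tau, "input_scaling": input_scaling,
               "noise_strength": noise_strength, "dend_tau": dend_tau}

for i, pars in enumerate(iget_start_parameters(), start=1):
    print(i, pars)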
@@ -50,8 +43,6 @@ def run_with_real_data():
for start_parameters in iget_start_parameters():
start_par_count += 1
print("START PARAMETERS:", start_par_count)
if start_par_count <= 0:
continue
print("cell:", cell_data.get_data_path()) print("cell:", cell_data.get_data_path())
trace = cell_data.get_base_traces(trace_type=cell_data.V1) trace = cell_data.get_base_traces(trace_type=cell_data.V1)
if len(trace) == 0: if len(trace) == 0:
@@ -61,6 +52,7 @@ def run_with_real_data():
results_path = "results/" + os.path.split(cell_data.get_data_path())[-1] + "/"
print("results at:", results_path)
start_time = time.time()
fitter = Fitter()
fmin, parameters = fitter.fit_model_to_data(cell_data, start_parameters)
@@ -79,7 +71,7 @@ def run_with_real_data():
results_path += SAVE_PATH_PREFIX + "par_set_" + str(start_par_count) + "_"
print('Fitting of cell took function took {:.3f} s'.format((end_time - start_time)))
# print(results_path)
print_comparision_cell_model(cell_data, parameters, plot=True, savepath=results_path)
break
@@ -93,7 +85,8 @@ def print_comparision_cell_model(cell_data, parameters, plot=False, savepath=None):
fi_curve = FICurve(cell_data)
m_bf, m_vs, m_sc, m_cv = res_model.calculate_baseline_markers(cell_data.get_eod_frequency())
f_baselines, f_zeros, m_f_infinities = res_model.calculate_fi_curve(fi_curve.stimulus_value,
cell_data.get_eod_frequency())
f_infinities_fit = hF.fit_clipped_line(fi_curve.stimulus_value, m_f_infinities)
m_f_infinities_slope = f_infinities_fit[0]
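Taking f_infinities_fit[0] as the slope suggests that hF.fit_clipped_line returns line coefficients with the slope first, as numpy.polyfit with degree 1 does; how the clipping is handled is not visible here, so the helper below is only a rough stand-in under that assumption:

import numpy as np

def fit_line_slope_first(stimulus_values, f_infinities):
    # Degree-1 polynomial fit; numpy returns the coefficients highest power
    # first, i.e. (slope, intercept).
    return np.polyfit(np.asarray(stimulus_values), np.asarray(f_infinities), 1)

slope, intercept = fit_line_slope_first([0.0, 0.5, 1.0], [10.0, 30.0, 52.0])
print(slope, intercept)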
@@ -176,7 +169,10 @@ class Fitter:
self.f_zero_values = fi_curve.f_zeros
self.f_zero_fit = fi_curve.boltzmann_fit_vars
self.f_zero_slope = fi_curve.get_fi_curve_slope_of_straight()
# self.f_zero_slope = fi_curve.get_fi_curve_slope_at(fi_curve.get_f_zero_and_f_inf_intersection())
# around 1/3 of the value at straight
# self.f_zero_slope = fi_curve.get_fi_curve_slope_at(fi_curve.get_f_zero_and_f_inf_intersection())
self.delta_a = (self.f_zero_slope / self.f_inf_slope) / 1000 # seems to work if divided by 1000...
adaption = Adaption(data, fi_curve)
@@ -187,130 +183,6 @@ class Fitter:
return self.fit_routine_5(data, start_parameters)
# return self.fit_model(fit_adaption=False)
def fit_routine_1(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_1_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling
x0 = np.array([0.02, 0.03, 70])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 1, 1, 1, 0, 0)
fmin_step1 = minimize(fun=self.cost_function_with_fixed_adaption, args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters_step1 = self.base_model.get_parameters()
if cell_data is not None:
print("##### After step 1: (fixed adaption)")
print_comparision_cell_model(cell_data, res_parameters_step1)
self.counter = 0
x0 = np.array([res_parameters_step1["mem_tau"], res_parameters_step1["noise_strength"],
res_parameters_step1["input_scaling"], res_parameters_step1["tau_a"],
res_parameters_step1["delta_a"]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 1, 1, 1, 2, 4)
fmin_step2 = minimize(fun=self.cost_function_all, args=(error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters_step2 = self.base_model.get_parameters()
if cell_data is not None:
print("##### After step 2: (Everything)")
# print_comparision_cell_model(cell_data, res_parameters_step2)
return fmin_step2, res_parameters_step2
def fit_routine_2(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_2_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling
x0 = np.array([0.02, 0.03, 70])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 5, 1, 2, 0, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption,
args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters = self.base_model.get_parameters()
return fmin, res_parameters
def fit_routine_3(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_3_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling, dend_tau
x0 = np.array([0.02, 0.03, 70, 0.001])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 5, 1, 2, 0, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters = self.base_model.get_parameters()
return fmin, res_parameters
def fit_routine_4(self, cell_data=None, start_parameters=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_4_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, input_scaling, dend_tau
if start_parameters is None:
x0 = np.array([0.02, 70, 0.001])
else:
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], start_parameters["dend_tau"]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 5, 15, 1, 2, 1, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
res_parameters = fmin.x
# print_comparision_cell_model(cell_data, self.base_model.get_parameters())
self.counter = 0
x0 = np.array([self.tau_a,
self.delta_a, res_parameters[0]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 1, 1, 2, 2, 4, 2)
fmin = minimize(fun=self.cost_function_only_adaption,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001})
res_parameters = fmin.x
print(fmin)
print_comparision_cell_model(cell_data, self.base_model.get_parameters())
#
# # self.counter = 0
# # x0 = np.array([res_parameters[0],
# # res_parameters[1], self.tau_a,
# # self.delta_a, res_parameters[2]])
# # initial_simplex = create_init_simples(x0, search_scale=2)
# # error_weights = (1, 3, 1, 2, 1, 3, 2)
# # fmin = minimize(fun=self.cost_function_all_without_noise,
# # args=(error_weights,), x0=x0, method="Nelder-Mead",
# # options={"initial_simplex": initial_simplex, "xatol": 0.001})
# # res_parameters = self.base_model.get_parameters()
# #
# # print_comparision_cell_model(cell_data, self.base_model.get_parameters())
#
# self.counter = 0
# x0 = np.array([res_parameters[0], start_parameters["noise_strength"],
# res_parameters[1], res_parameters[2],
# res_parameters[3], res_parameters[4]])
# initial_simplex = create_init_simples(x0, search_scale=2)
# error_weights = (0, 1, 2, 1, 1, 3, 2)
# fmin = minimize(fun=self.cost_function_all,
# args=(error_weights,), x0=x0, method="Nelder-Mead",
# options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxiter": 599})
# res_parameters = self.base_model.get_parameters()
return fmin, self.base_model.get_parameters()
def fit_routine_5(self, cell_data=None, start_parameters=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_5_"
@@ -320,14 +192,17 @@ class Fitter:
if start_parameters is None:
x0 = np.array([0.02, 70, 0.001])
else:
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], self.tau_a, self.delta_a, start_parameters["dend_tau"]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 1, 1, 1, 1, 1, 2, 1)
fmin = minimize(fun=self.cost_function_all,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
res_parameters = fmin.x
if cell_data is not None:
print("##### After step 1: (Everything)")
# print_comparision_cell_model(cell_data, res_parameters_step2)
return fmin, self.base_model.get_parameters()
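All routines in this file drive scipy.optimize.minimize with the Nelder-Mead method and an explicit starting simplex built by create_init_simples (whose internals are not shown). For reference, Nelder-Mead takes an initial_simplex of shape (N+1, N) for N free parameters; a minimal, self-contained example on a toy quadratic:

import numpy as np
from scipy.optimize import minimize

def toy_cost(x):
    # Simple quadratic with its minimum at (1, 2, 3).
    return float(np.sum((x - np.array([1.0, 2.0, 3.0])) ** 2))

x0 = np.array([0.02, 0.03, 70.0])
# One vertex per row: the start point plus one perturbed copy per dimension.
simplex = np.vstack([x0] + [x0 + np.eye(3)[i] * 0.5 * (abs(x0[i]) + 1.0) for i in range(3)])
fmin = minimize(fun=toy_cost, x0=x0, method="Nelder-Mead",
                options={"initial_simplex": simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
print(fmin.x, fmin.fun)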
@@ -340,14 +215,16 @@ class Fitter:
if initial_simplex is None:
initial_simplex = create_init_simples(x0)
fmin = minimize(fun=self.cost_function_all, x0=x0,
method="Nelder-Mead", options={"initial_simplex": initial_simplex})
else:
if x0 is None:
x0 = np.array([0.02, 0.03, 70])
if initial_simplex is None:
initial_simplex = create_init_simples(x0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption, x0=x0, args=(self.tau_a, self.delta_a),
method="Nelder-Mead", options={"initial_simplex": initial_simplex})
return fmin, self.base_model.get_parameters()
@@ -504,7 +381,8 @@ class Fitter:
error_f_zero_slope = abs((f_zero_slope - self.f_zero_slope) / self.f_zero_slope)
error_f_zero = calculate_f_values_error(f_zeros, self.f_zero_values)
error_list = [error_bf, error_vs, error_sc, error_cv,
error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
if error_weights is not None and len(error_weights) == len(error_list):
for i in range(len(error_weights)):
@@ -513,18 +391,19 @@ class Fitter:
elif len(error_weights) != len(error_list):
warn("Error weights had different length than errors and were ignored!")
error = sum(error_list)
self.counter += 1
if self.counter % 200 == 0: # and False: # TODO currently shut off!
if self.counter % 200 == 0: # and False:
print("\nCost function run times: {:}\n".format(self.counter),
"Total weighted error: {:.4f}\n".format(error),
"Baseline frequency - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
self.baseline_freq, baseline_freq, error_bf),
"Vector strength - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.vector_strength, vector_strength, error_vs),
"Serial correlation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.serial_correlation[0], serial_correlation[0], error_sc),
"Coefficient of variation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.coefficient_of_variation, coefficient_of_variation, error_cv),
"f-infinity slope - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
self.f_inf_slope, f_infinities_slope, error_f_inf_slope),
"f-infinity values:\nexpected:", np.around(self.f_inf_values), "\ncurrent: ", np.around(f_infinities),


@@ -189,7 +189,7 @@ class LifacNoiseModel(AbstractModel):
vector_strength = hF.calculate_vector_strength_from_spiketimes(time_trace, stimulus_array, spiketimes, self.get_sampling_interval())
serial_correlation = hF.calculate_serial_correlation(np.array(spiketimes), max_lag)
coeffient_of_variation = hF.calculate_coefficient_of_variation(spiketimes)
coeffient_of_variation = hF.calculate_coefficient_of_variation(np.array(spiketimes))
return baseline_freq, vector_strength, serial_correlation, coeffient_of_variation

tests/old_fit_routines.py Normal file

@@ -0,0 +1,127 @@
def fit_routine_1(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_1_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling
x0 = np.array([0.02, 0.03, 70])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 1, 1, 1, 0, 0)
fmin_step1 = minimize(fun=self.cost_function_with_fixed_adaption, args=(self.tau_a, self.delta_a, error_weights),
x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters_step1 = self.base_model.get_parameters()
if cell_data is not None:
print("##### After step 1: (fixed adaption)")
print_comparision_cell_model(cell_data, res_parameters_step1)
self.counter = 0
x0 = np.array([res_parameters_step1["mem_tau"], res_parameters_step1["noise_strength"],
res_parameters_step1["input_scaling"], res_parameters_step1["tau_a"],
res_parameters_step1["delta_a"]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 1, 1, 1, 2, 4)
fmin_step2 = minimize(fun=self.cost_function_all, args=(error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters_step2 = self.base_model.get_parameters()
if cell_data is not None:
print("##### After step 2: (Everything)")
# print_comparision_cell_model(cell_data, res_parameters_step2)
return fmin_step2, res_parameters_step2
def fit_routine_2(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_2_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling
x0 = np.array([0.02, 0.03, 70])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 5, 1, 2, 0, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption,
args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters = self.base_model.get_parameters()
return fmin, res_parameters
def fit_routine_3(self, cell_data=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_3_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, noise_strength, input_scaling, dend_tau
x0 = np.array([0.02, 0.03, 70, 0.001])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (1, 1, 5, 1, 2, 0, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
args=(self.tau_a, self.delta_a, error_weights), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex})
res_parameters = self.base_model.get_parameters()
return fmin, res_parameters
def fit_routine_4(self, cell_data=None, start_parameters=None):
global SAVE_PATH_PREFIX
SAVE_PATH_PREFIX = "fit_routine_4_"
# errors: [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
self.counter = 0
# fit only v_offset, mem_tau, input_scaling, dend_tau
if start_parameters is None:
x0 = np.array([0.02, 70, 0.001])
else:
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], start_parameters["dend_tau"]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 5, 15, 1, 2, 1, 0)
fmin = minimize(fun=self.cost_function_with_fixed_adaption_with_dend_tau,
args=(self.tau_a, self.delta_a, error_weights),
x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 400, "maxiter": 400})
res_parameters = fmin.x
# print_comparision_cell_model(cell_data, self.base_model.get_parameters())
self.counter = 0
x0 = np.array([self.tau_a,
self.delta_a, res_parameters[0]])
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 1, 1, 2, 2, 4, 2)
fmin = minimize(fun=self.cost_function_only_adaption,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001})
print(fmin)
print_comparision_cell_model(cell_data, self.base_model.get_parameters())
#
# # self.counter = 0
# # x0 = np.array([res_parameters[0],
# # res_parameters[1], self.tau_a,
# # self.delta_a, res_parameters[2]])
# # initial_simplex = create_init_simples(x0, search_scale=2)
# # error_weights = (1, 3, 1, 2, 1, 3, 2)
# # fmin = minimize(fun=self.cost_function_all_without_noise,
# # args=(error_weights,), x0=x0, method="Nelder-Mead",
# # options={"initial_simplex": initial_simplex, "xatol": 0.001})
# # res_parameters = self.base_model.get_parameters()
# #
# # print_comparision_cell_model(cell_data, self.base_model.get_parameters())
#
# self.counter = 0
# x0 = np.array([res_parameters[0], start_parameters["noise_strength"],
# res_parameters[1], res_parameters[2],
# res_parameters[3], res_parameters[4]])
# initial_simplex = create_init_simples(x0, search_scale=2)
# error_weights = (0, 1, 2, 1, 1, 3, 2)
# fmin = minimize(fun=self.cost_function_all,
# args=(error_weights,), x0=x0, method="Nelder-Mead",
# options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxiter": 599})
# res_parameters = self.base_model.get_parameters()
return fmin, self.base_model.get_parameters()