add way to test routines / add routines

a.ott 2020-05-27 09:08:02 +02:00
parent 043303228e
commit 9c434117fa

Fitter.py

@@ -13,10 +13,56 @@ import os
SAVE_PATH_PREFIX = ""
FIT_ROUTINE = ""
def main():
run_with_real_data()
# fitter = Fitter()
# run_with_real_data(fitter, fitter.fit_routine_3)
test_fit_routines()
def test_fit_routines():
fitter = Fitter()
names = ("routine_1", "routine_2", "routine_3")
global FIT_ROUTINE
for i, routine in enumerate([fitter.fit_routine_1, fitter.fit_routine_2, fitter.fit_routine_3]):
FIT_ROUTINE = names[i]
run_with_real_data(fitter, routine)
best = []
cells = sorted(os.listdir("test_routines/" + names[0] + "/"))
for name in names:
save_path = "test_routines/" + name + "/"
cell_best = []
for directory in sorted(os.listdir(save_path)):
path = os.path.join(save_path, directory)
if os.path.isdir(path):
cell_best.append(find_best_run(path))
best.append(cell_best)
with open("test_routines/comparision.csv", "w") as res_file:
res_file.write("routine")
for cell in cells:
res_file.write("," + cell)
for i, routine_results in enumerate(best):
res_file.write(names[i])
for cell_best in routine_results:
res_file.write("," + str(cell_best))
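For orientation, the comparison file is meant to hold one header row with the cell folder names and then one row of best fmin values per routine, roughly like the sketch below (cell names and values are invented). Note that the writes above contain no newline characters, so as committed all fields end up on a single line.

routine,2012-06-27-ah,2012-12-13-af
routine_1,0.873,1.021
routine_2,0.912,0.954
routine_3,0.835,1.108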
def find_best_run(cell_path):
values = []
for directory in sorted(os.listdir(cell_path)):
start_par_path = os.path.join(cell_path, directory)
if os.path.isdir(start_par_path):
values.append(float(start_par_path.split("_")[-1]))
return min(values)
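find_best_run() relies on a naming convention in which every start-parameter run is saved to a directory whose name ends, after the last underscore, with the achieved error value. A minimal standalone illustration (the directory name is hypothetical):

# hypothetical run directory; the trailing number is the final error (fmin) of that run
example_path = "test_routines/routine_1/2012-06-27-ah/start_parameters_2_err_0.8731"
fmin_value = float(example_path.split("_")[-1])  # -> 0.8731, the value find_best_run() collects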
def iget_start_parameters():
@@ -39,7 +85,7 @@ def iget_start_parameters():
"delta_a": delta_a}
def run_with_real_data():
def run_with_real_data(fitter, fit_routine_func, parallel=False):
count = 0
for cell_data in icelldata_of_dir("./data/"):
count += 1
@@ -53,7 +99,9 @@ def run_with_real_data():
print("NO V1 TRACE FOUND")
continue
results_path = "results/" + os.path.split(cell_data.get_data_path())[-1] + "/"
global FIT_ROUTINE
# results_path = "results/" + os.path.split(cell_data.get_data_path())[-1] + "/"
results_path = "test_routines/" + FIT_ROUTINE + "/" + os.path.split(cell_data.get_data_path())[-1] + "/"
print("results at:", results_path)
if not os.path.exists(results_path):
@@ -77,8 +125,8 @@ def run_with_real_data():
print("START PARAMETERS:", start_par_count)
start_time = time.time()
fitter = Fitter()
fmin, parameters = fitter.fit_model_to_data(cell_data, start_parameters)
# fitter = Fitter()
fmin, parameters = fitter.fit_model_to_data(cell_data, start_parameters, fit_routine_func)
print(fmin)
print(parameters)
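With the global FIT_ROUTINE set by test_fit_routines(), each cell's output now lands in a routine-specific folder, and the fit itself is delegated to whichever routine was passed in. A hypothetical example of the new path construction (the cell folder name is made up):

import os

FIT_ROUTINE = "routine_2"                    # set by test_fit_routines()
data_path = "./data/2012-06-27-ah"           # hypothetical cell recording folder
results_path = "test_routines/" + FIT_ROUTINE + "/" + os.path.split(data_path)[-1] + "/"
# -> "test_routines/routine_2/2012-06-27-ah/"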
@@ -193,16 +241,16 @@ class Fitter:
# counts how often the cost_function was called
self.counter = 0
def fit_model_to_data(self, data: CellData, start_parameters=None):
self.eod_freq = data.get_eod_frequency()
def set_data_reference_values(self, cell_data: CellData):
self.eod_freq = cell_data.get_eod_frequency()
data_baseline = get_baseline_class(data)
data_baseline = get_baseline_class(cell_data)
self.baseline_freq = data_baseline.get_baseline_frequency()
self.vector_strength = data_baseline.get_vector_strength()
self.serial_correlation = data_baseline.get_serial_correlation(self.sc_max_lag)
self.coefficient_of_variation = data_baseline.get_coefficient_of_variation()
fi_curve = get_fi_curve_class(data, data.get_fi_contrasts())
fi_curve = get_fi_curve_class(cell_data, cell_data.get_fi_contrasts())
self.fi_contrasts = fi_curve.stimulus_values
self.f_inf_values = fi_curve.f_inf_frequencies
self.f_inf_slope = fi_curve.get_f_inf_slope()
@@ -219,17 +267,17 @@ class Fitter:
adaption = Adaption(fi_curve)
self.tau_a = adaption.get_tau_real()
return self.fit_routine_5(data, start_parameters)
def fit_model_to_data(self, data: CellData, start_parameters, fit_routine_func: callable):
self.set_data_reference_values(data)
return fit_routine_func(start_parameters)
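The old single entry point is split into set_data_reference_values() plus this thin dispatcher, so any of the routines can be plugged in. A hypothetical call, mirroring what run_with_real_data() now does:

fitter = Fitter()
# cell_data and start_parameters as produced by icelldata_of_dir() and iget_start_parameters()
fmin, parameters = fitter.fit_model_to_data(cell_data, start_parameters, fitter.fit_routine_3)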
def fit_routine_5(self, cell_data=None, start_parameters=None):
def fit_routine_1(self, start_parameters):
self.counter = 0
# fit only v_offset, mem_tau, input_scaling, dend_tau
if start_parameters is None:
x0 = np.array([0.02, 70, 0.001])
else:
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], self.tau_a, start_parameters["delta_a"],
start_parameters["dend_tau"]])
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], self.tau_a, start_parameters["delta_a"],
start_parameters["dend_tau"]])
initial_simplex = create_init_simples(x0, search_scale=2)
# error_list = [error_bf, error_vs, error_sc, error_cv,
@@ -239,31 +287,58 @@ class Fitter:
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 200, "maxiter": 400})
if cell_data is not None:
print("##### After step 1: (Everything)")
# print_comparision_cell_model(cell_data, res_parameters_step2)
return fmin, self.base_model.get_parameters()
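create_init_simples() itself is not part of this diff; for readers unfamiliar with the option, scipy's Nelder-Mead accepts an explicit initial_simplex of shape (n+1, n). A generic sketch of building one around x0 with a search scale, purely illustrative and not the repository's implementation:

import numpy as np

def init_simplex_sketch(x0, search_scale=2):
    # n+1 vertices for an n-dimensional Nelder-Mead search: the start point
    # plus one vertex per coordinate, stretched by search_scale
    x0 = np.asarray(x0, dtype=float)
    vertices = [x0]
    for i in range(len(x0)):
        vertex = x0.copy()
        vertex[i] = vertex[i] * search_scale if vertex[i] != 0 else 0.1
        vertices.append(vertex)
    return np.array(vertices)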
def fit_model(self, x0=None, initial_simplex=None, fit_adaption=False):
def fit_routine_2(self, start_parameters):
self.counter = 0
# fit only v_offset, mem_tau, input_scaling, dend_tau
if fit_adaption:
if x0 is None:
x0 = np.array([0.02, 0.03, 70, self.tau_a, 0.05])
if initial_simplex is None:
initial_simplex = create_init_simples(x0)
x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
start_parameters["input_scaling"], self.tau_a, start_parameters["delta_a"],
start_parameters["dend_tau"]])
initial_simplex = create_init_simples(x0, search_scale=2)
fmin = minimize(fun=self.cost_function_all, x0=x0,
method="Nelder-Mead", options={"initial_simplex": initial_simplex})
else:
if x0 is None:
x0 = np.array([0.02, 0.03, 70, 0.05])
if initial_simplex is None:
initial_simplex = create_init_simples(x0)
# error_list = [error_bf, error_vs, error_sc, error_cv,
# error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
error_weights = (0, 2, 2, 2, 1, 1, 1, 1)
fmin = minimize(fun=self.cost_function_all,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 100, "maxiter": 400})
fmin = minimize(fun=self.cost_function_with_fixed_adaption_tau, x0=x0, args=(self.tau_a,),
method="Nelder-Mead", options={"initial_simplex": initial_simplex})
best_pars = fmin.x
x0 = np.array([best_pars[0], best_pars[2], # mem_tau, input_scaling
best_pars[4], best_pars[5]]) # delta_a, dend_tau
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 1, 1, 1, 3, 2, 3, 2)
fmin = minimize(fun=self.cost_function_only_adaption,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 100, "maxiter": 400})
return fmin, self.base_model.get_parameters()
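fit_routine_2 runs two Nelder-Mead passes: first all six parameters with the baseline statistics (vector strength, serial correlation, CV) weighted up, then a reduced pass over mem_tau, input_scaling, delta_a and dend_tau with the f-I curve errors weighted up. The index mapping between the two stages, read off the code above (comments only, for orientation):

# stage 1: x  = [mem_tau, noise_strength, input_scaling, tau_a, delta_a, dend_tau]
# stage 2: x0 = [best_pars[0], best_pars[2], best_pars[4], best_pars[5]]
#             = [mem_tau, input_scaling, delta_a, dend_tau]
# which is exactly the order cost_function_only_adaption() expects:
#   X[0] -> mem_tau, X[1] -> input_scaling, X[2] -> delta_a, X[3] -> dend_tau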
def fit_routine_3(self, start_parameters):
self.counter = 0
x0 = np.array([start_parameters["mem_tau"], start_parameters["input_scaling"], # mem_tau, input_scaling
start_parameters["delta_a"], start_parameters["dend_tau"]]) # delta_a, dend_tau
initial_simplex = create_init_simples(x0, search_scale=2)
error_weights = (0, 1, 1, 1, 3, 2, 3, 2)
fmin = minimize(fun=self.cost_function_only_adaption,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 100, "maxiter": 400})
best_pars = fmin.x
x0 = np.array([best_pars[0], start_parameters["noise_strength"],
best_pars[1], self.tau_a, best_pars[2],
best_pars[3]])
initial_simplex = create_init_simples(x0, search_scale=2)
# error_list = [error_bf, error_vs, error_sc, error_cv,
# error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
error_weights = (0, 2, 2, 2, 1, 1, 1, 1)
fmin = minimize(fun=self.cost_function_all,
args=(error_weights,), x0=x0, method="Nelder-Mead",
options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 100, "maxiter": 400})
return fmin, self.base_model.get_parameters()
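fit_routine_3 runs the same two passes in reverse order: the reduced four-parameter fit with the f-I curve errors weighted up comes first, followed by the full six-parameter fit. Spelling the two weight tuples out against the error order documented in the comments may help when tuning them (the names are only a readability aid, not identifiers from the repository):

ERROR_NAMES = ("bf", "vs", "sc", "cv", "f_inf", "f_inf_slope", "f_zero", "f_zero_slope")
WEIGHTS_BASELINE_HEAVY = dict(zip(ERROR_NAMES, (0, 2, 2, 2, 1, 1, 1, 1)))  # full six-parameter passes
WEIGHTS_FI_CURVE_HEAVY = dict(zip(ERROR_NAMES, (0, 1, 1, 1, 3, 2, 3, 2)))  # reduced "adaption" passes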
@@ -308,9 +383,10 @@ class Fitter:
return sum(error_list)
def cost_function_only_adaption(self, X, error_weights=None):
self.base_model.set_variable("tau_a", X[0])
self.base_model.set_variable("delta_a", X[1])
self.base_model.set_variable("mem_tau", X[2])
self.base_model.set_variable("mem_tau", X[0])
self.base_model.set_variable("input_scaling", X[1])
self.base_model.set_variable("delta_a", X[2])
self.base_model.set_variable("dend_tau", X[3])
base_stimulus = SinusoidalStepStimulus(self.eod_freq, 0)
# find right v-offset
@@ -330,9 +406,9 @@ class Fitter:
model.set_variable("noise_strength", X[1])
model.set_variable("input_scaling", X[2])
model.set_variable("delta_a", X[3])
model.set_variable("dend_tau", X[4])
model.set_variable("tau_a", tau_a)
base_stimulus = SinusoidalStepStimulus(self.eod_freq, 0)
# find right v-offset
test_model = model.get_model_copy()
@@ -386,14 +462,17 @@ class Fitter:
return sum(error_list)
def calculate_errors(self, error_weights=None):
model_baseline = get_baseline_class(self.base_model, self.eod_freq)
def calculate_errors(self, error_weights=None, model=None):
if model is None:
model = self.base_model
model_baseline = get_baseline_class(model, self.eod_freq)
baseline_freq = model_baseline.get_baseline_frequency()
vector_strength = model_baseline.get_vector_strength()
serial_correlation = model_baseline.get_serial_correlation(self.sc_max_lag)
coefficient_of_variation = model_baseline.get_coefficient_of_variation()
fi_curve_model = get_fi_curve_class(self.base_model, self.fi_contrasts, self.eod_freq)
fi_curve_model = get_fi_curve_class(model, self.fi_contrasts, self.eod_freq)
f_zeros = fi_curve_model.get_f_zero_frequencies()
f_infinities = fi_curve_model.get_f_inf_frequencies()
f_infinities_slope = fi_curve_model.get_f_inf_slope()
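The new optional model argument means the error calculation no longer has to run on self.base_model; any model instance can be scored. A hypothetical use, relying on get_model_copy() which also appears earlier in the diff:

# score a copy without disturbing the fitter's working model (illustrative only)
candidate = fitter.base_model.get_model_copy()
errors = fitter.calculate_errors(error_weights=(0, 1, 1, 1, 3, 2, 3, 2), model=candidate)
total_error = sum(errors)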
@@ -424,31 +503,31 @@ class Fitter:
if error_weights is not None and len(error_weights) == len(error_list):
for i in range(len(error_weights)):
error_list[i] = error_list[i] * error_weights[i]
if len(error_weights) != len(error_list):
warn("Error weights had different length than errors and were ignored!")
error = sum(error_list)
self.counter += 1
if self.counter % 200 == 0: # and False:
print("\nCost function run times: {:}\n".format(self.counter),
"Total weighted error: {:.4f}\n".format(error),
"Baseline frequency - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
self.baseline_freq, baseline_freq, error_bf),
"Vector strength - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.vector_strength, vector_strength, error_vs),
"Serial correlation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.serial_correlation[0], serial_correlation[0], error_sc),
"Coefficient of variation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
self.coefficient_of_variation, coefficient_of_variation, error_cv),
"f-infinity slope - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
self.f_inf_slope, f_infinities_slope, error_f_inf_slope),
"f-infinity values:\nexpected:", np.around(self.f_inf_values), "\ncurrent: ", np.around(f_infinities),
"\nerror: {:.3f}\n".format(error_f_inf),
"f-zero slope - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
self.f_zero_slope_at_straight, f_zero_slope_at_straight, error_f_zero_slope_at_straight),
"f-zero values:\nexpected:", np.around(self.f_zero_values), "\ncurrent: ", np.around(f_zeros),
"\nerror: {:.3f}".format(error_f_zero))
elif error_weights is not None:
warn("Error: weights had different length than errors and were ignored!")
# error = sum(error_list)
# self.counter += 1
# if self.counter % 200 == 0: # and False:
# print("\nCost function run times: {:}\n".format(self.counter),
# "Total weighted error: {:.4f}\n".format(error),
# "Baseline frequency - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
# self.baseline_freq, baseline_freq, error_bf),
# "Vector strength - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
# self.vector_strength, vector_strength, error_vs),
# "Serial correlation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
# self.serial_correlation[0], serial_correlation[0], error_sc),
# "Coefficient of variation - expected: {:.2f}, current: {:.2f}, error: {:.3f}\n".format(
# self.coefficient_of_variation, coefficient_of_variation, error_cv),
# "f-infinity slope - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
# self.f_inf_slope, f_infinities_slope, error_f_inf_slope),
# "f-infinity values:\nexpected:", np.around(self.f_inf_values), "\ncurrent: ", np.around(f_infinities),
# "\nerror: {:.3f}\n".format(error_f_inf),
# "f-zero slope - expected: {:.0f}, current: {:.0f}, error: {:.3f}\n".format(
# self.f_zero_slope_at_straight, f_zero_slope_at_straight, error_f_zero_slope_at_straight),
# "f-zero values:\nexpected:", np.around(self.f_zero_values), "\ncurrent: ", np.around(f_zeros),
# "\nerror: {:.3f}".format(error_f_zero))
return error_list
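The weighting block now only warns when a weight tuple of the wrong length is passed (the previous standalone check would raise when error_weights was None), and the verbose progress printing is commented out. Reduced to a standalone sketch, the new control flow is:

from warnings import warn

def apply_error_weights(error_list, error_weights=None):
    # mirrors the logic above: apply weights only if the lengths match, warn otherwise
    if error_weights is not None and len(error_weights) == len(error_list):
        error_list = [err * w for err, w in zip(error_list, error_weights)]
    elif error_weights is not None:
        warn("Error: weights had different length than errors and were ignored!")
    return error_list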