Commit ad016b169b by a.ott, 2020-09-04 17:54:29 +02:00 (parent be35a13226)
20 changed files with 1158 additions and 477 deletions


@ -70,7 +70,7 @@ class CellData:
self.base_traces = None
self.base_spikes = None
# self.fi_traces = None
self.fi_traces = None
self.fi_intensities = None
self.fi_spiketimes = None
self.fi_trans_amplitudes = None
@ -155,11 +155,10 @@ class CellData:
return isis
def get_fi_traces(self):
raise NotImplementedError("CellData:get_fi_traces():\n" +
"Getting the Fi-Traces currently overflows the RAM and causes swapping! Reimplement if really needed!")
# if self.fi_traces is None:
# self.fi_traces = self.parser.get_fi_curve_traces()
# return self.fi_traces
if self.fi_traces is None:
warn("Fi traces not sorted in the same way as the spiketimes!!!")
self.fi_traces = self.parser.get_fi_curve_traces()
return self.fi_traces
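# Annotation (not part of the original commit): the earlier implementation,
# commented out above, was disabled because loading all fi traces can
# overflow the RAM; the warning reminds callers that the trace order differs
# from the spiketimes returned by get_fi_spiketimes().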
def get_fi_spiketimes(self):
self.__read_fi_spiketimes_info__()
@ -170,6 +169,7 @@ class CellData:
return self.fi_intensities
def get_fi_contrasts(self):
if self.fi_intensities is None:
self.__read_fi_spiketimes_info__()
contrast = []
for i in range(len(self.fi_intensities)):
@ -296,8 +296,23 @@ class CellData:
if self.fi_spiketimes is None:
self.fi_trans_amplitudes, self.fi_intensities, self.fi_spiketimes = self.parser.get_fi_curve_spiketimes()
# self.fi_intensities, self.fi_spiketimes, self.fi_trans_amplitudes = hf.merge_similar_intensities(
# intensities, spiketimes, trans_amplitudes)
if os.path.exists(self.get_data_path() + "/redetected_spikes.npy"):
print("overwriting fi_spiketimes with redetected ones.")
contrasts = self.get_fi_contrasts()
spikes = np.load(self.get_data_path() + "/redetected_spikes.npy", allow_pickle=True)
trace_contrasts_idx = np.load(self.get_data_path() + "/fi_traces_contrasts.npy", allow_pickle=True)
trace_max_similarity = np.load(self.get_data_path() + "/fi_traces_contrasts_similarity.npy", allow_pickle=True)
spiketimes = []
for i in range(len(contrasts)):
contrast_list = []
for j in range(len(trace_contrasts_idx)):
if trace_contrasts_idx[j] == i and trace_max_similarity[j][0] > trace_max_similarity[j][1] + 0.15:
contrast_list.append(spikes[j])
spiketimes.append(contrast_list)
self.fi_spiketimes = spiketimes
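# Annotation (not part of the original commit): a redetected trace j is
# assigned to contrast i only when its best similarity score exceeds the
# second-best score by a margin of 0.15, i.e. only unambiguously matched
# traces overwrite the stored spiketimes.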
def __read_sam_info__(self):
if self.sam_spiketimes is None:

Figures_results.py (new file, 129 lines)

@ -0,0 +1,129 @@
import numpy as np
import matplotlib.pyplot as plt
from analysis import get_fit_info, get_behaviour_values, calculate_percent_errors
def main():
dir_path = "results/final_1/"
fits_info = get_fit_info(dir_path)
cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
# behaviour_overview_pairs(cell_behaviour, model_behaviour)
# errors = calculate_percent_errors(fits_info)
# create_boxplots(errors)
def create_boxplots(errors):
labels = ["{}_n:{}".format(k, len(errors[k])) for k in sorted(errors.keys())]
for k in sorted(errors.keys()):
print("{}: median %-error: {:.2f}".format(k, np.median(errors[k])))
y_values = [errors[k] for k in sorted(errors.keys())]
plt.boxplot(y_values)
plt.xticks(np.arange(1, len(y_values)+1, 1), labels, rotation=45)
plt.tight_layout()
plt.show()
plt.close()
def behaviour_overview_pairs(cell_behaviour, model_behaviour):
# behaviour_keys = ["Burstiness", "coefficient_of_variation", "serial_correlation",
# "vector_strength", "f_inf_slope", "f_zero_slope", "baseline_frequency"]
pairs = [("baseline_frequency", "vector_strength", "serial_correlation"),
("Burstiness", "coefficient_of_variation"),
("f_inf_slope", "f_zero_slope")]
for pair in pairs:
cell = []
model = []
for behaviour in pair:
cell.append(cell_behaviour[behaviour])
model.append(model_behaviour[behaviour])
overview_pair(cell, model, pair)
def overview_pair(cell, model, titles):
fig = plt.figure(figsize=(8, 6))
columns = len(cell)
# Add a gridspec with two rows and `columns` columns, using a 3 to 7
# height ratio between the marginal histogram axes and the main axes.
# Also adjust the subplot parameters for a compact layout.
gs = fig.add_gridspec(2, columns, width_ratios=[5] * columns, height_ratios=[3, 7],
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.2, hspace=0.05)
for i in range(len(cell)):
ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell[i], model[i], ax, ax_histx, titles[i])
plt.tight_layout()
plt.show()
def grouped_error_overview_behaviour_dist(cell_behaviours, model_behaviours):
# start with a square Figure
fig = plt.figure(figsize=(12, 12))
rows = 4
columns = 2
# Add a gridspec with rows*2 rows and `columns` columns, using a 3 to 7
# height ratio between each marginal histogram axis and its main axis.
# Also adjust the subplot parameters for a compact layout.
gs = fig.add_gridspec(rows*2, columns, width_ratios=[5]*columns, height_ratios=[3, 7] * rows,
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.2, hspace=0.5)
for i, behaviour in enumerate(sorted(cell_behaviours.keys())):
col = int(np.floor(i / rows))
row = i - rows*col
ax = fig.add_subplot(gs[row*2 + 1, col])
ax_histx = fig.add_subplot(gs[row*2, col])
# use the previously defined function
scatter_hist(cell_behaviours[behaviour], model_behaviours[behaviour], ax, ax_histx, behaviour)
# plt.tight_layout()
plt.show()
def scatter_hist(cell_values, model_values, ax, ax_histx, behaviour, ax_histy=None):
# copied from matplotlib
# no labels
ax_histx.tick_params(axis="x", labelbottom=False)  # fixed: tick_params expects "x"/"y"/"both", not a data name
# ax_histy.tick_params(axis="model_values", labelleft=False)
# the scatter plot:
ax.scatter(cell_values, model_values)
minimum = min(min(cell_values), min(model_values))
maximum = max(max(cell_values), max(model_values))
ax.plot((minimum, maximum), (minimum, maximum), color="grey")
ax.set_xlabel("cell")
ax.set_ylabel("model")
# now determine nice limits by hand:
binwidth = 0.25
xymax = max(np.max(np.abs(cell_values)), np.max(np.abs(model_values)))
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
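# Annotation (not part of the original commit): 'binwidth' and 'bins' are
# computed here but not passed to the hist() calls below; presumably
# hist(..., bins=bins) was intended.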
ax_histx.hist(cell_values, color="blue", alpha=0.5)
ax_histx.hist(model_values, color="orange", alpha=0.5)
# ax_histx.set_xticklabels([])
# ax_histx.set_xticks(ax.get_xticks())
# ax_histx.set_xlim(ax.get_xlim())
ax_histx.set_title(behaviour)
# ax_histy.hist(y, bins=bins, orientation='horizontal')
if __name__ == '__main__':
main()

Fitter.py (122 lines changed)

@ -294,68 +294,68 @@ class Fitter:
return error_f0_curve
def calculate_f0_curve_error_new(self, model, fi_curve_model):
buffer = 0.05
test_duration = 0.05
times, freqs = fi_curve_model.get_mean_time_and_freq_traces()
freq_prediction = np.array(freqs[self.f_zero_curve_contrast_idx])
time_prediction = np.array(times[self.f_zero_curve_contrast_idx])
if len(time_prediction) == 0:
return 200
stimulus_start = fi_curve_model.get_stimulus_start() - time_prediction[0]
model_start_idx = int((stimulus_start - buffer) / model.get_sampling_interval())
model_end_idx = int((stimulus_start + buffer + test_duration) / model.get_sampling_interval())
if len(time_prediction) == 0 or len(time_prediction) < model_end_idx \
or time_prediction[0] > fi_curve_model.get_stimulus_start():
error_f0_curve = 200
return error_f0_curve
model_curve = np.array(freq_prediction[model_start_idx:model_end_idx])
# prepare cell frequency_curve:
stimulus_start = self.recording_times[1] - self.f_zero_curve_time[0]
cell_start_idx = int((stimulus_start - buffer) / self.data_sampling_interval)
cell_end_idx = int((stimulus_start - buffer + test_duration) / self.data_sampling_interval)
if round(model.get_sampling_interval() % self.data_sampling_interval, 4) == 0:
step_cell = int(round(model.get_sampling_interval() / self.data_sampling_interval))
else:
raise ValueError("Model sampling interval is not a multiple of data sampling interval.")
cell_curve = self.f_zero_curve_freq[cell_start_idx:cell_end_idx:step_cell]
cell_time = self.f_zero_curve_time[cell_start_idx:cell_end_idx:step_cell]
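# Annotation (not part of the original commit): the cell trace is downsampled
# to the model's sampling interval by taking every step_cell-th sample, which
# is why the modulo check above requires the two intervals to be commensurable.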
cell_curve_std = np.std(self.f_zero_curve_freq)
model_curve_std = np.std(freq_prediction)
model_limit = self.baseline_freq + model_curve_std
cell_limit = self.baseline_freq + cell_curve_std
cell_full_precision = np.array(self.f_zero_curve_freq[cell_start_idx:cell_end_idx])
cell_points_above = cell_full_precision > cell_limit
cell_area_above = sum(cell_full_precision[cell_points_above]) * self.data_sampling_interval
model_points_above = model_curve > model_limit
model_area_above = sum(model_curve[model_points_above]) * model.get_sampling_interval()
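# Annotation (not part of the original commit): the f0-curve error sums, for
# cell and model separately, the frequency values exceeding baseline frequency
# plus one standard deviation (times the sampling interval) and returns the
# absolute difference of those two areas below.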
# plt.close()
# plt.plot(cell_time, cell_curve, color="blue")
# plt.plot((cell_time[0], cell_time[-1]), (cell_limit, cell_limit),
# color="lightblue", label="area: {:.2f}".format(cell_area_above))
# plt.plot(time_prediction[model_start_idx:model_end_idx], model_curve, color="orange")
# plt.plot((time_prediction[model_start_idx], time_prediction[model_end_idx]), (model_limit, model_limit),
# color="red", label="area: {:.2f}".format(model_area_above))
# plt.legend()
# plt.title("Error: {:.2f}".format(abs(model_area_above - cell_area_above) / 0.02))
# plt.savefig("./figures/f_zero_curve_error_{}.png".format(time.strftime("%H:%M:%S")))
# plt.close()
return abs(model_area_above - cell_area_above)
def calculate_list_error(fit, reference):


@ -11,7 +11,7 @@ import functions as fu
import matplotlib.pyplot as plt
def get_best_fit(folder_path, use_comparable_error=True):
def get_best_fit(folder_path, use_comparable_error=False):
min_err = np.inf
min_item = ""
for item in os.listdir(folder_path):


@ -31,36 +31,16 @@ def main():
fits_info = get_fit_info(dir_path)
total_fits = len(fits_info)
for cell in sorted(fits_info.keys()):
model_values = fits_info[cell][1]
# if model_values["vector_strength"] < 0.4:
# del fits_info[cell]
# print("excluded because of vs")
#
# elif model_values["f_inf_slope"] / fits_info[cell][2]["f_inf_slope"] > 2:
# del fits_info[cell]
# print("f_inf bad")
#
# elif abs((model_values["baseline_frequency"] / fits_info[cell][2]["baseline_frequency"]) - 1) > 0.05:
# del fits_info[cell]
# print("baseline freq bad")
#
# elif fits_info[cell][2]["Burstiness"] == 0 or abs((model_values["Burstiness"] / fits_info[cell][2]["Burstiness"]) - 1) > 0.65:
# del fits_info[cell]
# print("burstiness bad")
# plot_overview_plus_hist(fits_info)
cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
print("'good' fits of total fits: {} / {}".format(len(fits_info), total_fits))
errors = calculate_percent_errors(fits_info)
create_boxplots(errors)
labels, corr_values, corrected_p_values = behaviour_correlations(fits_info, model_values=False)
create_correlation_plot(labels, corr_values, corrected_p_values)
labels, corr_values, corrected_p_values = parameter_correlations(fits_info)
create_correlation_plot(labels, corr_values, corrected_p_values)
create_parameter_distributions(get_parameter_values(fits_info))
cell_b, model_b = get_behaviour_values(fits_info)
@ -204,87 +184,6 @@ def create_correlation_plot(labels, correlations, p_values):
plt.show()
def create_boxplots(errors):
labels = ["{}_n:{}".format(k, len(errors[k])) for k in sorted(errors.keys())]
for k in sorted(errors.keys()):
print("{}: median %-error: {:.2f}".format(k, np.median(errors[k])))
y_values = [errors[k] for k in sorted(errors.keys())]
plt.boxplot(y_values)
plt.xticks(np.arange(1, len(y_values)+1, 1), labels, rotation=45)
plt.tight_layout()
plt.show()
plt.close()
def plot_overview_plus_hist(fits_info):
pairs = {}
for cell in sorted(fits_info.keys()):
for behaviour in fits_info[cell][1].keys():
if behaviour not in pairs.keys():
pairs[behaviour] = [[], []]
# model_value
pairs[behaviour][1].append(fits_info[cell][1][behaviour])
# cell value
pairs[behaviour][0].append(fits_info[cell][2][behaviour])
for behaviour in pairs.keys():
error_overview_with_behaviour_dist(pairs[behaviour][0], pairs[behaviour][1], behaviour)
def error_overview_with_behaviour_dist(x, y, title):
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a square Figure
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes(rect_scatter)
ax_histx = fig.add_axes(rect_histx, sharex=ax)
ax_histy = None # fig.add_axes(rect_histy, sharey=ax)
# use the previously defined function
scatter_hist(x, y, ax, ax_histx, ax_histy)
plt.title(title)
plt.show()
def scatter_hist(cell_values, model_values, ax, ax_histx, ax_histy):
# copied from matplotlib
# no labels
ax_histx.tick_params(axis="x", labelbottom=False)  # fixed: tick_params expects "x"/"y"/"both", not a data name
# ax_histy.tick_params(axis="model_values", labelleft=False)
# the scatter plot:
ax.scatter(cell_values, model_values)
minimum = min(min(cell_values), min(model_values))
maximum = max(max(cell_values), max(model_values))
ax.plot((minimum, maximum), (minimum, maximum), color="grey")
ax.set_xlabel("cell value")
ax.set_ylabel("model value")
# now determine nice limits by hand:
binwidth = 0.25
xymax = max(np.max(np.abs(cell_values)), np.max(np.abs(model_values)))
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
ax_histx.hist(cell_values, color="blue", alpha=0.5)
ax_histx.hist(model_values, color="orange", alpha=0.5)
# ax_histy.hist(y, bins=bins, orientation='horizontal')
def create_parameter_distributions(par_values):
fig, axes = plt.subplots(4, 2)


@ -13,8 +13,8 @@ def main():
# plot_visualizations("cells/")
# full_overview("cells/master_table.csv", "cells/")
# recalculate_saved_preanalysis("data/final/")
metadata_analysis("data/final/")
recalculate_saved_preanalysis("data/final/")
# metadata_analysis("data/final/")
pass
@ -85,9 +85,9 @@ def count_with_dict(dictionary, key):
return dictionary
def recalculate_saved_preanalysis(data_folder):
for cell_data in icelldata_of_dir(data_folder, test_for_v1_trace=True):
for cell_data in icelldata_of_dir(data_folder, test_for_v1_trace=False):
print(cell_data.get_data_path())
baseline = BaselineCellData(cell_data)
baseline.save_values(cell_data.get_data_path())


@ -25,7 +25,7 @@ def precalculate_baseline_spiketimes():
thresholds_dict[name] = [thresh, min_length, step_size]
for cell_data in icelldata_of_dir("data/final/"):
for cell_data in icelldata_of_dir("data/final/", test_for_v1_trace=False):
name = os.path.basename(cell_data.get_data_path())
if name not in thresholds_dict.keys():
@ -36,7 +36,7 @@ def precalculate_baseline_spiketimes():
min_length = thresholds_dict[name][1]
split_step_size = thresholds_dict[name][2]
cell_data.get_base_spikes(threshold=thresh, min_length=min_length, split_step=split_step_size)
cell_data.get_base_spikes(threshold=thresh, min_length=min_length, split_step=split_step_size, re_calculate=True)
def choose_thresholds():

glm_prediction.py (new file, 76 lines)

@ -0,0 +1,76 @@
from ModelFit import get_best_fit
import numpy as np
import os
import pandas
import matplotlib.pyplot as plt
import statsmodels.api as sm
def main():
folder = "results/final_1/"
# input len(cells) x len(variables) 2D array
variable_order = ['Burstiness', 'baseline_frequency', 'coefficient_of_variation',
'f_inf_slope', 'f_zero_slope', 'serial_correlation', 'vector_strength']
behaviour, error = get_variables(folder, variable_order)
df_behaviour = pandas.DataFrame(behaviour, columns=variable_order)
# print(df)
gamma_glm = sm.GLM(error, df_behaviour, sm.families.Gamma())
fitted_model = gamma_glm.fit()
params = fitted_model.params
p_values = fitted_model.pvalues
print(p_values)
predicted = fitted_model.predict()
# for i in range(len(predicted)):
# print("err: {:.2f} - {:.2f} prediction".format(error[i], predicted[i]))
print(fitted_model.summary())
pass
def get_variables(folder, order):
variables = []
error_values = []
for cell in sorted(os.listdir(folder)):
fit = get_best_fit(folder + cell)
error = fit.get_error_value()
error_values.append(error)
cell_behaviour, _ = fit.get_behaviour_values()
cell_behaviour_variables = []
for b in order:
cell_behaviour_variables.append(cell_behaviour[b])
variables.append(np.array(cell_behaviour_variables, dtype=np.float64))
return np.array(variables), np.array(error_values, dtype=np.float64)
def till_shorthand():
# logit_GLM = sm.GLM(df[<result>], df[<params>], family=sm.families.Binomial())
# fitted_model = logit_GLM.fit()
#
# fitted_model.predict()
# # --> gets you the predicted values for df[<result>] based on the fitted model
#
#
# fitted_model.params
# # --> coefficients of the params (sorted by input order)
#
# fitted_model.pvalues
# # --> self-explanatory
#
# fitted_model.summary()
# # OR
# fitted_model.summary2()
# # --> overview of the fitted model
pass
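def glm_sketch():
    # Hedged sketch (annotation, not part of the original commit): a minimal,
    # self-contained version of the workflow outlined in till_shorthand(),
    # run on synthetic data. All names and values here are illustrative
    # assumptions, not taken from the repository.
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    x = sm.add_constant(rng.random((100, 2)))       # design matrix with intercept
    mu = np.exp(x @ np.array([0.1, 0.5, -0.3]))     # true mean under a log link
    y = rng.gamma(2.0, mu / 2.0)                    # Gamma noise with mean mu
    fitted = sm.GLM(y, x, family=sm.families.Gamma(sm.families.links.log())).fit()
    print(fitted.params)     # coefficients, ordered like the design-matrix columns
    print(fitted.pvalues)    # per-coefficient p-values
    print(fitted.summary())  # overview of the fitted model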
if __name__ == '__main__':
main()

random_models.py (new file, 103 lines)

@ -0,0 +1,103 @@
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from ModelFit import get_best_fit
import functions as fu
def main():
folder = "results/final_1/"
param_values = get_parameter_distributions(folder)
plot_distributions(param_values)
pass
def plot_distributions(param_values):
bin_number = 30
fig, axes = plt.subplots(len(param_values.keys()), 2)
for i, key in enumerate(sorted(param_values.keys())):
# normal hist:
values = param_values[key]
normal, n_bins, patches = axes[i, 0].hist(values, bins=bin_number, density=True)
axes[i, 0].set_title(key)
# fit gauss:
bin_width = np.mean(np.diff(n_bins))
middle_of_bins = n_bins + bin_width / 2
axes[i, 0].plot(middle_of_bins[:-1], normal, 'o')
try:
n_gauss_pars = fit_gauss(middle_of_bins[:-1], normal)
x = np.arange(min(param_values[key]), max(param_values[key]),
(max(param_values[key]) - min(param_values[key])) / 100)
axes[i, 0].plot(x, fu.gauss(x, n_gauss_pars[0], n_gauss_pars[1], n_gauss_pars[2]))
except RuntimeError as e:
pass
# log transformed:
if key != "v_offset":
log_values = np.log(np.array(param_values[key]))
log_trans, l_bins, patches = axes[i, 1].hist(log_values, bins=bin_number, density=True)
bin_width = np.mean(np.diff(l_bins))
middle_of_bins = l_bins + bin_width / 2
axes[i, 1].plot(middle_of_bins[:-1], log_trans, 'o')
try:
l_gauss_pars = fit_gauss(middle_of_bins[:-1], log_trans)
x = np.arange(min(log_values), max(log_values),
(max(log_values) - min(log_values)) / 100)
axes[i, 1].plot(x, fu.gauss(x, l_gauss_pars[0], l_gauss_pars[1], l_gauss_pars[2]))
except RuntimeError as e:
pass
plt.tight_layout()
plt.show()
plt.close()
def fit_gauss(x_values, y_values):
mean_v = np.mean(x_values)
std_v = np.std(x_values)
amp = max(y_values)
popt, pcov = curve_fit(fu.gauss, x_values, y_values, p0=(amp, mean_v, std_v))
return popt
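# Annotation (not part of the original commit): p0 is ordered
# (amplitude, mean, std), so fu.gauss is assumed to have the signature
# gauss(x, amplitude, mean, std).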
def get_parameter_distributions(folder, param_keys=None):
if param_keys is None:
param_keys = ["v_offset", 'input_scaling', 'dend_tau', 'tau_a', 'delta_a',
'refractory_period', 'noise_strength', 'mem_tau']
parameter_values = {}
for key in param_keys:
parameter_values[key] = []
for cell in sorted(os.listdir(folder)):
fit = get_best_fit(folder + cell)
final_params = fit.get_final_parameters()
for key in param_keys:
parameter_values[key].append(final_params[key])
return parameter_values
if __name__ == '__main__':
main()

redetect_fi_curve.py (new file, 609 lines)

@ -0,0 +1,609 @@
from CellData import CellData
from DataParserFactory import DatParser
import pyrelacs.DataLoader as Dl
import helperFunctions as hF
from thunderfish.eventdetection import detect_peaks
import os
import numpy as np
import matplotlib.pyplot as plt
TEST_SIMILARITY = True
REDETECT_SPIKES = True
TOP_PERCENTILE = 95
BOTTOM_PERCENTILE = 5
FACTOR = 0.5
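# Annotation (not part of the original commit): spikes are redetected with a
# per-trace threshold computed as the difference between a top and a bottom
# percentile of the voltage trace (see get_redetected_spikes below);
# FACTOR scales that difference in test_detected_spiketimes().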
# strange_cells:
# 2012-07-12-ap-invivo-1 # cell with a few traces with max similarity < 0.1
# 2012-12-13-af-invivo-1 # cell with MANY traces with max similarity < 0.1
# 2012-12-21-ak-invivo-1 # a few
# 2012-12-21-an-invivo-1 # a few
# 2013-02-21-ae-invivo-1 # "
# 2013-02-21-ag-invivo-1 # "
# 2014-06-06-ac-invivo-1 # a lot below 0.4 but a good bit above the 2nd max
def main():
test_fi_trace()
quit()
# find_and_save_best_threshold()
# quit()
directory = "data/final/"
skip_to = False
skip_to_cell = "2012-12-13-af-invivo-1"
threshold_file_path = "data/fi_thresholds.tsv"
thresholds_dict = load_fi_thresholds(threshold_file_path)
for cell in sorted(os.listdir(directory)):
# if cell != "2014-01-10-ab-invivo-1":
# continue
if skip_to:
if cell == skip_to_cell:
skip_to = False
else:
continue
cell_dir = directory + cell # "data/final/2012-04-20-af-invivo-1/"
print(cell_dir)
cell_data = CellData(cell_dir)
before = cell_data.get_delay()
after = cell_data.get_after_stimulus_duration()
# parser = DatParser(cell_dir)
if os.path.exists(cell_dir + "/redetected_spikes.npy") and not REDETECT_SPIKES:
spikes = np.load(cell_dir + "/redetected_spikes.npy", allow_pickle=True)
traces = np.load(cell_dir + "/fi_time_v1_traces.npy", allow_pickle=True)
else:
step = cell_data.get_sampling_interval()
threshold_pair = thresholds_dict[cell]
spikes, traces = get_redetected_spikes(cell_dir, before, after, step, threshold_pair)
np.save(cell_dir + "/redetected_spikes.npy", spikes, allow_pickle=True)
np.save(cell_dir + "/fi_time_v1_traces.npy", traces, allow_pickle=True)
print("redetection finished")
if os.path.exists(cell_dir + "/fi_traces_contrasts.npy") and not TEST_SIMILARITY:
trace_contrasts = np.load(cell_dir + "/fi_traces_contrasts.npy", allow_pickle=True)
trace_max_similarity = np.load(cell_dir + "/fi_traces_contrasts_similarity.npy", allow_pickle=True)
else:
cell_spiketrains = cell_data.get_fi_spiketimes()
# plt.plot(traces[0][0], traces[0][1])
# plt.eventplot(cell_spiketrains[0][0], colors="black", lineoffsets=max(traces[0][1]) + 1)
# plt.eventplot(spikes[0], colors="black", lineoffsets=max(traces[0][1]) + 2)
# plt.show()
# plt.close()
# unsorted_cell_spiketimes = get_unsorted_spiketimes(cell_dir + "/fispikes1.dat")
trace_contrasts = np.zeros(len(traces), dtype=int) - 1  # dtype=int: np.int is deprecated in newer numpy
trace_max_similarity = np.zeros((len(traces), 2)) - 1
for i, spiketrain in enumerate(spikes):
similarity, max_idx, maxima = find_matching_spiketrain(spiketrain, cell_spiketrains, cell_data.get_sampling_interval())
trace_contrasts[i] = max_idx[0]
trace_max_similarity[i] = maxima
# if trace_max_similarity[i] <= 0.05:
# step = cell_data.get_sampling_interval()
# test_detected_spiketimes(traces[i], spiketrain, cell_spiketrains[max_idx[0]], step)
np.save(cell_dir + "/fi_traces_contrasts.npy", trace_contrasts, allow_pickle=True)
np.save(cell_dir + "/fi_traces_contrasts_similarity.npy", trace_max_similarity, allow_pickle=True)
print("similarity test finished")
# step_size = cell_data.get_sampling_interval()
# steps = np.arange(0, 100.1, 0.5)
# percentiles_arr = np.zeros((len(traces), len(steps)))
# for i, trace_pair in enumerate(traces):
# v1_part = trace_pair[1][-int(np.rint(0.6/step_size)):]
# percentiles = np.percentile(np.array(v1_part) - np.median(v1_part), steps)
# percentiles_arr[i, :] = percentiles
# plt.plot(steps, percentiles)
# mean_perc = np.mean(percentiles_arr, axis=0)
# plt.plot(steps, mean_perc)
# plt.show()
# plt.close()
# bins = np.arange(0, 1.001, 0.05)
# plt.hist(trace_max_similarity, bins=bins)
# plt.show()
# plt.close()
#
#
# step_size = cell_data.get_sampling_interval()
# cell_spiketrains = cell_data.get_fi_spiketimes()
# contrasts = cell_data.get_fi_contrasts()
# tested_contrasts = []
# for i, redetected in enumerate(spikes):
# idx = trace_contrasts[i]
# if idx not in tested_contrasts:
# print("Contrast: {:.3f}".format(contrasts[idx]))
# test_detected_spiketimes(traces[i], redetected, cell_spiketrains[idx], step_size)
# tested_contrasts.append(idx)
def test_fi_trace():
# cell = "2012-12-13-af-invivo-1"
# cell = "2012-07-12-ap-invivo-1"
data_dir = "data/final/"
full_count = 0
contrast_trials_below_three = 0
differences_max_second_max = []
for cell in sorted(os.listdir(data_dir)):
cell_dir = data_dir + cell
# print(cell)
cell_data = CellData(cell_dir)
step_size = cell_data.get_sampling_interval()
spiketimes = cell_data.get_fi_spiketimes()
# trials = [len(x) for x in spiketimes]
# total = sum(trials)
spikes = np.load(cell_dir + "/redetected_spikes.npy", allow_pickle=True)
# print("Cell data total: {} vs {} # traces".format(total, len(spikes)))
traces = np.load(cell_dir + "/fi_time_v1_traces.npy", allow_pickle=True)
trace_contrasts = np.load(cell_dir + "/fi_traces_contrasts.npy", allow_pickle=True)
trace_max_similarity = np.load(cell_dir + "/fi_traces_contrasts_similarity.npy", allow_pickle=True)
count_good = 0
count_bad = 0
threshold_file_path = "data/fi_thresholds.tsv"
# thresholds_dict = load_fi_thresholds(threshold_file_path)
# spikes, traces = get_redetected_spikes(cell_dir, 0.2, 0.8, cell_data.get_sampling_interval(), thresholds_dict[cell])
# print("No preduration:", len(traces))
contrast_trials = {}
for i in range(len(traces)):
differences_max_second_max.append((trace_max_similarity[i][0] - trace_max_similarity[i][1]) / trace_max_similarity[i][0])
if trace_max_similarity[i][0] > trace_max_similarity[i][1] + 0.15 and trace_max_similarity[i][0] < trace_max_similarity[i][1] + 0.2:
print("max sim: {:.2f}, {:.2f}".format(trace_max_similarity[i][0], trace_max_similarity[i][1]))
if trace_max_similarity[i][0] > trace_max_similarity[i][1] + 0.15:
count_good += 1
if trace_contrasts[i] not in contrast_trials:
contrast_trials[trace_contrasts[i]] = 0
contrast_trials[trace_contrasts[i]] += 1
continue
count_bad += 1
# count_bad += 1
# event_offset = max(traces[i][1]) + 0.5
# fig, axes = plt.subplots(2, 1, sharex="all")
# axes[0].plot(traces[i][0], traces[i][1])
# axes[0].eventplot(spikes[i], lineoffsets=event_offset, colors="black")
#
# similarity, max_idx, maxima = find_matching_spiketrain(spikes[i], spiketimes, step_size)
# axes[0].eventplot(spiketimes[max_idx[0]][max_idx[1]], lineoffsets=event_offset + 1, colors="orange")
#
# # for o, st in enumerate(spiketimes[trace_contrasts[i]]):
# # axes[0].eventplot(st, lineoffsets=event_offset + 1 + o*1, colors="orange")
#
# time, v1, eod, local_eod, stimulus = get_ith_trace(cell_dir, i)
# axes[1].plot(time, local_eod)
#
# plt.show()
# plt.close()
# t, f = hF.calculate_time_and_frequency_trace(spikes[-1], cell_data.get_sampling_interval())
# plt.plot(t, f)
# plt.eventplot(spikes[-1], lineoffsets=max(traces[-1][1]) + 0.5)
# plt.show()
# plt.close()
if count_bad > 0:
over_seven = 0
below_three = 0
for key in contrast_trials.keys():
if contrast_trials[key] >= 7:
over_seven += 1
if contrast_trials[key] < 3:
below_three += 1
if over_seven < 7:
full_count += 1
print(cell)
print(contrast_trials)
print("good:", count_good, "bad:", count_bad)
if below_three > 1:
contrast_trials_below_three += 1
# print("good:", count_good, "bad:", count_bad)
print("Cells less than 7 trials in seven contrasts:", full_count)
print("Cells less than 3 trials in a contrast:", contrast_trials_below_three)
def get_ith_trace(cell_dir, i):
count = 0
for info, key, time, x in Dl.iload_traces(cell_dir, repro="FICurve", before=0.2, after=0.8):
if '----- Control --------------------------------------------------------' in info[0].keys():
pre_duration = float(
info[0]["----- Pre-Intensities ------------------------------------------------"]["preduration"][:-2])
if pre_duration != 0:
continue
elif "preduration" in info[0].keys():
pre_duration = float(info[0]["preduration"][:-2])
if pre_duration != 0:
continue
elif len(info) == 2 and "preduration" in info[1].keys():
pre_duration = float(info[1]["preduration"][:-2])
if pre_duration != 0:
continue
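# Annotation (not part of the original commit): the three branches above
# handle the different metadata layouts in which the "preduration" field is
# stored; only trials without a pre-stimulus phase (preduration == 0) are kept.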
if count < i:
count += 1
continue
# print(count)
# time, v1, eod, local_eod, stimulus
# print(info)
# print(key)
v1 = x[0]
eod = x[1]
local_eod = x[2]
stimulus = x[3]
return time, v1, eod, local_eod, stimulus
def load_fi_thresholds(threshold_file_path):
thresholds_dict = {}
if os.path.exists(threshold_file_path):
with open(threshold_file_path, "r") as threshold_file:
for line in threshold_file:
line = line.strip()
line = line.split('\t')
name = line[0]
bottom_percentile = float(line[1])
top_percentile = float(line[2])
thresholds_dict[name] = [bottom_percentile, top_percentile]
# print("Already done:", name)
return thresholds_dict
def find_and_save_best_threshold():
base_path = "data/final/"
threshold_file_path = "data/fi_thresholds.tsv"
re_choose_thresholds = False
thresholds_dict = load_fi_thresholds(threshold_file_path)
count = 0
for item in sorted(os.listdir(base_path)):
if item in thresholds_dict.keys() and not re_choose_thresholds:
continue
count += 1
print("cells to do:", count)
for item in sorted(os.listdir(base_path)):
if item in thresholds_dict.keys() and not re_choose_thresholds and not thresholds_dict[item][0] < 10:
print("Already done:", item)
continue
cell_dir = base_path + item
# starting assumptions:
standard_top_percentile = 95
threshold_pairs = [(40, 95), (50, 95), (60, 95)]
colors = ["blue", "orange", "red"]
if "thresholds" in item:
continue
print(item)
item_path = base_path + item
cell_data = CellData(item_path)
step_size = cell_data.get_sampling_interval()
trace_pairs = np.load(cell_dir + "/fi_time_v1_traces.npy", allow_pickle=True)
trace_contrasts = np.load(cell_dir + "/fi_traces_contrasts.npy", allow_pickle=True)
trace_max_similarity = np.load(cell_dir + "/fi_traces_contrasts_similarity.npy", allow_pickle=True)
example_trace_pairs = []
example_contrasts = []
for i, trace_pair in enumerate(trace_pairs):
if trace_contrasts[i] not in example_contrasts:
example_contrasts.append(trace_contrasts[i])
example_trace_pairs.append(trace_pair)
example_contrasts, example_trace_pairs = zip(*sorted(zip(example_contrasts, example_trace_pairs)))
stop = False
print("Thresholds are:\n ")
for i in range(len(threshold_pairs)):
print("{}: {} - {}".format(i, colors[i], threshold_pairs[i]))
plot_test_thresholds(example_trace_pairs, threshold_pairs, colors, step_size)
response = input("Choose: 'ok', 'stop', or a number (bottom threshold 0-100)")
while True:
if response == "stop":
stop = True
break
elif response.lower().startswith("ok"):
parts = response.split(" ")
if len(parts) == 1:
print("please specify an index:")
response = input("Choose: 'ok', 'stop', or a number (bottom threshold 0-100)")
continue
try:
threshold_idx = int(parts[1])
break
except:
print("{} could not be parsed as number or ok please try again.".format(response))
print("Thresholds are:\n ")
for i in range(len(threshold_pairs)):
print("{}: {} - {}".format(i, colors[i], threshold_pairs[i]))
response = input("Choose: 'ok', 'stop', or a number (bottom threshold 0-100)")
try:
parts = response.strip().split(",")
if len(parts) == 1:
extra_pair = (float(parts[0]), standard_top_percentile)
elif len(parts) == 2:
extra_pair = (float(parts[0]), float(parts[1]))
else:
raise ValueError()
except ValueError as e:
print("{} could not be parsed as number or ok please try again.".format(response))
print("Thresholds are:\n ")
for i in range(len(threshold_pairs)):
print("{}: {} - {}".format(i, colors[i], threshold_pairs[i]))
response = input("Choose: 'ok', 'stop', or a number (bottom threshold 0-100) or two numbers: bot, top")
continue
plot_test_thresholds(example_trace_pairs, threshold_pairs, colors, step_size, extra_pair=extra_pair)
print("Thresholds are:\n ")
for i in range(len(threshold_pairs)):
print("{}: {} - {}".format(i, colors[i], threshold_pairs[i]))
response = input("Choose: 'ok', 'stop', or a number (bottom threshold 0-100)")
if stop:
break
if threshold_idx < len(threshold_pairs):
thresholds_dict[item] = [threshold_pairs[threshold_idx][0], threshold_pairs[threshold_idx][1]]
else:
thresholds_dict[item] = [extra_pair[0], extra_pair[1]]
with open(threshold_file_path, "w") as threshold_file:
for name in sorted(thresholds_dict.keys()):
line = name + "\t"
line += str(thresholds_dict[name][0]) + "\t"
line += str(thresholds_dict[name][1]) + "\t"
threshold_file.write(line + "\n")
def plot_test_thresholds(trace_pairs, threshold_pairs, colors, step_size, extra_pair=None):
ncols = int(np.ceil(len(trace_pairs) / 4))
nrows = int(np.ceil(len(trace_pairs) / ncols))
fig, axes = plt.subplots(nrows, ncols, sharex="all", figsize=(12, 12))
for i, (time, v1) in enumerate(trace_pairs):
line_offset = 0
c = i % ncols
r = int(np.floor(i / ncols))
v1_max = np.max(v1)
v1_median = np.median(v1)
axes[r, c].plot(time, v1)
axes[r, c].plot((time[0], time[-1]), (v1_median, v1_median), color="black")
v1_part = v1[-int(0.6/step_size):]
if extra_pair is not None:
threshold = np.percentile(v1_part, extra_pair[1]) - np.percentile(v1_part, extra_pair[0])
axes[r, c].plot((time[0], time[-1]), (v1_median+threshold, v1_median+threshold), color="black")
peaks, _ = detect_peaks(v1, threshold=threshold)
spikes = [time[idx] for idx in peaks]
axes[r, c].eventplot(spikes, colors="black", lineoffsets=v1_max + line_offset)
line_offset += 1
for j, (bot_perc, top_perc) in enumerate(threshold_pairs):
threshold = np.percentile(v1_part, top_perc) - np.percentile(v1_part, bot_perc)
axes[r, c].plot((time[0], time[-1]), (v1_median + threshold, v1_median + threshold), color=colors[j])
peaks, _ = detect_peaks(v1, threshold=threshold)
spikes = [time[idx] for idx in peaks]
axes[r, c].eventplot(spikes, colors=colors[j], lineoffsets=v1_max + line_offset)
line_offset += 1
plt.show()
plt.close()
def test_detected_spiketimes(traces, redetected, spiketimes, step):
time = traces[0]
v1 = traces[1]
plt.plot(traces[0], traces[1])
plt.eventplot(redetected, colors="red", lineoffsets=max(traces[1]) + 1)
median = np.median(traces[1])
last_600_ms = int(np.rint(0.6 / step))
threshold_last_600 = (np.percentile(v1[-last_600_ms:], TOP_PERCENTILE) - np.percentile(v1[-last_600_ms:], BOTTOM_PERCENTILE)) * FACTOR  # parentheses added: presumably FACTOR is meant to scale the percentile difference, not only the bottom percentile
threshold_normal = np.percentile(v1, 94.5) - np.percentile(v1, 50)
print("threshold full time : {:.2f}".format(threshold_normal))
print("threshold last 600 ms: {:.2f}".format(threshold_last_600))
peaks, _ = detect_peaks(v1, threshold=threshold_last_600)
redetected_current_values = [time[idx] for idx in peaks]
plt.eventplot(redetected_current_values, colors="green", lineoffsets=max(traces[1]) + 2)
plt.plot((traces[0][0], traces[0][-1]), (median, median), color="black")
plt.plot((traces[0][0], traces[0][-1]), (median+threshold_normal, median+threshold_normal), color="black")
plt.plot((traces[0][0], traces[0][-1]), (median+threshold_last_600, median+threshold_last_600), color="grey")
for i, spiketrain in enumerate(spiketimes):
plt.eventplot(spiketrain, colors="black", lineoffsets=max(traces[1]) + 3 + i)
plt.show()
plt.close()
def plot_percentiles(trace):
steps = np.arange(0, 100.1, 0.5)
percentiles = np.percentile(trace, steps)
plt.plot(steps, percentiles)
plt.show()
plt.close()
def get_unsorted_spiketimes(fi_file):
spiketimes = []
for metadata, key, data in Dl.iload(fi_file):
spike_time_data = data[:, 0] / 1000
spiketimes.append(spike_time_data)
return spiketimes
def find_matching_spiketrain(redetected, cell_spiketrains, step_size):
# redetected_idices = [int(np.rint(s / step_size)) for s in redetected]
spikes_dict = {}
for s in redetected:
idx = int(np.rint(s / step_size))
spikes_dict[idx] = True
spikes_dict[idx+1] = True
spikes_dict[idx-1] = True
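# Annotation (not part of the original commit): spikes_dict marks each
# redetected spike index plus/minus one sample, so spiketrains are matched
# with a tolerance of one sampling step.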
similarity = np.zeros((len(cell_spiketrains), max([len(contrast_list) for contrast_list in cell_spiketrains])))
maximum = -1
max_idx = (-1, -1)
for i, contrast_list in enumerate(cell_spiketrains):
for j, cell_spiketrain in enumerate(contrast_list):
count = 0
cell_spike_indices = [int(np.rint(s / step_size)) for s in cell_spiketrain]
# plt.plot(cell_spiketrain, cell_spike_indices, '.')
# plt.plot(redetected, redetected_idices, '.')
# plt.show()
# plt.close()
for spike in cell_spiketrain:
idx = int(np.rint(spike / step_size))
if idx in spikes_dict:
count += 1
similarity[i, j] = count / len(cell_spiketrain)
if similarity[i, j] > maximum:
maximum = similarity[i, j]
max_idx = (i, j)
# plt.imshow(similarity)
# plt.show()
# plt.close()
flattened = similarity.flatten()
sorted_flattened = sorted(flattened)
second_max = sorted_flattened[-2]
if maximum < 0.5:
print("Identification: max_sim: {:.2f} vs {:.2f} second max; Diff: {} worked".format(maximum, second_max, maximum - second_max))
return similarity, max_idx, (maximum, second_max)
def get_redetected_spikes(cell_dir, before, after, step, threshold_pair):
spikes_list = []
traces = []
count = 1
for info, key, time, x in Dl.iload_traces(cell_dir, repro="FICurve", before=before, after=after):
# print(count)
if '----- Control --------------------------------------------------------' in info[0].keys():
pre_duration = float(
info[0]["----- Pre-Intensities ------------------------------------------------"]["preduration"][:-2])
if pre_duration != 0:
continue
elif "preduration" in info[0].keys():
pre_duration = float(info[0]["preduration"][:-2])
if pre_duration != 0:
continue
elif len(info) == 2 and "preduration" in info[1].keys():
pre_duration = float(info[1]["preduration"][:-2])
if pre_duration != 0:
continue
count += 1
# time, v1, eod, local_eod, stimulus
# print(key)
# print(info)
v1 = x[0]
# percentiles = np.arange(0.0, 101, 1)
# plt.plot(percentiles, np.percentile(v1, percentiles))
# plt.show()
# plt.close()
if len(v1) > 15/step:
print("Skipping Fi-Curve trace longer than 15 seconds!")
continue
if len(v1) > 3/step:
print("Warning: A FI-Curve trace is longer than 3 seconds.")
if after < 0.8:
print("Why the f is the after stimulus time shorter than 0.8s ???")
raise ValueError("Safety error: check where the after stimulus time comes from.")
last_about_600_ms = int(np.rint((after-0.2)/step))
top = np.percentile(v1[-last_about_600_ms:], threshold_pair[1])
bottom = np.percentile(v1[-last_about_600_ms:], threshold_pair[0])
threshold = (top - bottom)
peaks, _ = detect_peaks(v1, threshold=threshold)
spikes = [time[idx] for idx in peaks]
spikes_list.append(np.array(spikes))
# eod = x[1]
# local_eod = x[2]
stimulus = x[3]
# if count % 5 == 0:
# plt.eventplot(spikes, colors="black", lineoffsets=max(v1) + 1)
# plt.plot(time, v1)
# median = np.median(v1)
# plt.plot((time[0], time[-1]), (median, median), color="grey")
# plt.plot((time[0], time[-1]), (median+threshold, median+threshold), color="grey")
# plt.show()
# plt.close()
# print(key[5])
# if "rectangle" not in key[5] and "FICurve" not in key[5][35]:
# raise ValueError("No value in key 5 is rectangle:")
traces.append([np.array(time), np.array(v1)])
return np.array(spikes_list), traces
if __name__ == '__main__':
main()


@ -16,9 +16,9 @@ import multiprocessing as mp
# SAVE_DIRECTORY = "./results/invivo_results/"
SAVE_DIRECTORY = "./results/final_1/"
SAVE_DIRECTORY = "./results/final_2/"
# SAVE_DIRECTORY_BEST = "./results/invivo_best/"
SAVE_DIRECTORY_BEST = "./results/final_1_best/"
SAVE_DIRECTORY_BEST = "./results/final_2_best/"
# [bf, vs, sc, cv, isi_hist, bursty, f_inf, f_inf_slope, f_zero, f_zero_slope, f0_curve]
ERROR_WEIGHTS = (2, 2, 1, 1, 0, 1, 1, 1, 0, 1)
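# Annotation (not part of the original commit): the label list above names 11
# behaviours but ERROR_WEIGHTS has only 10 entries; one label is presumably stale.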

save_model_fits_as_csv.py (new file, 45 lines)

@ -0,0 +1,45 @@
from ModelFit import get_best_fit, ModelFit
import os
def main():
dir_path = "results/final_1/"  # renamed from 'dir' to avoid shadowing the builtin
cells = []
eod_freqs = []
parameters = []
for cell in sorted(os.listdir(dir_path)):
cell_dir = dir_path + cell
model = get_best_fit(cell_dir, use_comparable_error=False)
cells.append(cell)
eod_freqs.append(model.get_cell_data().get_eod_frequency())
parameters.append(model.get_final_parameters())
save_csv(dir_path + "models.csv", cells, eod_freqs, parameters)
def save_csv(file, cells, eod_freqs, parameters):
keys = sorted(parameters[0].keys())
with open(file, "w") as file:
header = "cell,eod_frequency"
for k in keys:
header += ",{}".format(k)
file.write(header + "\n")
for i in range(len(cells)):
line = "{},{:.2f}".format(cells[i], eod_freqs[i])
for k in keys:
line += ",{}".format(parameters[i][k])
file.write(line + "\n")
if __name__ == '__main__':
main()

test.py (202 lines changed)

@ -17,208 +17,6 @@ from scipy.signal import find_peaks
from thunderfish.eventdetection import threshold_crossing_times, threshold_crossings, detect_peaks
folder = "./results/invivo-1/"
for cell in os.listdir(folder):
fit = get_best_fit(os.path.join(folder, cell), use_comparable_error=False)
model = fit.get_model()
baseline = BaselineModel(model, eod_frequency=fit.get_cell_data().get_eod_frequency(), trials=3)
baseline.plot_serial_correlation(3)
continue
cell_data = fit.get_cell_data()
model = fit.get_model()
fi = FICurveModel(model, np.arange(-0.5, 0.6, 0.1), cell_data.get_eod_frequency())
times, freq = fi.get_mean_time_and_freq_traces()
for i in range(len(times)):
plt.plot(freq[i])
plt.plot(fi.indices_f_zero[i], [fi.f_zero_frequencies[i]]*len(fi.indices_f_zero[i]), "o")
plt.plot(fi.indices_f_baseline[i], 2*[fi.f_baseline_frequencies[i]])
plt.plot(fi.indices_f_inf[i], 2*[fi.f_inf_frequencies[i]])
plt.show()
plt.close()
# fit.generate_master_plot("./results/invivo-1_best/")
# fit.generate_master_plot()
quit()
for item in os.listdir(folder):
fit_path = os.path.join(folder, item)
fit = ModelFit(fit_path)
print(fit.get_fit_routine_error())
fit.generate_master_plot()
quit()
def indices_of_peaks_of_distribution(y_values, stepsize_ms, eod_freq):
eod_freq_ms = eod_freq / 1000
distance = int(0.75*eod_freq_ms / stepsize_ms)
print(distance*stepsize_ms)
peaks, _ = find_peaks(np.array(y_values), distance=distance)
return peaks
def remove_close_peaks(maxima_idx, peaks, closeness=2):
to_del = []
maxima_idx = list(maxima_idx)
for idx in maxima_idx:
for i in range(-1*closeness,closeness+1, 1):
if 0 <= idx + i < len(peaks):
if peaks[idx+i] > peaks[idx]:
to_del.append(idx)
break
for val in to_del:
maxima_idx.remove(val)
return maxima_idx
def find_local_maxima(values):
local_max_idx = []
for i in range(len(values)):
maxima = True
for j in (-1, 1):
if 0 <= i+j < len(values):
if values[i+j] > values[i]:
maxima = False
break
else:
continue
if maxima:
local_max_idx.append(i)
return local_max_idx
def rms(array):
square = np.array(array)**2
return np.sqrt(np.mean(square))
def perc_smaller_value(isis, value):
isis = np.array(isis)
fulfilled = isis < value
return np.sum(fulfilled) / len(fulfilled)
cell_datas = [] # [CellData("data/invivo/2014-12-03-ad-invivo-1/")]
for cell_data in icelldata_of_dir("data/invivo/", test_for_v1_trace=False):
cell_datas.append(cell_data)
for cell_data in icelldata_of_dir("data/invivo_bursty/"):
cell_datas.append(cell_data)
burstiness = []
for cell_data in cell_datas:
base = BaselineCellData(cell_data)
burstiness.append(base.get_burstiness())
cell_data_idx = np.arange(0, len(cell_datas), 1)
burstiness, cell_data_idx = (list(t) for t in zip(*sorted(zip(burstiness, cell_data_idx))))
for i in range(len(burstiness)):
base = BaselineCellData(cell_datas[cell_data_idx[i]])
isis = np.array(base.get_interspike_intervals()) * 1000
bins = np.arange(0, 30.1, 0.2)
plt.hist(isis, bins=bins)
plt.title(str(burstiness[i]))
plt.show()
quit()
for cell_data in cell_datas:
base = BaselineCellData(cell_data)
isis = np.array(base.get_interspike_intervals()) * 1000
eod_freq = cell_data.get_eod_frequency()
bins = np.arange(0, 30.1, 0.2)
# y_values = plt.hist(isis, bins=bins, cumulative=True, density=True, alpha=0.5)
# y_values2 = plt.hist(isis, bins=bins, density=True)
value = perc_smaller_value(isis, 2.5/(eod_freq/1000)) * np.mean(isis)
dif_mean_median.append(value)
# plt.title("Diff % < 2.5eod / mean= {:.2f}".format(value))
# peaks, _ = detect_peaks(y_values[0], 0.5*np.std(y_values[0]))
# hist_x = bins[peaks]
# hist_peaks = y_values[0][peaks]
# plt.plot(hist_x, hist_peaks, '+')
# plt.plot([2.5/(eod_freq/1000)]*2, (0, 1), ":", color="black")
# plt.plot([np.median(isis)]*2, (0, 1), "--", color="darkblue")
# plt.plot([np.mean(isis)]*2, (0, 1), "--", color="darkgreen")
# plt.plot([rms(isis)]*2, (0, 1), "--", color="red")
if value < 1:
cells_sorted["below_one"].append(cell_data)
elif value < 3:
cells_sorted["below_three"].append(cell_data)
else:
cells_sorted["other"].append(cell_data)
count = 0
for cell_data in cells_sorted["below_one"]:
count += 1
if count <= 10:
base = BaselineCellData(cell_data)
isis = np.array(base.get_interspike_intervals()) * 1000
eod_freq = cell_data.get_eod_frequency()
value = perc_smaller_value(isis, 2.5 / (eod_freq / 1000)) * np.mean(isis)
bins = np.arange(0, 30.1, 0.2)
plt.title("Value < 1: {:.2f}".format(value))
plt.hist(isis, bins=bins, density=True)
plt.show()
plt.close()
count = 0
for cell_data in cells_sorted["below_three"]:
count += 1
if count <= 10:
base = BaselineCellData(cell_data)
isis = np.array(base.get_interspike_intervals()) * 1000
eod_freq = cell_data.get_eod_frequency()
value = perc_smaller_value(isis, 2.5 / (eod_freq / 1000)) * np.mean(isis)
bins = np.arange(0, 30.1, 0.2)
plt.title("1 < Value < 3: {:.2f}".format(value))
plt.hist(isis, bins=bins, density=True)
plt.show()
plt.close()
count = 0
for cell_data in cells_sorted["other"]:
count += 1
if count <= 10:
base = BaselineCellData(cell_data)
isis = np.array(base.get_interspike_intervals()) * 1000
eod_freq = cell_data.get_eod_frequency()
value = perc_smaller_value(isis, 2.5 / (eod_freq / 1000)) * np.mean(isis)
bins = np.arange(0, 30.1, 0.2)
plt.title("Value >=3: {:.2f}".format(value))
plt.hist(isis, bins=bins, density=True)
plt.show()
plt.close()
print("< one:", len(cells_sorted["below_one"]))
print("< three:", len(cells_sorted["below_three"]))
print("< more:", len(cells_sorted["other"]))
quit()
for cell_data in icelldata_of_dir("data/"):
baseline = get_baseline_class(cell_data)
baseline.get_burstiness()


@ -32,8 +32,8 @@
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Cell recordings}{5}{subsection.4.1}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Stimulus Protocols}{6}{subsection.4.2}}
\newlabel{eq:am_generation}{{1}{6}{Stimulus Protocols}{equation.4.1}{}}
\newlabel{fig:stim_examples}{{2}{6}{Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle a part of the recording multiplied with the AM, a step with a contrast of 130\% between 0 and 50\,ms (marked in \todo {color}). At the bottom the resulting stimulus trace when the AM is added to the EOD. This example stimulus is for visualization purposes 50\,ms short. During the measurements the stimulus was 0.4\,s or 1\,s long}{figure.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle a part of the recording multiplied with the AM, a step with a contrast of 130\% between 0 and 50\tmspace +\thinmuskip {.1667em}ms (marked in {\color {red}(TODO: color)}). At the bottom the resulting stimulus trace when the AM is added to the EOD. This example stimulus is for visualization purposes 50\tmspace +\thinmuskip {.1667em}ms short. During the measurements the stimulus was 0.4\tmspace +\thinmuskip {.1667em}s or 1\tmspace +\thinmuskip {.1667em}s long. }}{6}{figure.2}}
\newlabel{fig:stim_examples}{{2}{6}{Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle: EOD recording multiplied with the AM, with a step from 0 to a contrast of 30\,\% between 0 and 50\,ms (marked in \todo {color}). At the bottom the resulting stimulus trace when the AM is added to the EOD. \todo {Umformulieren}}{figure.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle: EOD recording multiplied with the AM, with a step from 0 to a contrast of 30\tmspace +\thinmuskip {.1667em}\% between 0 and 50\tmspace +\thinmuskip {.1667em}ms (marked in {\color {red}(TODO: color)}). At the bottom the resulting stimulus trace when the AM is added to the EOD. {\color {red}(TODO: Umformulieren)}}}{6}{figure.2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Cell Characteristics}{6}{subsection.4.3}}
\newlabel{eq:CV}{{2}{7}{Cell Characteristics}{equation.4.2}{}}
\newlabel{eq:VS}{{3}{7}{Cell Characteristics}{equation.4.3}{}}
@ -42,6 +42,7 @@
\newlabel{fig:f_point_detection}{{3}{8}{\todo {place right in text}On the left: The averaged response of a cell to a step in EOD amplitude. The beginning (at 0\,s) and end (at 1\,s) of the stimulus are marked by the gray lines. The detected values for the onset ($f_0$) and steady-state ($f_{inf}$) response are marked in \todo {color}. $f_0$ is detected as the highest deviation from the mean frequency before the stimulus while $f_{inf}$ is the average frequency in the 0.1\,s time window, 25\,ms before the end of the stimulus. On the right: The fi-curve visualizes the onset and steady-state response of the neuron for different stimuli contrasts. In \todo {color} the detected onset responses and the fitted Boltzmann, in \todo {color} the detected steady-state response and the linear fit}{figure.3}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Leaky Integrate and Fire Model}{8}{subsection.4.4}}
\citation{benda2010linear}
\citation{benda2005spike}
\newlabel{eq:basic_voltage_dynamics}{{5}{9}{Leaky Integrate and Fire Model}{equation.4.5}{}}
\newlabel{eq:adaption_dynamics}{{6}{9}{Leaky Integrate and Fire Model}{equation.4.6}{}}
\newlabel{eq:currents_lifac}{{7}{9}{Leaky Integrate and Fire Model}{equation.4.7}{}}
@ -55,11 +56,12 @@
\@writefile{toc}{\contentsline {subsection}{\numberline {4.5}Fitting of the Model}{11}{subsection.4.5}}
\citation{gao2012implementing}
\bibdata{citations}
\bibcite{benda2010linear}{{1}{2010}{{Benda et~al.}}{{}}}
\bibcite{gao2012implementing}{{2}{2012}{{Gao and Han}}{{}}}
\bibcite{todd1999identification}{{3}{1999}{{Todd and Andrews}}{{}}}
\bibcite{walz2013Phd}{{4}{2013}{{Walz}}{{}}}
\bibcite{walz2014static}{{5}{2014}{{Walz et~al.}}{{}}}
\bibcite{benda2005spike}{{1}{2005}{{Benda et~al.}}{{}}}
\bibcite{benda2010linear}{{2}{2010}{{Benda et~al.}}{{}}}
\bibcite{gao2012implementing}{{3}{2012}{{Gao and Han}}{{}}}
\bibcite{todd1999identification}{{4}{1999}{{Todd and Andrews}}{{}}}
\bibcite{walz2013Phd}{{5}{2013}{{Walz}}{{}}}
\bibcite{walz2014static}{{6}{2014}{{Walz et~al.}}{{}}}
\bibstyle{apalike}
\@writefile{toc}{\contentsline {section}{\numberline {5}Results}{12}{section.5}}
\@writefile{toc}{\contentsline {section}{\numberline {6}Discussion}{12}{section.6}}


@ -1,5 +1,11 @@
\begin{thebibliography}{}
\bibitem[Benda et~al., 2005]{benda2005spike}
Benda, J., Longtin, A., and Maler, L. (2005).
\newblock Spike-frequency adaptation separates transient communication signals
from background oscillations.
\newblock {\em Journal of Neuroscience}, 25(9):2312--2321.
\bibitem[Benda et~al., 2010]{benda2010linear}
Benda, J., Maler, L., and Longtin, A. (2010).
\newblock Linear versus nonlinear signal transmission in neuron models with


@ -1,46 +1,46 @@
This is BibTeX, Version 0.99d (TeX Live 2017/Debian)
Capacity: max_strings=100000, hash_size=100000, hash_prime=85009
This is BibTeX, Version 0.99d (TeX Live 2015/Debian)
Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
The top-level auxiliary file: Masterthesis.aux
The style file: apalike.bst
Database file #1: citations.bib
You've used 5 entries,
You've used 6 entries,
1935 wiz_defined-function locations,
508 strings with 4630 characters,
and the built_in function-call counts, 1968 in all, are:
= -- 199
> -- 69
517 strings with 4845 characters,
and the built_in function-call counts, 2415 in all, are:
= -- 244
> -- 87
< -- 3
+ -- 24
- -- 22
* -- 176
:= -- 346
add.period$ -- 15
call.type$ -- 5
change.case$ -- 35
chr.to.int$ -- 5
cite$ -- 5
duplicate$ -- 68
empty$ -- 140
format.name$ -- 31
if$ -- 375
+ -- 30
- -- 28
* -- 219
:= -- 422
add.period$ -- 18
call.type$ -- 6
change.case$ -- 43
chr.to.int$ -- 6
cite$ -- 6
duplicate$ -- 82
empty$ -- 172
format.name$ -- 38
if$ -- 461
int.to.chr$ -- 1
int.to.str$ -- 0
missing$ -- 4
newline$ -- 28
num.names$ -- 15
pop$ -- 24
missing$ -- 5
newline$ -- 33
num.names$ -- 18
pop$ -- 30
preamble$ -- 1
purify$ -- 36
purify$ -- 44
quote$ -- 0
skip$ -- 53
skip$ -- 63
stack$ -- 0
substring$ -- 168
swap$ -- 5
substring$ -- 211
swap$ -- 6
text.length$ -- 0
text.prefix$ -- 0
top$ -- 0
type$ -- 30
type$ -- 36
warning$ -- 0
while$ -- 18
while$ -- 22
width$ -- 0
write$ -- 67
write$ -- 80


@ -1,4 +1,4 @@
This is pdfTeX, Version 3.14159265-2.6-1.40.16 (TeX Live 2015/Debian) (preloaded format=pdflatex 2018.11.12) 1 SEP 2020 15:07
This is pdfTeX, Version 3.14159265-2.6-1.40.16 (TeX Live 2015/Debian) (preloaded format=pdflatex 2018.11.12) 4 SEP 2020 10:57
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
@ -520,12 +520,9 @@ LaTeX Font Info: Font shape `OMS/cmr/m/n' in size <12> not available
File: figures/p_unit_example.png Graphic file (type png)
<use figures/p_unit_example.png>
Package pdftex.def Info: figures/p_unit_example.png used on input line 129.
Package pdftex.def Info: figures/p_unit_example.png used on input line 133.
(pdftex.def) Requested size: 409.71692pt x 409.73999pt.
[3] [4 <./figures/p_unit_example.png>]
(/usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-32.def
File: uni-32.def 2013/05/13 UCS: Unicode data U+2000..U+20FF
)
(/usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-34.def
File: uni-34.def 2013/05/13 UCS: Unicode data U+2200..U+22FF
) [5]
@ -533,57 +530,57 @@ File: uni-34.def 2013/05/13 UCS: Unicode data U+2200..U+22FF
File: figures/amGeneration.pdf Graphic file (type pdf)
<use figures/amGeneration.pdf>
Package pdftex.def Info: figures/amGeneration.pdf used on input line 188.
Package pdftex.def Info: figures/amGeneration.pdf used on input line 192.
(pdftex.def) Requested size: 204.85846pt x 204.86006pt.
File: figures/amGeneration.pdf Graphic file (type pdf)
<use figures/amGeneration.pdf>
Package pdftex.def Info: figures/amGeneration.pdf used on input line 188.
Package pdftex.def Info: figures/amGeneration.pdf used on input line 192.
(pdftex.def) Requested size: 204.85846pt x 204.86006pt.
File: figures/amGeneration.pdf Graphic file (type pdf)
<use figures/amGeneration.pdf>
Package pdftex.def Info: figures/amGeneration.pdf used on input line 188.
Package pdftex.def Info: figures/amGeneration.pdf used on input line 192.
(pdftex.def) Requested size: 204.85846pt x 204.86006pt.
[6 <./figures/amGeneration.pdf>]
<figures/f_point_detection.png, id=154, 433.62pt x 289.08pt>
<figures/f_point_detection.png, id=155, 433.62pt x 289.08pt>
File: figures/f_point_detection.png Graphic file (type png)
<use figures/f_point_detection.png>
Package pdftex.def Info: figures/f_point_detection.png used on input line 264.
Package pdftex.def Info: figures/f_point_detection.png used on input line 267.
(pdftex.def) Requested size: 301.37201pt x 208.5021pt.
[7] [8 <./figures/f_point_detection.png>]
<figures/model_comparison.pdf, id=185, 578.16pt x 578.16pt>
<figures/model_comparison.pdf, id=187, 578.16pt x 578.16pt>
File: figures/model_comparison.pdf Graphic file (type pdf)
<use figures/model_comparison.pdf>
Package pdftex.def Info: figures/model_comparison.pdf used on input line 312.
Package pdftex.def Info: figures/model_comparison.pdf used on input line 315.
(pdftex.def) Requested size: 346.89867pt x 346.89867pt.
[9]
<figures/stimulus_development.pdf, id=196, 433.62pt x 433.62pt>
<figures/stimulus_development.pdf, id=200, 433.62pt x 433.62pt>
File: figures/stimulus_development.pdf Graphic file (type pdf)
<use figures/stimulus_development.pdf>
Package pdftex.def Info: figures/stimulus_development.pdf used on input line 33
0.
3.
(pdftex.def) Requested size: 260.17401pt x 260.17401pt.
[10 <./figures/model_comparison.pdf>]
[11 <./figures/stimulus_development.pdf>] (./Masterthesis.bbl)
Package atveryend Info: Empty hook `BeforeClearDocument' on input line 412.
Package atveryend Info: Empty hook `BeforeClearDocument' on input line 414.
[12]
Package atveryend Info: Empty hook `AfterLastShipout' on input line 412.
Package atveryend Info: Empty hook `AfterLastShipout' on input line 414.
(./Masterthesis.aux)
Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 412.
Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 412.
Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 414.
Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 414.
Package rerunfilecheck Info: File `Masterthesis.out' has not changed.
(rerunfilecheck) Checksum: 7A3ACD7CD7DC89195072057BF8EFCD4A;622.
Package atveryend Info: Empty hook `AtVeryVeryEnd' on input line 412.
Package atveryend Info: Empty hook `AtVeryVeryEnd' on input line 414.
)
Here is how much of TeX's memory you used:
11126 strings out of 493029
158645 string characters out of 6136233
267205 words of memory out of 5000000
14484 multiletter control sequences out of 15000+600000
10879 strings out of 493029
155891 string characters out of 6136233
264880 words of memory out of 5000000
14239 multiletter control sequences out of 15000+600000
9214 words of font info for 33 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
37i,16n,38p,1246b,551s stack positions out of 5000i,500n,10000p,200000b,80000s
@ -598,10 +595,10 @@ ublic/amsfonts/cm/cmr8.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/am
sfonts/cm/cmsy10.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts
/cm/cmsy8.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmt
i12.pfb>
Output written on Masterthesis.pdf (13 pages, 328132 bytes).
Output written on Masterthesis.pdf (13 pages, 327767 bytes).
PDF statistics:
341 PDF objects out of 1000 (max. 8388607)
218 compressed objects within 3 object streams
49 named destinations out of 1000 (max. 500000)
345 PDF objects out of 1000 (max. 8388607)
222 compressed objects within 3 object streams
50 named destinations out of 1000 (max. 500000)
114 words of extra memory for PDF output out of 10000 (max. 10000000)

Binary file not shown.

Binary file not shown.

View File

@ -117,6 +117,10 @@ Außerdem erkläre ich, dass die eingereichte Arbeit weder vollständig noch in
\section{Introduction}
%\begin{figure}[H]
%\floatbox[{\capbeside\thisfloatsetup{capbesideposition={left,top},capbesidewidth=0.49\textwidth}}]{figure}[\FBwidth]
%{\caption{\label{fig:p_unit_example} Example behavior of a p-unit with a high baseline firing rate. Baseline Firing: A 100\,ms voltage trace of the recording with spikes marked by the black lines. ISI-histogram: The histogram of the ISI with the x-axis in EOD periods, showing the phase locking of the firing. Serial Correlation: The serial correlation of the ISI showing a negative correlation for lags one and two. Step Response: The response of the p-unit to a step increase in EOD amplitude. In \todo{color} the averaged frequency over 10 trials and in \todo{color} smoothed with an running average with a window of 10\,ms. The p-unit strongly reacts to the onset of the stimulus but very quickly adapts to the new stimulus and then shows a steady state response. FI-Curve: The fi-curve visualizes the onset and steady-state response of the neuron for different step sizes (contrasts). In \todo{color} the detected onset responses and the fitted Boltzmann, in %\todo{color} the detected steady-state response and the linear fit.}}
@ -150,16 +154,16 @@ Außerdem erkläre ich, dass die eingereichte Arbeit weder vollständig noch in
% EOD-freq: min 601.09, mean 753.09, max 928.45, std 82.30
% Sizes: min 11.00, mean 15.78, max 25.00, std 3.48
The cell recordings for this master thesis were collected as part of other previous studies (\cite{walz2013Phd}, \citep{walz2014static})\todo{ref other studies} and is described there but will also be repeated below. The recordings of 457 p-units were inspected. Of those 88 fulfilled the basic necessary requirements: including a measurement of at least 30 seconds of the baseline behavior and containing at least 7 different contrasts with each at least 7 trials for the FI-Curve (see below \todo{ref fi-curve? }). After pre-analysis of those cells an additional 13 cells were excluded because of analysis difficulties.
The cell recordings for this master thesis were collected as part of previous studies \citep{walz2013Phd, walz2014static}\todo{ref other studies}; the recording procedure is described there but is also summarized below. The recordings of altogether 457 p-units were inspected. Of those, 88 fulfilled the basic requirements: a measurement of at least 30 seconds of baseline behavior and at least 7 different contrasts with at least 7 trials each for the f-I curve (see below \todo{ref fi-curve?}). After pre-analysis of those cells an additional 15 cells were excluded because of spike detection difficulties.
The 75 used cells came from 32 \AptLepto (brown ghost knifefish). The fish were between 11-25\,cm long (15.78 $\pm$ 3.48\,cm) and their electric organ discharge (EOD) frequencies were between 601-928\,Hz (753.1 $\pm$ 82.3\,Hz). The gender of the fish was not determined.
The 73 used cells came from 32 \AptLepto (brown ghost knifefish). The fish were between 11--25\,cm long (15.8 $\pm$ 3.5\,cm) and their electric organ discharge (EOD) frequencies ranged between 601 and 928\,Hz (753 $\pm$ 82\,Hz). The sex of the fish was not determined.
The in vivo intracellular recordings of P-unit electroreceptors were done in the lateral line nerve . The fish were anesthetized with MS-222 (100-130 mg/l; PharmaQ; Fordingbridge, UK) and the part of the skin covering the lateral line just behind the skull was removed, while the area was anesthetized with Lidocaine (2\%; bela-pharm; Vechta, Germany). The fish were immobilized for the recordings with Tubocurarine (Sigma-Aldrich; Steinheim, Germany, 2550\,$\mu l$ of 5\,mg/ml solution) and placed in the experimental tank (47 $\times$ 42 $\times$ 12\,cm) filled with water from the fish's home tank with a conductivity of about 300$\mu$\,S/cm and the temperature was around 28°C.
All experimental protocols were approved and complied with national and regional laws (files: no. 55.2-1-54-2531-135-09 and Regierungspräsidium Tübingen no. ZP 1/13 and no. ZP 1/16 \todo{andere antrags nummern so richtig ?})
For the recordings a standard glass mircoelectrode (borosilicate; 1.5 mm outer diameter; GB150F-8P, Science Products, Hofheim, Germany) was used. They were pulled to a resistance of 50-100\,M$\Omega$ using Model P-97 from Sutter Instrument Co. (Novato, CA, USA) and filled with 1\,M KCl solution. The electrodes were controlled using microdrives (Luigs-Neumann; Ratingen, Germany) and the potentials recorded with the bridge mode of the SEC-05 amplifier (npi-electronics GmbH, Tamm, Germany) and lowpass filtered at 10 kHz.
The in vivo intracellular recordings of P-unit electroreceptors were done in the lateral line nerve. The fish were anesthetized with MS-222 (100--130\,mg/l; PharmaQ; Fordingbridge, UK) and the part of the skin covering the lateral line just behind the skull was removed, while the area was anesthetized with Lidocaine (2\%; bela-pharm; Vechta, Germany). The fish were immobilized for the recordings with Tubocurarine (Sigma-Aldrich; Steinheim, Germany; 25--50\,$\mu$l of 5\,mg/ml solution) and placed in the experimental tank (47 $\times$ 42 $\times$ 12\,cm) filled with water from the fish's home tank with a conductivity of about 300\,$\mu$S/cm and a temperature of around 28\,°C.
All experimental protocols were approved and complied with national and regional laws (files: no. 55.2-1-54-2531-135-09 and Regierungspräsidium Tübingen no. ZP 1/13 and no. ZP 1/16).
For the recordings a standard glass microelectrode (borosilicate; 1.5\,mm outer diameter; GB150F-8P, Science Products, Hofheim, Germany) was used. The electrodes were pulled to a resistance of 50--100\,M$\Omega$ using a Model P-97 puller from Sutter Instrument Co. (Novato, CA, USA) and filled with 1\,M KCl solution. The electrodes were controlled using microdrives (Luigs-Neumann; Ratingen, Germany) and the potentials were recorded with the bridge mode of the SEC-05 amplifier (npi-electronics GmbH, Tamm, Germany) and low-pass filtered at 10\,kHz.
During the recordings spikes were detected online using the peak detection algorithm of \cite{todd1999identification}. It uses a dynamically adjusted threshold above the previously detected trough. To track changes in spike amplitude, the threshold was set to 50\% of the amplitude of the last detected spike, while a minimum threshold was kept above the noise level based on a histogram of all peak amplitudes. Trials with bad spike detection were removed from further analysis.
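For illustration, a minimal sketch of such a dynamic-threshold detector (a simplified stand-in for, not a reimplementation of, the online detector; the function name and parameters are assumptions):

import numpy as np

def detect_spikes(v, min_threshold, dt):
    # Dynamic-threshold peak detection as described above (sketch):
    # a spike is registered once the voltage has risen by more than
    # `threshold` above the most recent trough; the threshold is then
    # reset to 50% of that spike's amplitude, but never drops below
    # `min_threshold` (which should sit above the noise level).
    threshold = min_threshold
    trough = v[0]
    spike_times = []
    for i in range(1, len(v)):
        trough = min(trough, v[i])
        if v[i] - trough >= threshold:
            spike_times.append(i * dt)
            threshold = max(0.5 * (v[i] - trough), min_threshold)
            trough = v[i]  # restart the trough search after a spike
    return np.asarray(spike_times)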
The fish's EOD was recorded using using two vertical carbon rods (11\,cm long, 8\,mm diameter) positioned in front of the head and behind its tail. The signal was amplified 200 to 500 times and band-pass filtered (3 1500 Hz passband, DPA2-FX, npi-electronics, Tamm, Germany). The electrodes were placed on iso-potential lines of the stimulus field to reduce the interference of the stimulus in the recording. All signals were digitized using a data acquisition board (PCI-6229; National Instruments, Austin TX, USA) at a sampling rate of 20-100\,kHz (54 cells at 20\,kHz, 20 at 100\,kHz and 1 at 40\,kHz)
The fish's EOD was recorded using two vertical carbon rods (11\,cm long, 8\,mm diameter) positioned in front of the head and behind the tail. The signal was amplified 200 to 500 times and band-pass filtered (3--1500\,Hz passband, DPA2-FX, npi-electronics, Tamm, Germany). The electrodes were placed on iso-potential lines of the stimulus field to reduce the interference of the stimulus in the recording. All signals were digitized using a data acquisition board (PCI-6229; National Instruments, Austin, TX, USA) at a sampling rate of 20--100\,kHz (54 cells at 20\,kHz, 20 at 100\,kHz, and 1 at 40\,kHz).
The recording and stimulation were done using the ephys, efield, and efish plugins of the software RELACS (\href{www.relacs.net}{www.relacs.net}), which allowed online spike and EOD detection, pre-analysis, and visualization, and ran on a Debian computer.
@ -171,25 +175,25 @@ The recording and stimulation was done using the ephys, efield, and efish plugin
% image of SAM stimulus
The stimuli used during the recordings were presented with two vertical carbon rods (30\,cm long, 8\,mm diameter) as stimulus electrodes. They were positioned on either side of the fish parallel to its longitudinal axis. The stimuli were computer generated, attenuated and isolated (Attenuator: ATN-01M, Isolator: ISO-02V, npi-electronics, Tamm, Germany) and then sent to the stimulus electrodes.
For this work two types of recordings were made with all cells: baseline recordings and amplitude step recordings for the frequency-Intensity curve (FI-Curve).
For this work two types of recordings were made with all cells: baseline recordings and amplitude step recordings for the frequency-intensity curve (f-I curve).
The ``stimulus'' for the baseline recording is simply the fish's own EOD field, with no external stimulus applied.
The amplitude step stimulus here is a step in EOD amplitude. To be able to cause an amplitude modulation (AM) in the fish's EOD , the EOD was recorded and the multiplied with the modulation (see fig. \ref{fig:stim_examples}). This modified EOD can then be presented at the right phase with the stimulus electrodes, causing constructive interference and adding the used amplitude modulation to the EOD (Fig. \ref{fig:stim_examples}). This stimuli construction as seen in equation \ref{eq:am_generation} works for any AM as long as the EOD of the fish is stable.
The amplitude step stimulus here is a step in EOD amplitude. To cause an amplitude modulation (AM) in the fish's EOD, the EOD was recorded and multiplied with the modulation (see fig. \ref{fig:stim_examples}). This modified EOD can then be presented at the right phase with the stimulus electrodes, causing constructive interference and adding the desired amplitude modulation to the EOD (Fig. \ref{fig:stim_examples}). This stimulus construction, as seen in equation~\ref{eq:am_generation}, works for any AM as long as the EOD of the fish is stable.
\begin{equation}
Stimulus = EOD(t) + AM(t) * EOD(t) \todo{acceptable?}
V_{Stim}(t) = EOD(t)(1 + AM(t))
\label{eq:am_generation}
\end{equation}
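In code, equation \ref{eq:am_generation} is essentially a one-liner on the recorded EOD; a minimal Python sketch (array and parameter names are assumptions, not code from this repository):

import numpy as np

def am_step_stimulus(eod, dt, contrast=0.3, t_on=0.0, t_off=0.05):
    # AM trace: zero outside the step, `contrast` (here 30%) inside it.
    t = np.arange(len(eod)) * dt
    am = np.where((t >= t_on) & (t < t_off), contrast, 0.0)
    # V_stim(t) = EOD(t) * (1 + AM(t)), cf. equation eq:am_generation
    return eod * (1.0 + am)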
\begin{figure}[H]
\floatbox[{\capbeside\thisfloatsetup{capbesideposition={left, center}, capbesidewidth=0.45\textwidth}}]{figure}[\FBwidth]
{\caption{\label{fig:stim_examples} Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle a part of the recording multiplied with the AM, a step with a contrast of 130\% between 0 and 50\,ms (marked in \todo{color}). At the bottom the resulting stimulus trace when the AM is added to the EOD. This example stimulus is for visualization purposes 50\,ms short. During the measurements the stimulus was 0.4\,s or 1\,s long. }}
{\caption{\label{fig:stim_examples} Example of the stimulus construction. At the top a recording of the fish's EOD. In the middle: the EOD recording multiplied with the AM, with a step from 0 to a contrast of 30\,\% between 0 and 50\,ms (marked in \todo{color}). At the bottom the resulting stimulus trace when the AM is added to the EOD. \todo{rephrase}}}
{\includegraphics[width=0.45\textwidth]{figures/amGeneration.pdf}}
\end{figure}
The step stimuli all consisted of a delay of 0.2\,s followed by a 0.4\,s (n=68) or 1\,s (n=7) long step and a 0.8\,s long recovery time. The contrast range measured was for the most cells 80-120\% of EOD amplitude. Some cells were measured in a larger range up to 20-180\%. In the range at least 7 contrasts were measured with at least 7 trials, but again many cells were measured with more contrasts and trials. The additionally measured contrasts were used for the model if they had at least 3 trials.
All step stimuli consisted of a delay of 0.2\,s followed by a 0.4\,s (n=68) or 1\,s (n=7) long step and a 0.8\,s long recovery time. For most cells the measured contrast range was 80--120\% of the EOD amplitude; some cells were measured over a larger range of up to 20--180\%. Within this range at least 7 contrasts with at least 7 trials each were measured, but many cells were measured with more contrasts and trials. Additionally measured contrasts were used for the model if they had at least 3 trials.
%That means for every cell the FI-Curve was measured at at least 7 Points each with at least 7 trials. If more contrasts were measured during the recording the additional information was used as long as there were at least 3 trials available.
%All presentations had 0.2\,s delay at the start and then started the stimulus at time 0. The step stimulus was presented for 0.4\,s (7 cells) or 1\,s(68 cells) and followed by 0.8\,s time for the cell to recover back to baseline.
@ -221,36 +225,35 @@ The step stimuli all consisted of a delay of 0.2\,s followed by a 0.4\,s (n=68)
\subsection{Cell Characteristics}
The cells were characterized by ten parameters: 6 for the baseline and 4 for the fi-curve.
For the baseline the mean frequency was calculated by dividing the number of spikes in the recording by the recording time. Then the set of all interspike intervals (ISI) $T$ of the spikes in the recording further parameter was calculated and the other parameters were calculated from it.
The coefficient of variation (CV) is defined as the standard deviation (STD) of $T$ divided by the mean ISI, see equation \ref{eq:CV} with angled brackets as the averaging operator.
The cells were characterized by ten parameters: six for the baseline and four for the f-I curve.
For the baseline the mean firing rate was calculated by dividing the number of spikes in the recording by the recording time. Then the set of all interspike intervals (ISI) $T$ was computed and further parameters were calculated from it.
The coefficient of variation
\begin{equation}
CV = \frac{STD(T)}{\langle T \rangle}
\label{eq:CV}
\end{equation}
is defined as the standard deviation (STD) of $T$ divided by the mean ISI (Eq.~\ref{eq:CV}), with angled brackets denoting the averaging operator.
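A minimal sketch of these two baseline measures, assuming spike times in seconds (hypothetical helper, not the repository's analysis code):

import numpy as np

def baseline_rate_and_cv(spike_times, duration):
    # Mean firing rate: number of spikes divided by recording time.
    rate = len(spike_times) / duration
    # CV: standard deviation of the ISIs divided by the mean ISI (Eq. eq:CV).
    isis = np.diff(spike_times)
    return rate, np.std(isis) / np.mean(isis)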
The vector strength (VS) is a measure of how strongly the cell locks to a phase of the EOD. It was calculated as seen in Eq.~\ref{eq:VS}, by placing each spike on the unit circle according to its relative spike time $t_i$, i.e. the time passed since the start of the current EOD period relative to the EOD period length. These vectors are averaged and the absolute value of the average vector is the VS. If the VS is zero the spikes occur equally in all phases of the EOD, while if it is one all spikes occur at exactly the same phase.
\begin{equation}
p(\omega) = \frac{1}{n} \sum_n e^{iwt_i}
vs = \left| \frac{1}{n} \sum_{i=1}^{n} e^{i \omega t_i} \right|
\label{eq:VS}
\end{equation}
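A sketch of this calculation, assuming EOD periods start at time zero (hypothetical helper):

import numpy as np

def vector_strength(spike_times, eod_period):
    # Map each spike onto the unit circle by its phase within the EOD period.
    phases = 2.0 * np.pi * (spike_times % eod_period) / eod_period
    # Absolute value of the mean vector: 0 = no locking, 1 = perfect locking.
    return np.abs(np.mean(np.exp(1j * phases)))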
The serial correlation with lag x ($SC_x$) of $T$ is a measure how the ISI $T_i$ (the i-th ISI) influences the $T_{i+x}$ the ISI with a lag of x intervals. This is calculated as,
The serial correlation with lag $k$ ($SC_k$) of $T$ is a measure of how the ISI $T_i$ (the $i$-th ISI) influences $T_{i+k}$, the ISI $k$ intervals later. This is calculated as
\begin{equation}
SC_x = \frac{\langle (T_{i} - \langle T \rangle)(T_{i+x} - \langle T \rangle) \rangle}{\sqrt{\langle (T_i - \langle T \rangle)^2 \rangle}\sqrt{\langle (T_{i+x} - \langle T \rangle)^2 \rangle}}
SC_k = \frac{\langle (T_{i} - \langle T \rangle)(T_{i+k} - \langle T \rangle) \rangle}{\sqrt{\langle (T_i - \langle T \rangle)^2 \rangle}\sqrt{\langle (T_{i+k} - \langle T \rangle)^2 \rangle}}
\label{eq:SC}
\end{equation}
with the angled brackets again denoting the averaging operator.
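On an array of ISIs, $SC_k$ reduces to a standard correlation coefficient (sketch):

import numpy as np

def serial_correlation(isis, k=1):
    # Pearson correlation between each ISI and the ISI k intervals later (Eq. eq:SC).
    return np.corrcoef(isis[:-k], isis[k:])[0, 1]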
Finally the ISI-histogram was calculated within a range of 0-50\,ms and a bin size of 0.1\,ms and the burstiness was calculated as the percentage of ISI smaller than 2.5 EOD periods multiplied by the average ISI. This gives a rough measure of how how often a cell fires in the immediately following EOD periods compared to its average firing frequency. With a cell being more bursty the higher the percentage of small ISI and the lower the mean firing frequency of the cell.
Finally the ISI-histogram was calculated within a range of 0--50\,ms and a bin size of 0.1\,ms. The burstiness was calculated as the percentage of ISIs smaller than 2.5 EOD periods, multiplied by the average ISI. This gives a rough measure of how often a cell fires in the immediately following EOD periods compared to its average firing rate: the higher the percentage of small ISIs and the lower the mean firing rate, the burstier the cell.
%burstiness: \todo{how to write as equation, ignore and don't show an equation?}
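Following the definition above, burstiness could be computed roughly as follows (sketch; the 2.5-period criterion is taken from the text):

import numpy as np

def burstiness(isis, eod_period):
    # Percentage of ISIs shorter than 2.5 EOD periods ...
    percent_short = 100.0 * np.mean(isis < 2.5 * eod_period)
    # ... multiplied by the mean ISI.
    return percent_short * np.mean(isis)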
@ -289,7 +292,7 @@ The next slightly more complex model is the leaky integrate-and-fire (LIF) model
\label{eq:basic_voltage_dynamics}
\end{equation}
To reproduce this behavior the model needs some form of memory of previous spikes. There are two main ways this can be added to the model as an adaptive current or a dynamic threshold. The biophysical mechanism of the adaption in p-units is unknown because the cell bodies are not accessible for intra-cellular recordings. Following the results of \cite{benda2010linear} a negative adaptive current was chosen, because the dynamic threshold causes divisive adaption instead of the subtractive adaption of p-units \todo{reference}. This results in an leaky integrate-and-fire model with adaption current (LIFAC) (fig. \ref{fig:model_comparison} LIFAC). The added adaptive current follow the dynamics:
To reproduce this behavior the model needs some form of memory of previous spikes. There are two main ways this can be added to the model: as an adaptive current or as a dynamic threshold. The biophysical mechanism of the adaption in p-units is unknown because the cell bodies are not accessible for intra-cellular recordings. Following the results of \cite{benda2010linear} a negative adaptive current was chosen, because a dynamic threshold causes divisive adaption instead of the subtractive adaption seen in p-units \citep{benda2005spike}. This results in a leaky integrate-and-fire model with adaption current (LIFAC) (fig. \ref{fig:model_comparison} LIFAC). The added adaptive current follows the dynamics:
\begin{equation}
\tau_A \frac{dI_A}{dt} = -I_A + \Delta_A \sum_{t_i} \delta (t - t_i)
@ -305,7 +308,7 @@ It is modeled as an exponential decay with the time constant $\tau_A$ and a stre
The stimulus current $I_{Input}$, the bias current $I_{Bias}$ and the already discussed adaption current $I_A$. Note that in this p-unit model all currents are measured in mV because, as mentioned above, the cell body is not accessible for intra-cellular recordings and as such the membrane resistance $R_m$ is unknown \todo{ref mem res p-units}. $I_{Input}$ is the current of the stimulus, an amplitude modulated sine wave mimicking the EOD. This stimulus is rectified to model the receptor synapse and low-pass filtered with a time constant of $\tau_{dend}$ to simulate the low-pass filter properties of the dendrite (fig. \ref{fig:stim_development}). Afterwards it is multiplied by a cell-specific gain factor $\alpha$. This gain factor has the unit cm because the $I_{Input}$ stimulus represents the EOD with a unit of mV/cm. $I_{Bias}$ is the bias current that causes the cell's spontaneous spiking.
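A sketch of this input pathway (hypothetical helper; parameter names are assumptions):

import numpy as np

def input_current(stimulus, dt, tau_dend, alpha):
    # Rectify to model the receptor synapse ...
    rectified = np.maximum(stimulus, 0.0)
    # ... low-pass filter (Euler step) to mimic the dendrite ...
    i_input = np.zeros_like(rectified)
    for i in range(1, len(rectified)):
        i_input[i] = i_input[i - 1] + dt * (rectified[i] - i_input[i - 1]) / tau_dend
    # ... and scale with the cell-specific gain factor alpha (unit cm).
    return alpha * i_input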
Finally noise and an absolute refractory period were added to the model. The noise $\xi$ is drawn in from a Gaussian noise with values between 0 and 1 and divided by $\sqrt{\Delta t}$ to get a noise which autocorrelation function is independent of the simulation step size $\Delta t$. The implemented form of the absolute refractory period $t_{ref}$ keeps the model voltage at zero for the duration of $t_{ref}$ after a spike.
Finally noise and an absolute refractory period were added to the model. The noise $\xi$ is drawn from a Gaussian distribution and divided by $\sqrt{\Delta t}$ to obtain a noise whose autocorrelation function is independent of the simulation step size $\Delta t$. The implemented form of the absolute refractory period $t_{ref}$ keeps the model voltage at zero for the duration of $t_{ref}$ after a spike.
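A minimal Euler-integration sketch of the LIFAC dynamics described above (all parameter names and default values are assumptions for illustration, not the fitted model code):

import numpy as np

def lifac(stim, dt, tau_m=0.01, tau_a=0.1, delta_a=0.1,
          i_bias=0.0, noise=0.05, t_ref=0.001, v_thresh=1.0):
    # stim: rectified, low-pass filtered and gain-scaled input current (in mV).
    v, i_a, ref = 0.0, 0.0, 0.0
    spikes = []
    for i, s in enumerate(stim):
        i_a += dt * (-i_a / tau_a)  # adaptation current decays exponentially
        if ref > 0.0:               # absolute refractory period:
            ref -= dt               # voltage stays clamped at zero
            continue
        xi = noise * np.random.randn() / np.sqrt(dt)  # step-size independent noise
        v += dt * (-v + i_bias + s - i_a + xi) / tau_m
        if v >= v_thresh:           # threshold crossing -> spike
            spikes.append(i * dt)
            v = 0.0
            i_a += delta_a / tau_a  # adaptation kick per spike
            ref = t_ref
    return np.asarray(spikes)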
\begin{figure}[H]
@ -369,7 +372,6 @@ The error of the VS, CV, SC, and burstiness was calculated as the scaled absolut
\begin{equation}
err_i = |x^M_i - x^C_i| \cdot c_i
\end{equation}
with $x^M_i$ the model value for characteristic $i$, $x^C_i$ the corresponding cell value, and $c_i$ a scaling factor that is the same for all cells but differs between characteristics. The scaling factor was used to bring all errors to a similar magnitude.
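As code this error term is simply (sketch):

def scaled_abs_error(model_value, cell_value, scale):
    # err_i = |x_i^M - x_i^C| * c_i, with c_i fixed per characteristic.
    return abs(model_value - cell_value) * scale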
The error for the slope of the $f_{inf}$ fit was the scaled relative difference: