various
This commit is contained in:
parent 5d009b29d7
commit f08f962bf3
@@ -291,7 +291,14 @@ class DatParser(AbstractParser):
                else:
                    print("DataParser Dat: Unknown time notation:", key[1][0])
        if len(metadata) != 0:
            if "----- Stimulus -------------------------------------------------------" not in metadata[0].keys():
                eod_freq = float(metadata[0]["EOD rate"][:-2])  # in Hz
                trans_amplitude = metadata[0]["trans. amplitude"][:-2]  # in mV

                duration = float(metadata[0]["duration"][:-2]) * factor  # normally saved in ms, so convert it with the factor
                contrast = float(metadata[0]["contrast"][:-1])  # in percent
                delta_f = float(metadata[0]["deltaf"][:-2])
            else:
                stimulus_dict = metadata[0]["----- Stimulus -------------------------------------------------------"]
                analysis_dict = metadata[0]["----- Analysis -------------------------------------------------------"]
                eod_freq = float(metadata[0]["EOD rate"][:-2])  # in Hz
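The [:-2] and [:-1] slices above strip the unit suffix from the raw metadata string before the float conversion, assuming entries like "742.3Hz" or "20%". A minimal illustration with a hypothetical value:

value = "742.3Hz"             # hypothetical metadata entry: number plus two-character unit
eod_freq = float(value[:-2])  # -> 742.3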
@@ -427,77 +434,6 @@ class DatParser(AbstractParser):
        # if not exists(self.sam_file):
        #     raise RuntimeError(self.sam_file + " file doesn't exist!")

# MODEL PARSER: ------------------------------


class ModelParser(AbstractParser):

    def __init__(self, model: AbstractModel):
        self.model = model

    def cell_get_metadata(self):
        raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

    def get_baseline_traces(self):
        raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

    def get_fi_curve_traces(self):
        if not self.model.simulates_voltage_trace():
            raise NotImplementedError("Model doesn't simulate voltage traces!")

        traces = []
        for stimulus in self.model.get_stimuli_for_fi_curve():
            self.model.simulate(stimulus, self.model.total_stimulation_time_fi_curve)
            traces.append(self.model.get_voltage_trace())

        return traces

    def get_fi_curve_spiketimes(self):
        if not self.model.simulates_spiketimes():
            raise NotImplementedError("Model doesn't simulate spiketimes!")

        all_spiketimes = []
        for stimulus in self.model.get_stimuli_for_fi_curve():
            self.model.simulate(stimulus, self.model.total_stimulation_time_fi_curve)
            all_spiketimes.append(self.model.get_spiketimes())

        return all_spiketimes

    def get_fi_frequency_traces(self):
        if not self.model.simulates_frequency():
            raise NotImplementedError("Model doesn't simulate frequencies!")

        frequency_traces = []
        for stimulus in self.model.get_stimuli_for_fi_curve():
            self.model.simulate(stimulus, self.model.total_stimulation_time_fi_curve)
            frequency_traces.append(self.model.get_frequency())

        return frequency_traces

    def get_sampling_interval(self):
        return self.model.get_sampling_interval()

    def get_recording_times(self):
        raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

    def traces_available(self) -> bool:
        return self.model.simulates_voltage_trace()

    def spiketimes_available(self) -> bool:
        return self.model.simulates_spiketimes()

    def frequencies_available(self) -> bool:
        return self.model.simulates_frequency()
# TODO ####################################

class NixParser(AbstractParser):

    def __init__(self, nix_file_path):
        self.file_path = nix_file_path
        warn("NIX PARSER: NOT YET IMPLEMENTED!")
# TODO ####################################


def get_parser(data_path) -> AbstractParser:
    data_format = __test_for_format__(data_path)
@@ -505,9 +441,9 @@ def get_parser(data_path) -> AbstractParser:
    if data_format == DAT_FORMAT:
        return DatParser(data_path)
    elif data_format == NIX_FORMAT:
        return NixParser(data_path)
        raise NotImplementedError("DataParserFactory:get_parser(data_path): nix format doesn't have a parser yet")
    elif data_format == MODEL:
        return ModelParser(data_path)
        raise NotImplementedError("DataParserFactory:get_parser(data_path): Model doesn't have a parser yet")
    elif data_format == UNKNOWN:
        raise TypeError("DataParserFactory:get_parser(data_path):\nCannot determine type of data for:" + data_path)
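get_parser() dispatches on the detected data format, so callers never name a concrete parser class. A hypothetical call (the path is one that appears in the test script further below):

parser = get_parser("data/final/2018-05-08-ab-invivo-1/")  # -> DatParser for a .dat recording folder
if parser.traces_available():
    traces = parser.get_fi_curve_traces()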
@@ -9,6 +9,7 @@ from FiCurve import FICurveModel, FICurveCellData
from CellData import CellData
import functions as fu
import Figure_constants as consts
from scipy.stats import pearsonr

from matplotlib.ticker import FormatStrFormatter
@@ -39,18 +40,19 @@ def main():
    # quit()

    fits_info = get_filtered_fit_info(dir_path, filter=True)
    # visualize_tested_correlations(fits_info)
    quit()
    print("Cells left:", len(fits_info))
    # cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
    cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
    # plot_cell_model_comp_baseline(cell_behaviour, model_behaviour)
    # plot_cell_model_comp_burstiness(cell_behaviour, model_behaviour)
    # plot_cell_model_comp_adaption(cell_behaviour, model_behaviour)

    plot_cell_model_comp_adaption(cell_behaviour, model_behaviour)

    # behaviour_correlations_plot(fits_info)
    behaviour_correlations_plot(fits_info)
    parameter_correlation_plot(fits_info)
    #
    # create_parameter_distributions(get_parameter_values(fits_info))
    create_parameter_distributions(get_parameter_values(fits_info, scaled=True, goal_eodf=800), "scaled_to_800_")
    # create_parameter_distributions(get_parameter_values(fits_info, scaled=True, goal_eodf=800), "scaled_to_800_")
    # errors = calculate_percent_errors(fits_info)
    # create_boxplots(errors)
@@ -82,6 +84,142 @@ def run_all_images():
    example_bad_fi_fits(dir_path)


def visualize_tested_correlations(fits_info):

    for leave_out in range(1, 11, 1):
        significance_count, total_count, labels = test_correlations(fits_info, leave_out, model_values=False)
        percentages = significance_count / total_count
        border = total_count * 0.01
        fig = plt.figure(tight_layout=True, figsize=consts.FIG_SIZE_MEDIUM_WIDE)
        gs = gridspec.GridSpec(2, 2, width_ratios=(1, 1), height_ratios=(5, 0.5), hspace=0.5, wspace=0.4, left=0.2)

        ax = fig.add_subplot(gs[0, 0])
        # We want to show all ticks...

        ax.imshow(percentages)
        ax.set_xticks(np.arange(len(labels)))
        ax.set_xticklabels([behaviour_titles[l] for l in labels])
        # remove frame:
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # ... and label them with the respective list entries
        ax.set_yticks(np.arange(len(labels)))
        ax.set_yticklabels([behaviour_titles[l] for l in labels])

        ax.set_title("Percent: removed {}".format(leave_out))

        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")

        # Loop over data dimensions and create text annotations.
        for i in range(len(labels)):
            for j in range(len(labels)):
                if percentages[i, j] > 0.5:
                    text = ax.text(j, i, "{:.2f}".format(percentages[i, j]), ha="center", va="center",
                                   color="black", size=6)
                else:
                    text = ax.text(j, i, "{:.2f}".format(percentages[i, j]), ha="center", va="center",
                                   color="white", size=6)

        ax = fig.add_subplot(gs[0, 1])
        ax.imshow(percentages)
        ax.set_xticks(np.arange(len(labels)))
        ax.set_xticklabels([behaviour_titles[l] for l in labels])
        # remove frame:
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # ... and label them with the respective list entries
        ax.set_yticks(np.arange(len(labels)))
        ax.set_yticklabels([behaviour_titles[l] for l in labels])

        ax.set_title("Counts - removed {}".format(leave_out))

        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")

        # Loop over data dimensions and create text annotations.
        for i in range(len(labels)):
            for j in range(len(labels)):
                if percentages[i, j] > 0.5:
                    text = ax.text(j, i, "{:.0f}".format(significance_count[i, j]), ha="center", va="center",
                                   color="black", size=6)
                else:
                    text = ax.text(j, i, "{:.0f}".format(significance_count[i, j]), ha="center", va="center",
                                   color="white", size=6)


        ax_col = fig.add_subplot(gs[1, :])
        data = [np.arange(0, 1.001, 0.01)] * 10
        ax_col.set_xticks([0, 25, 50, 75, 100])
        ax_col.set_xticklabels([0, 0.25, 0.5, 0.75, 1])
        ax_col.set_yticks([])
        ax_col.imshow(data)
        ax_col.set_xlabel("Correlation Coefficients")


        plt.tight_layout()
        plt.savefig("figures/consistency_correlations_removed_{}.pdf".format(leave_out))
def test_correlations(fits_info, left_out, model_values=False):
    bv_cell, bv_model = get_behaviour_values(fits_info)
    # eod_frequencies = [fits_info[cell][3] for cell in sorted(fits_info.keys())]
    if model_values:
        behaviour_values = bv_model
    else:
        behaviour_values = bv_cell

    labels = ["baseline_frequency", "serial_correlation", "vector_strength", "coefficient_of_variation",
              "Burstiness", "f_inf_slope", "f_zero_slope"]  # , "eodf"]
    significance_counts = np.zeros((len(labels), len(labels)))
    correction_factor = sum(range(len(labels)))
    total_count = 0
    for mask in iall_masks(len(behaviour_values["f_inf_slope"]), left_out):
        total_count += 1
        idx = np.ones(len(behaviour_values["f_inf_slope"]), dtype=np.int32)
        for masked in mask:
            idx[masked] = 0
        for i in range(len(labels)):
            for j in range(len(labels)):
                if j > i:
                    continue
                idx = np.array(idx, dtype=bool)
                values_i = np.array(behaviour_values[labels[i]])[idx]
                values_j = np.array(behaviour_values[labels[j]])[idx]
                c, p = pearsonr(values_i, values_j)
                if p * correction_factor < 0.05:
                    significance_counts[i, j] += 1

    return significance_counts, total_count, labels
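The correction_factor above is a Bonferroni-style correction for multiple comparisons: with seven behaviour labels and the j > i half of the matrix skipped, sum(range(7)) equals the 21 unique off-diagonal label pairs that are tested (the diagonal entries are self-correlations). A quick check of the arithmetic:

labels_count = 7
assert sum(range(labels_count)) == labels_count * (labels_count - 1) // 2 == 21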
def iall_masks(values_count: int, left_out: int):
    mask = np.array(range(left_out))

    while True:
        if mask[0] == values_count - left_out + 1:
            break
        yield mask

        mask[-1] += 1

        if mask[-1] >= values_count:
            idx_to_start = 0
            for i in range(left_out - 1):
                if mask[-1 - i] >= values_count - i:
                    mask[-1 - (i + 1)] += 1
                    idx_to_start -= 1
                else:
                    break
            while idx_to_start < 0:
                # print("i:", idx_to_start, "mask:", mask)
                mask[idx_to_start] = mask[idx_to_start - 1] + 1
                idx_to_start += 1
                # print("i:", idx_to_start, "mask:", mask, "end")
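iall_masks hand-rolls the enumeration of all index subsets of size left_out, incrementing the last index and carrying over like a counter. For reference, a sketch of an equivalent generator built on itertools.combinations (the name is mine, not the repository's):

import numpy as np
from itertools import combinations

def iall_masks_via_itertools(values_count: int, left_out: int):
    # should yield the same index sets as iall_masks, in the same lexicographic order
    for mask in combinations(range(values_count), left_out):
        yield np.array(mask)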
def dend_tau_and_ref_effect():
    cells = ["2012-12-21-am-invivo-1", "2014-03-19-ad-invivo-1", "2014-03-25-aa-invivo-1"]
    cell_type = ["no burster", "burster", "strong burster"]
@@ -147,10 +285,7 @@ def create_parameter_distributions(par_values, prefix=""):
    x_labels = ["[cm]", "[mV]", "[ms]", r"[mV$\sqrt{s}$]", "[ms]", "[mVms]", "[ms]", "[ms]"]
    axes_flat = axes.flatten()
    for i, l in enumerate(labels):
        min_v = min(par_values[l]) * 0.95
        max_v = max(par_values[l]) * 1.05
        step = (max_v - min_v) / 20
        bins = np.arange(min_v, max_v + step, step)
        bins = calculate_bins(par_values[l], 20)
        if "ms" in x_labels[i]:
            bins *= 1000
            par_values[l] = np.array(par_values[l]) * 1000
@@ -582,19 +717,17 @@ def plot_cell_model_comp_burstiness(cell_behavior, model_behaviour):


def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
    fig = plt.figure(figsize=consts.FIG_SIZE_MEDIUM_WIDE)

    fig = plt.figure(figsize=(8, 4))
    gs = fig.add_gridspec(2, 3, width_ratios=[5, 5, 5], height_ratios=[3, 7],
                          left=0.1, right=0.95, bottom=0.1, top=0.9,
                          wspace=0.4, hspace=0.3)
    # ("f_inf_slope", "f_zero_slope")
    # Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
    # the size of the marginal axes and the main axes in both directions.
    # Also adjust the subplot parameters for a square plot.
    mpl.rc("axes.formatter", limits=(-5, 2))
    gs = fig.add_gridspec(2, 2, width_ratios=[5, 5], height_ratios=[3, 7],
                          left=0.1, right=0.9, bottom=0.1, top=0.9,
                          wspace=0.3, hspace=0.3)
    mpl.rc("axes.formatter", limits=(-5, 3))
    num_of_bins = 20
    cmap = 'jet'
    cell_bursting = cell_behavior["Burstiness"]

    # baseline freq plot:
    i = 0
    cell = cell_behavior["f_inf_slope"]
@@ -607,7 +740,7 @@ def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
    ax = fig.add_subplot(gs[1, i])
    ax_histx = fig.add_subplot(gs[0, i], sharex=ax)

    scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_inf_slope"], bins)  # , cmap, cell_bursting)
    scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_inf_slope"], bins)
    ax.set_xlabel(r"Cell [Hz]")
    ax.set_ylabel(r"Model [Hz]")
    ax_histx.set_ylabel("Count")
@@ -619,12 +752,11 @@ def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
    idx = np.array(cell) < 25000
    cell = np.array(cell)[idx]
    model = np.array(model)[idx]
    cell_bursting = np.array(cell_bursting)[idx]

    idx = np.array(model) < 25000
    cell = np.array(cell)[idx]
    model = np.array(model)[idx]
    cell_bursting = np.array(cell_bursting)[idx]

    print("removed {} values from f_zero_slope plot.".format(length_before - len(cell)))

    minimum = min(min(cell), min(model))
@@ -634,21 +766,52 @@ def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):

    ax = fig.add_subplot(gs[1, i])
    ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
    scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_zero_slope"], bins)  # , cmap, cell_bursting)
    scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_zero_slope"], bins)
    ax.set_xlabel("Cell [Hz]")
    ax.set_ylabel("Model [Hz]")
    ax_histx.set_ylabel("Count")
    i += 1

    # ratio:
    cell_inf = cell_behavior["f_inf_slope"]
    model_inf = model_behaviour["f_inf_slope"]
    cell_zero = cell_behavior["f_zero_slope"]
    model_zero = model_behaviour["f_zero_slope"]

    cell_ratio = [cell_zero[i] / cell_inf[i] for i in range(len(cell_inf))]
    model_ratio = [model_zero[i] / model_inf[i] for i in range(len(model_inf))]

    idx = np.array(cell_ratio) < 60
    cell_ratio = np.array(cell_ratio)[idx]
    model_ratio = np.array(model_ratio)[idx]

    idx = np.array(model_ratio) < 60
    cell_ratio = np.array(cell_ratio)[idx]
    model_ratio = np.array(model_ratio)[idx]

    both_ratios = list(cell_ratio.copy())
    both_ratios.extend(model_ratio)

    bins = calculate_bins(both_ratios, num_of_bins)

    ax = fig.add_subplot(gs[1, i])
    ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
    scatter_hist(cell_ratio, model_ratio, ax, ax_histx, r"$f_0$ / $f_{\infty}$ slope ratio", bins)
    ax.set_xlabel("Cell")
    ax.set_ylabel("Model")
    ax_histx.set_ylabel("Count")

    plt.tight_layout()

    fig.text(0.085, 0.925, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
    fig.text(0.54, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')
    # fig.text(0.085, 0.925, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
    # fig.text(0.54, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')

    plt.savefig(consts.SAVE_FOLDER + "fit_adaption_comparison.pdf", transparent=True)
    plt.savefig(consts.SAVE_FOLDER + "fit_adaption_comparison_with_ratio.pdf", transparent=True)
    plt.close()

    mpl.rc("axes.formatter", limits=(-5, 6))
def scatter_hist(cell_values, model_values, ax, ax_histx, behaviour, bins, cmap=None, color_values=None):
    # copied from matplotlib

@@ -665,5 +828,15 @@ def scatter_hist(cell_values, model_values, ax, ax_histx, behaviour, bins, cmap=

    ax_histx.set_title(behaviour)
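Only the head and tail of scatter_hist survive in this hunk. As orientation, a minimal sketch of the scatter-plus-marginal-histogram pattern that the "copied from matplotlib" comment refers to (the body is assumed, not the repository's code):

def scatter_hist_sketch(cell_values, model_values, ax, ax_histx, behaviour, bins):
    ax.scatter(cell_values, model_values, s=10)       # main panel: cell vs. model values
    ax_histx.hist(cell_values, bins=bins, alpha=0.5)  # marginal histogram on the shared x-axis
    ax_histx.hist(model_values, bins=bins, alpha=0.5)
    ax_histx.set_title(behaviour)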
def calculate_bins(values, num_of_bins):
    minimum = np.min(values)
    maximum = np.max(values)
    step = (maximum - minimum) / (num_of_bins - 1)

    bins = np.arange(minimum - 0.5 * step, maximum + step, step)
    return bins
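The half-step offset in calculate_bins puts the data minimum and maximum at bin centers and yields exactly num_of_bins bins. A quick check with made-up values:

edges = calculate_bins([0.0, 1.0], 20)
print(len(edges) - 1)  # -> 20 bins; 0.0 and 1.0 sit at the centers of the outer bins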
if __name__ == '__main__':
    main()
Sam.py (9 lines changed)
@@ -8,15 +8,22 @@ class SamAnalysis:


class SamAnalysisData(SamAnalysis):
    pass

    def __init__(self, cell_data):
        self.cell_data = cell_data

        self.mean_mod_freq_responses = []


class SamAnalysisModel(SamAnalysis):

    def __init__(self, model):
        pass



def get_sam_class(data) -> SamAnalysis:
    if isinstance(data, CellData):
        return SamAnalysisData(data)
@@ -12,9 +12,67 @@ import os


def main():
    sam_analysis("results/invivo_results/2013-01-08-ad-invivo-1/")
    sam_analysis("results/final_2/2011-10-25-ad-invivo-1/")

    # plot_traces_with_spiketimes()
    # plot_mean_of_cuts()

    quit()
    modelfit = get_best_fit("results/invivo_results/2013-01-08-ad-invivo-1/", use_comparable_error=False)
    modelfit = get_best_fit("results/final_2/2011-10-25-ad-invivo-1/")
    cell_data = CellData(modelfit.get_cell_path())

    eod_freq = cell_data.get_eod_frequency()
    model = modelfit.get_model()

    test_model_response(model, eod_freq, 0.1, np.arange(5, 2500, 5))


def test_model_response(model: LifacNoiseModel, eod_freq, contrast, modulation_frequencies):

    stds = []

    for m_freq in modulation_frequencies:
        if (1 / m_freq) / 10 <= model.parameters["step_size"]:
            # keep at least ten simulation steps per modulation period
            model.parameters["step_size"] = (1 / m_freq) / 10
        step_size = model.parameters["step_size"]
        print("mod_freq:", m_freq, "- step size:", step_size)
        stimulus = SAM(eod_freq, contrast / 100, m_freq)
        duration = 30
        v1, spikes_model = model.simulate(stimulus, duration)
        prob_density_function_model = spiketimes_calculate_pdf(spikes_model, step_size, kernel_width=0.005)

        fig, ax = plt.subplots(1, 1)
        ax.plot(prob_density_function_model)
        ax.set_title("pdf with m_freq: {}".format(int(m_freq)))

        plt.savefig("figures/sam/pdf_mfreq_{}.png".format(m_freq))
        plt.close()
        stds.append(np.std(prob_density_function_model))

    plt.plot((np.array(modulation_frequencies)) / eod_freq, stds)
    plt.show()
    plt.close()
def plot_traces_with_spiketimes():
    modelfit = get_best_fit("results/final_2/2011-10-25-ad-invivo-1/")
    cell_data = modelfit.get_cell_data()

    traces = cell_data.parser.__get_traces__("SAM")
    # [time_traces, v1_traces, eod_traces, local_eod_traces, stimulus_traces]
    sam_spiketimes = cell_data.get_sam_spiketimes()
    for i in range(len(traces[0])):
        fig, axes = plt.subplots(2, 1, sharex=True)
        axes[0].plot(traces[0][i], traces[1][i])
        axes[0].plot(list(sam_spiketimes[i]), list([max(traces[1][i])] * len(sam_spiketimes[i])), 'o')
        axes[1].plot(traces[0][i], traces[3][i])

        plt.show()
        plt.close()


def plot_mean_of_cuts():
    modelfit = get_best_fit("results/final_2/2018-05-08-ac-invivo-1/")

    if not os.path.exists(os.path.join(modelfit.get_cell_path(), "samallspikes1.dat")):
        print("Cell: {}\nHas no measured sam stimuli.".format(modelfit.get_cell_path()))
@@ -24,20 +82,6 @@ def main():
    eod_freq = cell_data.get_eod_frequency()
    model = modelfit.get_model()

    # base_cell = get_baseline_class(cell_data)
    # base_model = get_baseline_class(model, cell_data.get_eod_frequency())
    # isis_cell = np.array(base_cell.get_interspike_intervals()) * 1000
    # isi_model = np.array(base_model.get_interspike_intervals()) * 1000

    # bins = np.arange(0, 20, 0.1)
    # plt.hist(isi_model, bins=bins, alpha=0.5)
    # plt.hist(isis_cell, bins=bins, alpha=0.5)
    # plt.show()
    # plt.close()

    # ficurve = FICurveModel(model, np.arange(-1, 1.1, 0.1), eod_freq)
    #
    # ficurve.plot_fi_curve()
    durations = cell_data.get_sam_durations()
    u_durations = np.unique(durations)
    mean_duration = np.mean(durations)
@@ -56,17 +100,16 @@ def main():
            spikes_dictionary[m_freq] = [spiketimes[i]]

    for m_freq in sorted(spikes_dictionary.keys()):
        if mean_duration < 2*1/float(m_freq):
        if mean_duration < 2 * (1 / float(m_freq)):
            print("meep")
            continue
        stimulus = SAM(eod_freq, contrast/100, m_freq)
        v1, spikes_model = model.simulate(stimulus, mean_duration * 4)
        stimulus = SAM(eod_freq, contrast / 100, m_freq)
        v1, spikes_model = model.simulate(stimulus, 4)
        prob_density_function_model = spiketimes_calculate_pdf(spikes_model, step_size)
        # plt.plot(prob_density_function_model)
        # plt.show()
        # plt.close()

        fig, axes = plt.subplots(1, 4)
        cuts = cut_pdf_into_periods(prob_density_function_model, 1/float(m_freq), step_size)
        start_idx = int(2 / step_size)
        cuts = cut_pdf_into_periods(prob_density_function_model[start_idx:], 1 / float(m_freq), step_size)
        for c in cuts:
            axes[0].plot(c, color="gray", alpha=0.2)
        axes[0].set_title("model")
@@ -77,14 +120,13 @@ def main():
        for spikes_cell in spikes_dictionary[m_freq]:
            prob_density_cell = spiketimes_calculate_pdf(spikes_cell[0], step_size)

            if len(prob_density_cell) < 3 * (eod_freq / step_size):
                continue
            cuts_cell = cut_pdf_into_periods(prob_density_cell, 1/float(m_freq), step_size)
            cuts_cell = cut_pdf_into_periods(prob_density_cell, 1 / float(m_freq), step_size)
            for c in cuts_cell:
                axes[1].plot(c, color="gray", alpha=0.15)
            print(cuts_cell.shape)
            means_cell.append(np.mean(cuts_cell, axis=0))
        if len(means_cell) == 0:
            print("means cell length zero")
            continue
        means_cell = np.array(means_cell)
        total_mean_cell = np.mean(means_cell, axis=0)
@@ -92,7 +134,7 @@ def main():
        axes[1].plot(total_mean_cell, color="black")

        axes[2].set_title("difference")
        diff = [(total_mean_cell[i]-mean_model[i]) for i in range(len(total_mean_cell))]
        diff = [(total_mean_cell[i] - mean_model[i]) for i in range(len(total_mean_cell))]
        axes[2].plot(diff)

        axes[3].plot(total_mean_cell)
@@ -129,9 +171,11 @@ def sam_analysis(fit_path):
    delta_freqs = cell_data.get_sam_delta_frequencies()
    u_delta_freqs = np.unique(delta_freqs)

    all_data = []
    cell_stds = []
    model_stds = []
    for mod_freq in sorted(u_delta_freqs):
        # TODO: problem when cutting the pdf, as in some cases the pdf is shorter than one modulation-frequency period!
        # length info wrong? always at least one period?

        if 1 / mod_freq > durations[0] / 4:
            print("skipped mod_freq: {}".format(mod_freq))
@@ -152,53 +196,93 @@ def sam_analysis(fit_path):
            print("There are more spiketimes in one 'point'! Only the first was used!")
            spikes = spiketimes[i][0]

            cell_pdf = spiketimes_calculate_pdf(spikes, step_size)

            cell_cuts = cut_pdf_into_periods(cell_pdf, 1/mod_freq, step_size, factor=1.1, use_all=True)
            cell_cuts = cut_pdf_into_periods(cell_pdf, 1/mod_freq, step_size, factor=1.0)
            cell_mean = np.mean(cell_cuts, axis=0)
            cell_means.append(cell_mean)
            # fig, axes = plt.subplots(1, 2)
            # for c in cell_cuts:
            #     axes[0].plot(c, color="grey", alpha=0.2)
            # axes[0].plot(np.mean(cell_means, axis=0), color="black")

            stimulus = SAM(eod_freq, contrasts[i] / 100, mod_freq)
            v1, spikes_model = model.simulate(stimulus, durations[i] * 4)
            model_pdf = spiketimes_calculate_pdf(spikes_model, step_size)
            model_cuts = cut_pdf_into_periods(model_pdf, 1/mod_freq, step_size, factor=1.1)
            model_cuts = cut_pdf_into_periods(model_pdf, 1/mod_freq, step_size, factor=1.0)
            model_mean = np.mean(model_cuts, axis=0)
            model_means.append(model_mean)

            # for c in model_cuts:
            #     axes[1].plot(c, color="grey", alpha=0.2)
            # axes[1].plot(np.mean(model_cuts, axis=0), color="black")
            # plt.title("mod_freq: {}".format(mod_freq))
            # plt.show()
            # plt.close()
        final_cell_mean = np.mean(cell_means, axis=0)
        final_model_mean = np.mean(model_means, axis=0)
        cell_stds.append(np.std(final_cell_mean))
        model_stds.append(np.std(final_model_mean))
        final_model_mean_phase_corrected = correct_phase(final_cell_mean, final_model_mean, step_size)


        fig, axes = plt.subplots(1, 4)
        # PLOT EVERY MOD FREQ
        fig, axes = plt.subplots(1, 5, figsize=(15, 5), sharex=True)
        for c in cell_means:
            axes[0].plot(c, color="grey", alpha=0.2)
        axes[0].plot(np.mean(cell_means, axis=0), color="black")
        axes[0].set_title("Cell response")
        axis_cell = axes[0].axis()

        for m in model_means:
            axes[1].plot(m, color="grey", alpha=0.2)
        axes[1].plot(np.mean(model_means, axis=0), color="black")
        axes[1].set_title("Model response")
        axis_model = axes[1].axis()
        ylim_top = max(axis_cell[3], axis_model[3])
        axes[1].set_ylim(0, ylim_top)
        axes[0].set_ylim(0, ylim_top)
        axes[2].set_ylim(0, ylim_top)

        axes[2].plot(final_cell_mean, label="cell")
        axes[2].plot(final_model_mean, label="model")
        axes[2].plot(final_model_mean_phase_corrected, label="model p-cor")
        axes[2].legend()
        axes[2].set_title("cell-model overlapped")
        axes[3].plot((final_model_mean - final_cell_mean) / final_cell_mean, label="normal")
        axes[3].plot((final_model_mean_phase_corrected - final_cell_mean) / final_cell_mean, label="phase cor")
        axes[3].set_title("rel. error")
        axes[3].legend()
        axes[4].plot(final_model_mean - final_cell_mean, label="normal")
        axes[4].plot(final_model_mean_phase_corrected - final_cell_mean, label="phase cor")
        axes[4].set_title("abs. error (Hz)")
        axes[4].legend()

        fig.suptitle("modulation frequency: {}".format(mod_freq))

        # plt.tight_layout()
        plt.show()
        plt.close()

        axes[2].plot((np.mean(model_means, axis=0) - np.mean(cell_means, axis=0)) / np.mean(model_means, axis=0))
    fig, ax = plt.subplots(1, 1)

    plt.title("modulation frequency: {}".format(mod_freq))
    ax.plot(u_delta_freqs, cell_stds, label="cell stds")
    ax.plot(u_delta_freqs, model_stds, label="model stds")
    ax.set_title("response modulation depth")
    ax.set_xlabel("Modulation frequency")
    ax.set_ylabel("STD")
    ax.legend()
    plt.show()
    plt.close()
def correct_phase(cell_mean, model_mean, step_size):

    # test a roll every 0.2 ms across the total time:
    lowest_err = np.inf
    roll_idx = 0
    for i in range(int(len(cell_mean) * step_size * 1000) * 5):
        roll_by = int((i / 5 / 1000) / step_size)
        rolled = np.roll(model_mean, roll_by)
        # rms = np.sqrt(np.mean(np.power((cell_mean - rolled), 2)))
        abs_err = np.sum(np.abs(cell_mean - rolled))
        if abs_err < lowest_err:
            lowest_err = abs_err
            roll_idx = roll_by

    return np.roll(model_mean, roll_idx)
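correct_phase does a brute-force search over circular shifts in 0.2 ms steps. An alternative sketch (not the repository's method) finds the best shift in a single pass via FFT cross-correlation; note that it maximizes the correlation rather than minimizing the absolute error, so the chosen shift can differ slightly:

import numpy as np

def correct_phase_fft(cell_mean, model_mean):
    cell = np.asarray(cell_mean) - np.mean(cell_mean)
    model = np.asarray(model_mean) - np.mean(model_mean)
    # circular cross-correlation; its argmax is the roll that best aligns model to cell
    xcorr = np.fft.irfft(np.fft.rfft(cell) * np.conj(np.fft.rfft(model)), n=len(cell))
    return np.roll(model_mean, int(np.argmax(xcorr)))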
def generate_pdf(model, stimulus, trials=4, sim_length=3, kernel_width=0.005):

    trials_rate_list = []
@@ -221,7 +305,7 @@ def generate_pdf(model, stimulus, trials=4, sim_length=3, kernel_width=0.005):
    return mean_rate


def spiketimes_calculate_pdf(spikes, step_size, kernel_width=0.005):
def spiketimes_calculate_pdf(spikes, step_size, kernel_width=0.001):
    length = int(spikes[len(spikes) - 1] / step_size) + 1
    binary = np.zeros(length)
    spikes = [int(s / step_size) for s in spikes]
@@ -234,7 +318,11 @@ def spiketimes_calculate_pdf(spikes, step_size, kernel_width=0.005):
    return rate
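The middle of spiketimes_calculate_pdf is elided by the hunk; the commented-out snippet in test.py further below shows the same recipe. A self-contained sketch of that kernel-rate step (the helper names are assumed):

import numpy as np

def gaussian_kernel(sigma, dt):
    x = np.arange(-4 * sigma, 4 * sigma, dt)
    kernel = np.exp(-0.5 * (x / sigma) ** 2)
    return kernel / (np.sum(kernel) * dt)  # normalized so the convolved rate comes out in Hz

def rate_from_binary(binary, step_size, kernel_width=0.001):
    # binary: array with ones at spike bins, zeros elsewhere
    g = gaussian_kernel(kernel_width, step_size)
    return np.convolve(binary, g, mode='same')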
def cut_pdf_into_periods(pdf, period, step_size, factor=1.5, use_all=False):
def cut_pdf_into_periods(pdf, period, step_size, factor=1.5):

    if period < 0:
        print("cut_pdf_into_periods(): Period was negative! Absolute value taken to continue")
        period = abs(period)

    if period / step_size > len(pdf):
        return [pdf]
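Assuming the intended behavior is to split the rate trace into whole modulation periods, the cutting step can be expressed compactly with a reshape (a sketch, not the repository's implementation):

import numpy as np

def cut_into_periods_sketch(pdf, period, step_size):
    samples_per_period = max(1, int(round(period / step_size)))
    n_periods = len(pdf) // samples_per_period
    if n_periods == 0:
        return np.asarray([pdf])  # shorter than one period: return as-is, like above
    trimmed = np.asarray(pdf[:n_periods * samples_per_period])
    return trimmed.reshape(n_periods, samples_per_period)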
@@ -1,6 +1,5 @@
from stimuli.AbstractStimulus import AbstractStimulus
import numpy as np
from numba import jit, njit
from warnings import warn


@@ -63,7 +62,6 @@ def convert_to_array(carrier_freq, amplitude, modulation_freq, contrast, start_t
    else:
        am_end = time_start + total_time


    idx_start = (am_start - time_start) / step_size_s
    idx_end = (am_end - time_start) / step_size_s
@@ -81,45 +79,3 @@ def convert_to_array(carrier_freq, amplitude, modulation_freq, contrast, start_t
    values[idx_start:idx_end] = values[idx_start:idx_end]*am

    return values


# # if the whole stimulus time has the amplitude modulation just built it at once;
# if time_start >= start_time and start_time+duration < time_start+total_time:
#     carrier = np.sin(2 * np.pi * carrier_freq * np.arange(start_time, total_time - start_time, step_size_s))
#     modulation = 1 + contrast * np.sin(2 * np.pi * modulation_freq * np.arange(start_time, total_time - start_time, step_size_s))
#     values = amplitude * carrier * modulation
#     return values
#
# # if it is split into parts with and without amplitude modulation built it in parts:
# values = np.array([])
#
# # there is some time before the modulation starts:
# if time_start < start_time:
#     carrier_before_am = np.sin(2 * np.pi * carrier_freq * np.arange(time_start, start_time, step_size_s))
#     values = np.concatenate((values, amplitude * carrier_before_am))
#
# # there is at least a second part of the stimulus that contains the amplitude:
# # time starts before the end of the am and ends after it was started
# if time_start < start_time+duration and time_start+total_time > start_time:
#     if duration is np.inf:
#
#         carrier_during_am = np.sin(
#             2 * np.pi * carrier_freq * np.arange(start_time, time_start + total_time, step_size_s))
#         am = 1 + contrast * np.sin(
#             2 * np.pi * modulation_freq * np.arange(start_time, time_start + total_time, step_size_s))
#     else:
#         carrier_during_am = np.sin(
#             2 * np.pi * carrier_freq * np.arange(start_time, start_time + duration, step_size_s))
#         am = 1 + contrast * np.sin(
#             2 * np.pi * modulation_freq * np.arange(start_time, start_time + duration, step_size_s))
#     values = np.concatenate((values, amplitude * am * carrier_during_am))
#
# else:
#     if contrast != 0:
#         print("Given stimulus time parameters (start, total) result in no part of it containing the amplitude modulation!")
#
# if time_start+total_time > start_time+duration:
#     carrier_after_am = np.sin(2 * np.pi * carrier_freq * np.arange(start_time + duration, time_start + total_time, step_size_s))
#     values = np.concatenate((values, amplitude*carrier_after_am))
#
# return values
test.py (70 lines changed)
@@ -22,67 +22,15 @@ from matplotlib import gridspec
# from plottools.axes import labelaxes_params


cell = "data/final/2018-05-08-ab-invivo-1/"
cell_data = CellData(cell)
step = cell_data.get_sampling_interval()
v1 = cell_data.get_base_traces(cell_data.V1)[0]
time = cell_data.get_base_traces(cell_data.TIME)[0]
spiketimes = cell_data.get_base_spikes()[0]
start = 0
duration = 25

fig, ax = plt.subplots(1, 1)
ax.plot((np.array(time[:int(duration/step)]) - start) * 1000, v1[:int(duration/step)])
ax.eventplot([s * 1000 for s in spiketimes if start < s < start + duration],
             lineoffsets=max(v1[:int(duration/step)]) + 1.25, color="black", linelengths=2)

plt.show()
plt.close()
quit()

# sp = self.spikes(index)
# binary = np.zeros(t.shape)
# spike_indices = ((sp - t[0]) / dt).astype(int)
# binary[spike_indices[(spike_indices >= 0) & (spike_indices < len(binary))]] = 1
# g = gaussian_kernel(kernel_width, dt)
# rate = np.convolve(binary, g, mode='same')

fit = get_best_fit("results/final_2/2012-12-21-am-invivo-1/")
model = fit.get_model()
cell_data = fit.get_cell_data()
eodf = cell_data.get_eod_frequency()
parameters = model.parameters

time_param_keys = ["refractory_period", "tau_a", "mem_tau", "dend_tau"]
contrasts = np.arange(-0.3, 0.3, 0.05)
baseline_normal = BaselineModel(model, eodf)
fi_curve_normal = FICurveModel(model, contrasts, eodf)
fi_curve_normal.plot_fi_curve()
normal_isis = baseline_normal.get_interspike_intervals() * eodf
normal_bins = np.arange(0, 0.05, 0.0001) * eodf

factor = 1.1
scaled_eodf = eodf * factor
scaled_model = model.get_model_copy()

for key in time_param_keys:
    scaled_model.parameters[key] = parameters[key] / factor

baseline_scaled = BaselineModel(scaled_model, scaled_eodf)
fi_curve_scaled = FICurveModel(scaled_model, contrasts, scaled_eodf)
fi_curve_scaled.plot_fi_curve()
scaled_isis = np.array(baseline_scaled.get_interspike_intervals()) * scaled_eodf
scaled_bins = np.arange(0, 0.05, 0.0001) * scaled_eodf

# plt.hist(normal_isis, bins=normal_bins, alpha=0.5, label="normal")
# plt.hist(scaled_isis, bins=scaled_bins, alpha=0.5, label="scaled")
# plt.legend()
# plt.show()


directory = "data/final"
count = 0
for cell in sorted(os.listdir(directory)):
    cell_dir = os.path.join(directory, cell)
    if os.path.exists(cell_dir + "/samallspikes1.dat"):
        print(cell)
        count += 1

print(count)
BIN tests/consistency_correlations_removed_1.pdf (new file; binary file not shown)
BIN tests/consistency_correlations_removed_2.pdf (new file; binary file not shown)
BIN tests/consistency_correlations_removed_3.pdf (new file; binary file not shown)
BIN tests/consistency_correlations_removed_4.pdf (new file; binary file not shown)
(one further modified binary file not shown)
@@ -511,7 +511,7 @@ Before the parameter distributions (fig. \ref{fig:parameter_distributions}) and


\begin{figure}[H]
\includegraphics{figures/parameter_distributions.pdf}
\includegraphics{figures/scaled_to_800_parameter_distributions.pdf}
\caption{\label{fig:parameter_distributions} Distributions of all eight model parameters, with time scaled for all models so that their driving EOD frequency is 800\,Hz. \textbf{A}: input scaling $\alpha$, \textbf{B}: bias current $I_{Bias}$, \textbf{C}: membrane time constant $\tau_m$, \textbf{D}: noise strength $\sqrt{2D}$, \textbf{E}: adaption time constant $\tau_A$, \textbf{F}: adaption strength $\Delta_A$, \textbf{G}: time constant of the dendritic low-pass filter $\tau_{dend}$, \textbf{H}: refractory period $t_{ref}$.}
\end{figure}
BIN thesis/figures/fit_adaption_comparison_with_ratio.pdf (new file; binary file not shown)
(two further modified binary files not shown)