rescale images, correct discussion
This commit is contained in:
parent
fe34966e6a
commit
64d696935e
@@ -1,15 +1,17 @@
import plottools.colors as ptc
from plottools.axes import labelaxes_params
SAVE_FOLDER = "./thesis/figures/"

FIG_SIZE_SMALL = (4, 4)
FIG_SIZE_MEDIUM = (6, 6)
FIG_SIZE_LARGE = (8, 8)
FIG_SIZE_SMALL_WIDE = (6, 4)
FIG_SIZE_MEDIUM_WIDE = (8, 6)
FIG_SIZE_MEDIUM_HIGH = (6, 8)

SAVE_FOLDER = "./thesis/figures/"

FIG_SIZE_SMALL = (2, 2)
FIG_SIZE_MEDIUM = (4, 4)
FIG_SIZE_LARGE = (6, 6)
FIG_SIZE_SMALL_WIDE = (4, 2)
FIG_SIZE_SMALL_EXTRA_WIDE = (6, 3)
FIG_SIZE_MEDIUM_WIDE = (6, 4)
FIG_SIZE_LARGE_HIGH = (6, 8)

""" Muted colors. """

@@ -104,7 +104,7 @@ def p_unit_example():
step = cell_data.get_sampling_interval()

# Overview figure for p-unit behaviour
fig = plt.figure(tight_layout=True, figsize=consts.FIG_SIZE_MEDIUM)
fig = plt.figure(tight_layout=True, figsize=consts.FIG_SIZE_LARGE)
gs = gridspec.GridSpec(3, 2)

# a bit of trace with detected spikes

@@ -214,7 +214,7 @@ def fi_point_detection():
fi = FICurveCellData(cell_data, cell_data.get_fi_contrasts())
step = cell_data.get_sampling_interval()

fig, axes = plt.subplots(1, 2, figsize=consts.FIG_SIZE_SMALL_WIDE, sharey="row")
fig, axes = plt.subplots(1, 2, figsize=consts.FIG_SIZE_MEDIUM_WIDE, sharey="row")

f_trace_times, f_traces = fi.get_mean_time_and_freq_traces()
@@ -11,7 +11,7 @@ import models.smallModels as sM
def main():
stimulus_development()
# model_adaption_example()
# model_comparison()
model_comparison()
pass

@@ -107,7 +107,7 @@ def stimulus_development():
rectified = hF.rectify_stimulus_array(stim_array)
filtered = dendritic_lowpass(rectified, 0.001, step_size)

fig, axes = plt.subplots(3, 1, figsize=(6, 6), sharex="col")
fig, axes = plt.subplots(3, 1, figsize=consts.FIG_SIZE_MEDIUM, sharex="col")
time = np.arange(time_start, time_start+time_duration, step_size)

axes[0].plot(time, stim_array)

@@ -119,11 +119,11 @@ def stimulus_development():
axes[2].plot(time, filtered)
axes[2].set_title("rectified plus dendritic filter")

for ax in axes:
ax.set_ylim((-1.55, 1.55))
axes[0].set_ylim((-1.55, 1.55))
axes[1].set_ylim((-0.1, 1.55))
axes[2].set_ylim((-0.1, 1.55))

for ax in axes:
ax.set_ylabel("Amplitude [mV]")
axes[1].set_ylabel("Amplitude [mV]")
axes[2].set_xlabel("Time [s]")
axes[0].set_xlim((-0.02, 0.04))
# axes[2].set_ylim((0, 1.05))
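The two helpers used in stimulus_development(), hF.rectify_stimulus_array() and dendritic_lowpass(), are not part of this diff. As a hedged sketch of what such a rectification plus dendritic low-pass stage typically looks like (the function names mirror the calls above; the implementation details and the example carrier frequency are assumptions, not the repo's code):

import numpy as np

def rectify_stimulus_array(stim):
    # cut off the negative half-waves of the carrier
    return np.where(stim > 0.0, stim, 0.0)

def dendritic_lowpass(stim, tau_dend, step_size):
    # first-order exponential low-pass filter, Euler-integrated
    filtered = np.zeros_like(stim)
    for i in range(1, len(stim)):
        filtered[i] = filtered[i - 1] + (stim[i] - filtered[i - 1]) * step_size / tau_dend
    return filtered

step_size = 0.00005                                  # 0.05 ms integration step
time = np.arange(0.0, 0.04, step_size)
stim_array = np.sin(2 * np.pi * 700 * time)          # stand-in for the EOD carrier
filtered = dendritic_lowpass(rectify_stimulus_array(stim_array), 0.001, step_size)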
@@ -16,7 +16,7 @@ def main():
def am_generation():
cell = "data/final/2013-04-17-ac-invivo-1"
cell_data = CellData(cell)
fig, axes = plt.subplots(3, 1, sharey=True, sharex=True, figsize=consts.FIG_SIZE_MEDIUM)
fig, axes = plt.subplots(3, 1, sharey=True, sharex=True, figsize=consts.FIG_SIZE_SMALL)

start = 0
end = 0.05

@@ -1,6 +1,7 @@
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
from analysis import get_filtered_fit_info, get_behaviour_values, get_parameter_values, behaviour_correlations, parameter_correlations
from ModelFit import get_best_fit
from Baseline import BaselineModel, BaselineCellData

@@ -9,6 +10,8 @@ from CellData import CellData
import functions as fu
import Figure_constants as consts

from matplotlib.ticker import FormatStrFormatter


parameter_titles = {"input_scaling": r"$\alpha$", "delta_a": r"$\Delta_A$",
"mem_tau": r"$\tau_m$", "noise_strength": r"$\sqrt{2D}$",
@@ -27,39 +30,64 @@ behaviour_titles = {"baseline_frequency": "Base Rate", "Burstiness": "Burst", "c

def main():
# run_all_images()
# quit()

# dend_tau_and_ref_effect()
# quit()
dir_path = "results/final_2/"
fits_info = get_filtered_fit_info(dir_path, filter=True)
# print("Cells left:", len(fits_info))
# cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
# plot_cell_model_comp_baseline(cell_behaviour, model_behaviour)
# plot_cell_model_comp_adaption(cell_behaviour, model_behaviour)
print("Cells left:", len(fits_info))
cell_behaviour, model_behaviour = get_behaviour_values(fits_info)
plot_cell_model_comp_baseline(cell_behaviour, model_behaviour)
# plot_cell_model_comp_burstiness(cell_behaviour, model_behaviour)
# #
# plot_cell_model_comp_adaption(cell_behaviour, model_behaviour)

# behaviour_correlations_plot(fits_info)
# #
parameter_correlation_plot(fits_info)
# #
# parameter_correlation_plot(fits_info)

create_parameter_distributions(get_parameter_values(fits_info))
# create_parameter_distributions(get_parameter_values(fits_info, scaled=True, goal_eodf=800), "scaled_to_800_")
create_parameter_distributions(get_parameter_values(fits_info, scaled=True, goal_eodf=800), "scaled_to_800_")
# errors = calculate_percent_errors(fits_info)
# create_boxplots(errors)

# example_good_hist_fits(dir_path)
example_bad_hist_fits(dir_path)
# example_bad_hist_fits(dir_path)
# example_good_fi_fits(dir_path)
# example_bad_fi_fits(dir_path)


def run_all_images():
dend_tau_and_ref_effect()

dir_path = "results/final_2/"
fits_info = get_filtered_fit_info(dir_path, filter=True)
cell_behaviour, model_behaviour = get_behaviour_values(fits_info)

plot_cell_model_comp_baseline(cell_behaviour, model_behaviour)
plot_cell_model_comp_adaption(cell_behaviour, model_behaviour)
plot_cell_model_comp_burstiness(cell_behaviour, model_behaviour)

behaviour_correlations_plot(fits_info)
parameter_correlation_plot(fits_info)

create_parameter_distributions(get_parameter_values(fits_info))
create_parameter_distributions(get_parameter_values(fits_info, scaled=True, goal_eodf=800), "scaled_to_800_")

example_good_hist_fits(dir_path)
example_bad_hist_fits(dir_path)
example_good_fi_fits(dir_path)
example_bad_fi_fits(dir_path)


def dend_tau_and_ref_effect():
cells = ["2012-12-21-am-invivo-1", "2014-03-19-ad-invivo-1", "2018-05-08-ac-invivo-1"]
cell_type = ["no burster", "burster", "strong burster"]
folders = ["results/ref_and_tau/no_dend_tau/", "results/ref_and_tau/no_ref_period/", "results/final_2/"]
title = [r"without $\tau_{dend}$", r"without $t_{ref}$", "with both"]

fig, axes = plt.subplots(len(cells), 3, figsize=(12, 9), sharey="row", sharex="all")
fig, axes = plt.subplots(len(cells), 3, figsize=consts.FIG_SIZE_LARGE, sharey="row", sharex="all")

for i, cell in enumerate(cells):
cell_data = CellData("data/final/" + cell)
@@ -72,7 +100,11 @@ def dend_tau_and_ref_effect():
cell_isis = cell_baseline.get_interspike_intervals() * eodf
model_isis = model_baseline.get_interspike_intervals() * eodf
bins = np.arange(0, 0.025, 0.0001) * eodf

if i == 0 and j == 2:
axes[i, j].hist(cell_isis, density=True, bins=bins, color=consts.COLOR_DATA, alpha=0.5, label="data")
axes[i, j].hist(model_isis, density=True, bins=bins, color=consts.COLOR_MODEL, alpha=0.75, label="model")
axes[i, j].legend(loc="upper right", frameon=False)
else:
axes[i, j].hist(cell_isis, density=True, bins=bins, color=consts.COLOR_DATA, alpha=0.5)
axes[i, j].hist(model_isis, density=True, bins=bins, color=consts.COLOR_MODEL, alpha=0.75)
if j == 0:

@@ -80,23 +112,24 @@ def dend_tau_and_ref_effect():
axes[i, j].set_yticklabels([])
if i == 0:
axes[0, j].set_title(title[j])

plt.xlim(0, 17.5)
fig.text(0.5, 0.04, 'Time in EOD periods', ha='center', va='center')  # shared x label
fig.text(0.06, 0.5, 'ISI Density', ha='center', va='center', rotation='vertical')  # shared y label

fig.text(0.135, 0.9, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.4075, 0.9, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.68, 0.9, 'C', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.11, 0.86, '1', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.11, 0.59, '2', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.11, 0.32, '3', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.11, 0.9, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.3825, 0.9, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.655, 0.9, 'C', ha='center', va='center', rotation='horizontal', size=16, family='serif')
# fig.text(0.11, 0.86, '1', ha='center', va='center', rotation='horizontal', size=16, family='serif')
# fig.text(0.11, 0.59, '2', ha='center', va='center', rotation='horizontal', size=16, family='serif')
# fig.text(0.11, 0.32, '3', ha='center', va='center', rotation='horizontal', size=16, family='serif')

plt.savefig(consts.SAVE_FOLDER + "dend_ref_effect.pdf", transparent=True)
plt.close()


def create_parameter_distributions(par_values, prefix=""):
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace": 0.5}, figsize=consts.FIG_SIZE_MEDIUM_HIGH)
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace": 0.5}, figsize=consts.FIG_SIZE_LARGE_HIGH)

if len(par_values.keys()) != 8:
print("not eight parameters")
@@ -116,7 +149,7 @@ def create_parameter_distributions(par_values, prefix=""):
axes_flat[i].hist(par_values[l], bins=bins, color=consts.COLOR_MODEL, alpha=0.75)
# axes_flat[i].set_title(parameter_titles[l])
axes_flat[i].set_xlabel(parameter_titles[l] + " " + x_labels[i])
fig.text(0.01, 0.5, 'Count', ha='center', va='center', rotation='vertical')  # shared y label
fig.text(0.03, 0.5, 'Count', ha='center', va='center', rotation='vertical', size=12)  # shared y label
plt.tight_layout()

consts.set_figure_labels(xoffset=-2.5, yoffset=1.5)

@@ -128,7 +161,7 @@ def create_parameter_distributions(par_values, prefix=""):

def behaviour_correlations_plot(fits_info):
fig = plt.figure(tight_layout=True, figsize=consts.FIG_SIZE_MEDIUM_WIDE)
gs = gridspec.GridSpec(2, 2, width_ratios=(1, 1), height_ratios=(5, 1), hspace=0.025, wspace=0.05)
gs = gridspec.GridSpec(2, 2, width_ratios=(1, 1), height_ratios=(5, 0.5), hspace=0.5, wspace=0.15, left=0.2)
# fig, axes = plt.subplots(1, 2, figsize=consts.FIG_SIZE_MEDIUM_WIDE)

keys, corr_values, corrected_p_values = behaviour_correlations(fits_info, model_values=False)

@@ -137,8 +170,9 @@ def behaviour_correlations_plot(fits_info):

keys, corr_values, corrected_p_values = behaviour_correlations(fits_info, model_values=True)
labels = [behaviour_titles[k] for k in keys]
img = create_correlation_plot(fig.add_subplot(gs[0, 1]), labels, corr_values, corrected_p_values, "Model", y_label=False)

ax = fig.add_subplot(gs[0, 1])
img = create_correlation_plot(ax, labels, corr_values, corrected_p_values, "Model", y_label=False)
# cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
ax_col = fig.add_subplot(gs[1, :])
data = [np.arange(-1, 1.001, 0.01)] * 10
ax_col.set_xticks([0, 25, 50, 75, 100, 125, 150, 175, 200])

@@ -154,7 +188,7 @@ def behaviour_correlations_plot(fits_info):
def parameter_correlation_plot(fits_info):
labels, corr_values, corrected_p_values = parameter_correlations(fits_info)
par_labels = [parameter_titles[l] for l in labels]
fig, ax = plt.subplots(1, 1)
fig, ax = plt.subplots(1, 1, figsize=consts.FIG_SIZE_MEDIUM)
# ax, labels, correlations, p_values, title, y_label=True
im = create_correlation_plot(ax, par_labels, corr_values, corrected_p_values, "")
fig.colorbar(im, ax=ax)

@@ -200,9 +234,11 @@ def create_correlation_plot(ax, labels, correlations, p_values, title, y_label=T
if j >= i:
continue

if cleaned_cors[i, j] != np.NAN:
text = ax.text(j, i, "{:.2f}".format(cleaned_cors[i, j]), ha="center", va="center", color="w")

if not np.isnan(cleaned_cors[i, j]):
if cleaned_cors[i, j] > 0:
text = ax.text(j, i, "{:.2f}".format(cleaned_cors[i, j]), ha="center", va="center", color="black", size=6)
else:
text = ax.text(j, i, "{:.2f}".format(cleaned_cors[i, j]), ha="center", va="center", color="white", size=6)
# if p_values[i][j] < 0.0001:
# text = ax.text(j, i, "***", ha="center", va="center", color="b")
# elif p_values[i][j] < 0.001:
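behaviour_correlations() and parameter_correlations() are imported from analysis and their internals are not shown in this diff. As a hedged sketch of the kind of values create_correlation_plot() expects — pairwise correlation coefficients plus corrected p-values — something like the following could be used (Pearson correlation and a Bonferroni-style correction are assumptions here, not necessarily the repo's exact method):

import numpy as np
from scipy import stats

def correlations_with_corrected_p(values_by_key):
    # values_by_key: e.g. {"baseline_frequency": [...], "vector_strength": [...], ...}
    keys = list(values_by_key.keys())
    n = len(keys)
    corr = np.zeros((n, n))
    p_values = np.ones((n, n))
    n_tests = n * (n - 1) / 2
    for i in range(n):
        for j in range(i):
            r, p = stats.pearsonr(values_by_key[keys[i]], values_by_key[keys[j]])
            corr[i, j] = corr[j, i] = r
            p_values[i, j] = p_values[j, i] = min(1.0, p * n_tests)  # Bonferroni correction
    return keys, corr, p_values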
@@ -251,7 +287,7 @@ def example_bad_hist_fits(dir_path):
strong_bursty_cell = "2018-05-08-ab-invivo-1"
extra_structure_cell = "2014-12-11-ad-invivo-1"

fig, axes = plt.subplots(1, 3, sharex="all", figsize=(8, 4))
fig, axes = plt.subplots(1, 3, sharex="all", figsize=consts.FIG_SIZE_SMALL_EXTRA_WIDE)  # , gridspec_kw={"top": 0.95})

for i, cell in enumerate([bursty_cell, strong_bursty_cell, extra_structure_cell]):
fit_dir = dir_path + cell + "/"

@@ -267,13 +303,18 @@ def example_bad_hist_fits(dir_path):
cell_isi = BaselineCellData(cell_data).get_interspike_intervals() * eodf

bins = np.arange(0, 0.025, 0.0001) * eodf
if i == 0:
axes[i].hist(model_isi, bins=bins, density=True, alpha=0.75, color=consts.COLOR_MODEL, label="model")
axes[i].hist(cell_isi, bins=bins, density=True, alpha=0.5, color=consts.COLOR_DATA, label="data")
axes[i].legend(loc="upper right", frameon=False)
else:
axes[i].hist(model_isi, bins=bins, density=True, alpha=0.75, color=consts.COLOR_MODEL)
axes[i].hist(cell_isi, bins=bins, density=True, alpha=0.5, color=consts.COLOR_DATA)

axes[i].set_xlabel("ISI in EOD periods")
axes[0].set_ylabel("Density")
plt.tight_layout()
consts.set_figure_labels(xoffset=-2.5)
consts.set_figure_labels(xoffset=-2.5, yoffset=1.25)
fig.label_axes()

plt.savefig(consts.SAVE_FOLDER + "example_bad_isi_hist_fits.pdf", transparent=True)

@@ -281,7 +322,7 @@ def example_bad_hist_fits(dir_path):

def example_good_fi_fits(dir_path):
fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharey="all")
fig, axes = plt.subplots(1, 3, figsize=consts.FIG_SIZE_SMALL_EXTRA_WIDE, sharey="all")
for i, cell in enumerate(["2012-12-21-am-invivo-1", "2013-02-21-ag-invivo-1", "2014-03-19-ae-invivo-1"]):
fit_dir = dir_path + cell + "/"
fit = get_best_fit(fit_dir)
@@ -301,21 +342,23 @@ def example_good_fi_fits(dir_path):

# f zero response
axes[i].plot(contrasts, fi_curve_data.get_f_zero_frequencies(), ',',
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_DATA_f0)
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_DATA_f0, label=r"data $f_0$")
axes[i].plot(x_values, fu.full_boltzmann(x_values, f_zero_fit[0], f_zero_fit[1], f_zero_fit[2], f_zero_fit[3]),
color=consts.COLOR_DATA_f0, alpha=0.75)
axes[i].plot(contrasts, fi_curve_model.get_f_zero_frequencies(), ',',
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_MODEL_f0)
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_MODEL_f0, label=r"model $f_0$")

# f inf response
axes[i].plot(contrasts, fi_curve_data.get_f_inf_frequencies(), ',',
marker=consts.finf_marker, alpha=0.5, color=consts.COLOR_DATA_finf)
marker=consts.finf_marker, alpha=0.5, color=consts.COLOR_DATA_finf, label=r"data $f_{\infty}$")
axes[i].plot(x_values, fu.clipped_line(x_values, f_inf_fit[0], f_inf_fit[1]),
color=consts.COLOR_DATA_finf, alpha=0.5)
axes[i].plot(contrasts, fi_curve_model.get_f_inf_frequencies(), ',',
marker=consts.finf_marker, alpha=0.75, color=consts.COLOR_MODEL_finf)
marker=consts.finf_marker, alpha=0.75, color=consts.COLOR_MODEL_finf, label=r"model $f_{\infty}$")

axes[i].set_xlabel("Contrast")

axes[0].legend(loc="upper left", frameon=False)
axes[0].set_ylabel("Frequency [Hz]")
plt.tight_layout()
consts.set_figure_labels(xoffset=-2.5)

@@ -326,7 +369,7 @@ def example_good_fi_fits(dir_path):

def example_bad_fi_fits(dir_path):
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
fig, axes = plt.subplots(1, 2, figsize=consts.FIG_SIZE_SMALL_EXTRA_WIDE)
# "2013-01-08-aa-invivo-1" candidate cell
for i, cell in enumerate(["2012-12-13-ao-invivo-1", "2014-01-23-ab-invivo-1"]):
fit_dir = dir_path + cell + "/"

@@ -347,22 +390,24 @@ def example_bad_fi_fits(dir_path):

# f zero response
axes[i].plot(contrasts, fi_curve_data.get_f_zero_frequencies(), ',',
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_DATA_f0)
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_DATA_f0, label=r"data $f_0$")
axes[i].plot(x_values, fu.full_boltzmann(x_values, f_zero_fit[0], f_zero_fit[1], f_zero_fit[2], f_zero_fit[3]),
color=consts.COLOR_DATA_f0, alpha=0.75)
axes[i].plot(contrasts, fi_curve_model.get_f_zero_frequencies(), ',',
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_MODEL_f0)
marker=consts.f0_marker, alpha=0.75, color=consts.COLOR_MODEL_f0, label=r"model $f_0$")

# f inf response
axes[i].plot(contrasts, fi_curve_data.get_f_inf_frequencies(), ',',
marker=consts.finf_marker, alpha=0.5, color=consts.COLOR_DATA_finf)
marker=consts.finf_marker, alpha=0.5, color=consts.COLOR_DATA_finf, label=r"data $f_{\infty}$")
axes[i].plot(x_values, fu.clipped_line(x_values, f_inf_fit[0], f_inf_fit[1]),
color=consts.COLOR_DATA_finf, alpha=0.5)
axes[i].plot(contrasts, fi_curve_model.get_f_inf_frequencies(), ',',
marker=consts.finf_marker, alpha=0.75, color=consts.COLOR_MODEL_finf)
marker=consts.finf_marker, alpha=0.75, color=consts.COLOR_MODEL_finf, label=r"model $f_{\infty}$")

axes[i].set_xlabel("Contrast")

axes[0].set_ylabel("Frequency [Hz]")
axes[0].legend(loc="upper left", frameon=False)
plt.tight_layout()
consts.set_figure_labels(xoffset=-2.5)
fig.label_axes()
@@ -385,11 +430,13 @@ def create_boxplots(errors):


def plot_cell_model_comp_baseline(cell_behavior, model_behaviour):
fig = plt.figure(figsize=(12, 6))
fig = plt.figure(figsize=(8, 4))
gs = fig.add_gridspec(2, 3, width_ratios=[5, 5, 5], height_ratios=[3, 7],
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.25, hspace=0.2)
left=0.1, right=0.95, bottom=0.1, top=0.9,
wspace=0.4, hspace=0.2)
num_of_bins = 20
cmap = 'jet'
cell_bursting = cell_behavior["Burstiness"]
# baseline freq plot:
i = 0
cell = cell_behavior["baseline_frequency"]

@@ -401,7 +448,7 @@ def plot_cell_model_comp_baseline(cell_behavior, model_behaviour):

ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["baseline_frequency"], bins)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["baseline_frequency"], bins)  # , cmap, cell_bursting)
ax.set_xlabel(r"Cell [Hz]")
ax.set_ylabel(r"Model [Hz]")
ax_histx.set_ylabel("Count")

@@ -416,7 +463,7 @@ def plot_cell_model_comp_baseline(cell_behavior, model_behaviour):

ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["vector_strength"], bins)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["vector_strength"], bins)  # , cmap, cell_bursting)
ax.set_xlabel(r"Cell")
ax.set_ylabel(r"Model")
ax_histx.set_ylabel("Count")

@@ -431,7 +478,7 @@ def plot_cell_model_comp_baseline(cell_behavior, model_behaviour):

ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["serial_correlation"], bins)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["serial_correlation"], bins)  # , cmap, cell_bursting)
ax.set_xlabel(r"Cell")
ax.set_ylabel(r"Model")
fig.text(0.09, 0.925, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')

@@ -445,10 +492,10 @@ def plot_cell_model_comp_baseline(cell_behavior, model_behaviour):
plt.close()


def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
fig = plt.figure(figsize=(8, 6))
def plot_cell_model_comp_burstiness(cell_behavior, model_behaviour):
fig = plt.figure(figsize=consts.FIG_SIZE_MEDIUM_WIDE)

# ("f_inf_slope", "f_zero_slope")
# ("Burstiness", "coefficient_of_variation")
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
@@ -458,32 +505,25 @@ def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
num_of_bins = 20
# baseline freq plot:
i = 0
cell = cell_behavior["f_inf_slope"]
model = model_behaviour["f_inf_slope"]
cmap = 'jet'
cell = cell_behavior["Burstiness"]
cell_bursting = cell
model = model_behaviour["Burstiness"]
minimum = min(min(cell), min(model))
maximum = max(max(cell), max(model))
step = (maximum - minimum) / num_of_bins
bins = np.arange(minimum, maximum + step, step)

ax = fig.add_subplot(gs[1, i])
ax.set_xlabel("Cell [%ms]")
ax.set_ylabel("Model [%ms]")
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_inf_slope"], bins)
ax.set_xlabel(r"Cell [Hz]")
ax.set_ylabel(r"Model [Hz]")
ax_histx.set_ylabel("Count")
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["Burstiness"], bins, cmap, cell_bursting)
i += 1

cell = cell_behavior["f_zero_slope"]
model = model_behaviour["f_zero_slope"]
length_before = len(cell)
idx = np.array(cell) < 25000
cell = np.array(cell)[idx]
model = np.array(model)[idx]

idx = np.array(model) < 25000
cell = np.array(cell)[idx]
model = np.array(model)[idx]
print("removed {} values from f_zero_slope plot.".format(length_before - len(cell)))
cell = cell_behavior["coefficient_of_variation"]
model = model_behaviour["coefficient_of_variation"]

minimum = min(min(cell), min(model))
maximum = max(max(cell), max(model))

@@ -492,50 +532,66 @@ def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):

ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_zero_slope"], bins)
ax.set_xlabel("Cell [Hz]")
ax.set_ylabel("Model [Hz]")
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["coefficient_of_variation"], bins, cmap, cell_bursting)

ax.set_xlabel("Cell")
ax.set_ylabel("Model")
ax_histx.set_ylabel("Count")

plt.tight_layout()

fig.text(0.085, 0.925, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.54, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.53, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')

plt.savefig(consts.SAVE_FOLDER + "fit_adaption_comparison.pdf", transparent=True)
plt.savefig(consts.SAVE_FOLDER + "fit_burstiness_comparison.pdf", transparent=True)
plt.close()


def plot_cell_model_comp_burstiness(cell_behavior, model_behaviour):
fig = plt.figure(figsize=(8, 6))
def plot_cell_model_comp_adaption(cell_behavior, model_behaviour):
fig = plt.figure(figsize=consts.FIG_SIZE_MEDIUM_WIDE)

# ("Burstiness", "coefficient_of_variation")
# ("f_inf_slope", "f_zero_slope")
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
mpl.rc("axes.formatter", limits=(-5, 2))
gs = fig.add_gridspec(2, 2, width_ratios=[5, 5], height_ratios=[3, 7],
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.25, hspace=0.2)
wspace=0.3, hspace=0.3)
num_of_bins = 20
cmap = 'jet'
cell_bursting = cell_behavior["Burstiness"]
# baseline freq plot:
i = 0
cell = cell_behavior["Burstiness"]
model = model_behaviour["Burstiness"]
cell = cell_behavior["f_inf_slope"]
model = model_behaviour["f_inf_slope"]
minimum = min(min(cell), min(model))
maximum = max(max(cell), max(model))
step = (maximum - minimum) / num_of_bins
bins = np.arange(minimum, maximum + step, step)

ax = fig.add_subplot(gs[1, i])
ax.set_xlabel("Cell [%ms]")
ax.set_ylabel("Model [%ms]")
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)

scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_inf_slope"], bins)  # , cmap, cell_bursting)
ax.set_xlabel(r"Cell [Hz]")
ax.set_ylabel(r"Model [Hz]")
ax_histx.set_ylabel("Count")
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["Burstiness"], bins)
i += 1

cell = cell_behavior["coefficient_of_variation"]
model = model_behaviour["coefficient_of_variation"]
cell = cell_behavior["f_zero_slope"]
model = model_behaviour["f_zero_slope"]
length_before = len(cell)
idx = np.array(cell) < 25000
cell = np.array(cell)[idx]
model = np.array(model)[idx]
cell_bursting = np.array(cell_bursting)[idx]

idx = np.array(model) < 25000
cell = np.array(cell)[idx]
model = np.array(model)[idx]
cell_bursting = np.array(cell_bursting)[idx]
print("removed {} values from f_zero_slope plot.".format(length_before - len(cell)))

minimum = min(min(cell), min(model))
maximum = max(max(cell), max(model))

@@ -544,30 +600,32 @@ def plot_cell_model_comp_burstiness(cell_behavior, model_behaviour):

ax = fig.add_subplot(gs[1, i])
ax_histx = fig.add_subplot(gs[0, i], sharex=ax)
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["coefficient_of_variation"], bins)

ax.set_xlabel("Cell")
ax.set_ylabel("Model")
scatter_hist(cell, model, ax, ax_histx, behaviour_titles["f_zero_slope"], bins)  # , cmap, cell_bursting)
ax.set_xlabel("Cell [Hz]")
ax.set_ylabel("Model [Hz]")
ax_histx.set_ylabel("Count")

plt.tight_layout()

fig.text(0.085, 0.925, 'A', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.53, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')
fig.text(0.54, 0.925, 'B', ha='center', va='center', rotation='horizontal', size=16, family='serif')

plt.savefig(consts.SAVE_FOLDER + "fit_burstiness_comparison.pdf", transparent=True)
plt.savefig(consts.SAVE_FOLDER + "fit_adaption_comparison.pdf", transparent=True)
plt.close()

mpl.rc("axes.formatter", limits=(-5, 6))

def scatter_hist(cell_values, model_values, ax, ax_histx, behaviour, bins):
def scatter_hist(cell_values, model_values, ax, ax_histx, behaviour, bins, cmap=None, color_values=None):
# copied from matplotlib

# the scatter plot:
minimum = min(min(cell_values), min(model_values))
maximum = max(max(cell_values), max(model_values))
ax.plot((minimum, maximum), (minimum, maximum), color="grey")
if cmap is None:
ax.scatter(cell_values, model_values, color="black")

else:
ax.scatter(cell_values, model_values, c=color_values, cmap=cmap)
ax_histx.hist(model_values, bins=bins, color=consts.COLOR_MODEL, alpha=0.75)
ax_histx.hist(cell_values, bins=bins, color=consts.COLOR_DATA, alpha=0.50)
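A minimal usage sketch for scatter_hist() as defined above; it has to run inside this module (scatter_hist uses the colour constants from Figure_constants), and the data here are made up purely to show how the scatter axis and the marginal histogram axis are wired together:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
cell_values = rng.normal(200, 50, 75)               # invented cell baseline rates
model_values = cell_values + rng.normal(0, 15, 75)  # invented model rates
bins = np.linspace(min(cell_values.min(), model_values.min()),
                   max(cell_values.max(), model_values.max()), 20)

fig = plt.figure(figsize=(4, 4))
gs = fig.add_gridspec(2, 1, height_ratios=[3, 7])
ax = fig.add_subplot(gs[1, 0])
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax)
scatter_hist(cell_values, model_values, ax, ax_histx, "Base Rate", bins)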
@@ -33,6 +33,10 @@ num_of_models = 100


def main():

rerun_all_images()
quit()

folder = "results/final_2/"
fit_infos = get_filtered_fit_info(folder, filter=True)
goal_eodf = 800

@@ -57,6 +61,23 @@ def main():
compare_distribution_random_vs_fitted_params(par_list, param_values)


def rerun_all_images():

folder = "results/final_2/"
fit_infos = get_filtered_fit_info(folder, filter=True)
goal_eodf = 800
param_values = get_parameter_values(fit_infos, scaled=True, goal_eodf=goal_eodf)

keys, means, cov_matrix = calculate_means_and_covariances(param_values)
par_list = draw_random_models(1000, keys, means, cov_matrix, seed=1)
parameter_correlation_plot(par_list, fit_infos)
plot_distributions_with_set_fits(param_values)

behaviour, par_list = load_behavior()
create_behaviour_distributions(behaviour, fit_infos)
compare_distribution_random_vs_fitted_params(par_list, param_values)


def compare_distribution_random_vs_fitted_params(par_list, scaled_param_values):
labels = ["input_scaling", "v_offset", "mem_tau", "noise_strength",
"tau_a", "delta_a", "dend_tau", "refractory_period"]
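calculate_means_and_covariances() and draw_random_models() used in rerun_all_images() above are defined elsewhere in the repo. As a hedged sketch, drawing random model parameter sets from a multivariate Gaussian fitted to the population could look roughly like this (the real implementation may differ, e.g. by working on log-transformed parameters; the example values are invented):

import numpy as np

def draw_random_models_sketch(n, keys, means, cov_matrix, seed=None):
    # sample n parameter sets from a multivariate Gaussian fitted to the fitted models
    rng = np.random.default_rng(seed)
    draws = rng.multivariate_normal(means, cov_matrix, size=n)
    return [dict(zip(keys, row)) for row in draws]

# example with two invented parameters
keys = ["mem_tau", "refractory_period"]
means = np.array([0.005, 0.001])
cov = np.array([[1e-6, 2e-8], [2e-8, 1e-7]])
par_list = draw_random_models_sketch(1000, keys, means, cov, seed=1)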
@@ -69,7 +90,7 @@ def compare_distribution_random_vs_fitted_params(par_list, scaled_param_values):
for l in labels:
model_parameter_values[l].append(params[l])

fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_MEDIUM_HIGH)
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_LARGE_HIGH)
axes_flat = axes.flatten()
for i, l in enumerate(labels):
rand_model_values = model_parameter_values[l]

@@ -93,7 +114,7 @@ def compare_distribution_random_vs_fitted_params(par_list, scaled_param_values):
axes_flat[i].set_yticks([])
axes_flat[i].set_yticklabels([])

fig.text(0.01, 0.5, 'Density', ha='center', va='center', rotation='vertical')  # shared y label
fig.text(0.03, 0.5, 'Density', ha='center', va='center', rotation='vertical', size=12)  # shared y label
plt.tight_layout()

consts.set_figure_labels(xoffset=-2.5, yoffset=0)

@@ -106,17 +127,20 @@ def compare_distribution_random_vs_fitted_params(par_list, scaled_param_values):
def parameter_correlation_plot(par_list, fits_info):

fig = plt.figure(tight_layout=True, figsize=consts.FIG_SIZE_MEDIUM_WIDE)
gs = gridspec.GridSpec(2, 2, width_ratios=(1, 1), height_ratios=(5, 1), hspace=0.025, wspace=0.05)
gs = gridspec.GridSpec(2, 2, width_ratios=(1, 1), height_ratios=(5, 1), hspace=0.05, wspace=0.05)
# fig, axes = plt.subplots(1, 2, figsize=consts.FIG_SIZE_MEDIUM_WIDE)

labels, corr_values, corrected_p_values = parameter_correlations(fits_info)
par_labels = [parameter_titles[l] for l in labels]
img = create_correlation_plot(fig.add_subplot(gs[0, 0]), par_labels, corr_values, corrected_p_values,
"Fitted Models", y_label=True)

rand_labels, rand_corr_values, rand_corrected_p_values = parameter_correlations_from_par_list(par_list)
par_labels = [parameter_titles[l] for l in rand_labels]
img = create_correlation_plot(fig.add_subplot(gs[0, 1]), par_labels, rand_corr_values, rand_corrected_p_values * 10e50, "Drawn Models")
img = create_correlation_plot(fig.add_subplot(gs[0, 1]), par_labels, rand_corr_values, rand_corrected_p_values * 10e50, "Drawn Models", y_label=False)

labels, corr_values, corrected_p_values = parameter_correlations(fits_info)
par_labels = [parameter_titles[l] for l in labels]
img = create_correlation_plot(fig.add_subplot(gs[0, 0]), par_labels, corr_values, corrected_p_values, "Fitted Models",
y_label=False)
consts.set_figure_labels(xoffset=-2.5, yoffset=1.5)
fig.label_axes()

ax_col = fig.add_subplot(gs[1, :])
data = [np.arange(-1, 1.001, 0.01)] * 10

@@ -125,10 +149,8 @@ def parameter_correlation_plot(par_list, fits_info):
ax_col.set_yticks([])
ax_col.imshow(data)
ax_col.set_xlabel("Correlation Coefficients")
plt.tight_layout()

consts.set_figure_labels(xoffset=-2.5, yoffset=1.5)
fig.label_axes()

plt.savefig(consts.SAVE_FOLDER + "rand_parameter_correlations_comparison.pdf")
plt.close()
@@ -234,7 +256,7 @@ def load_behavior():


def create_behaviour_distributions(drawn_model_behaviour, fits_info):
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_MEDIUM_HIGH)
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_LARGE_HIGH)
cell_behaviour, fitted_model_behaviour = get_behaviour_values(fits_info)
labels = ['Burstiness', 'baseline_frequency', 'coefficient_of_variation', 'f_inf_slope', 'f_zero_slope', 'serial_correlation', 'vector_strength']
unit = ["[%ms]", "[Hz]", "", "[Hz]", "[Hz]", "", ""]

@@ -247,7 +269,7 @@ def create_behaviour_distributions(drawn_model_behaviour, fits_info):
# if max_v > limit:
# print("For {} the max value was limited to {}, {} values were excluded!".format(l, limit, np.sum(np.array(cell_b_values[l]) > limit)))
# max_v = limit
step = (max_v - min_v) / 15
step = (max_v - min_v) / 20
bins = np.arange(min_v, max_v + step, step)
axes_flat[i].hist(drawn_model_behaviour[l], bins=bins, alpha=0.75, density=True, color=consts.COLOR_MODEL)
axes_flat[i].hist(cell_behaviour[l], bins=bins, alpha=0.5, density=True, color=consts.COLOR_DATA)

@@ -260,7 +282,7 @@ def create_behaviour_distributions(drawn_model_behaviour, fits_info):

consts.set_figure_labels(xoffset=-2.5, yoffset=0)
fig.label_axes()
fig.text(0.02, 0.5, 'Density', ha='center', va='center', rotation='vertical')  # shared y label
fig.text(0.03, 0.5, 'Density', ha='center', va='center', rotation='vertical', size=12)  # shared y label

plt.savefig(consts.SAVE_FOLDER + "random_models_behaviour_dist.pdf")
plt.close()

@@ -337,7 +359,7 @@ def get_gauss_fits():

def plot_distributions_with_set_fits(param_values):

fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_MEDIUM_HIGH)
fig, axes = plt.subplots(4, 2, gridspec_kw={"left": 0.1, "hspace":0.5}, figsize=consts.FIG_SIZE_LARGE_HIGH)

gauss_fits = get_gauss_fits()
bin_number = 30

@@ -373,7 +395,7 @@ def plot_distributions_with_set_fits(param_values):

consts.set_figure_labels(xoffset=-2.5, yoffset=0)
fig.label_axes()
fig.text(0.02, 0.5, 'Density', ha='center', va='center', rotation='vertical')  # shared y label
fig.text(0.03, 0.5, 'Density', ha='center', va='center', rotation='vertical', size=12)  # shared y label

plt.savefig(consts.SAVE_FOLDER + "parameter_distribution_with_gauss_fits.pdf")
plt.close()
@@ -62,6 +62,12 @@ Olypher, A.~V. and Calabrese, R.~L. (2007).
in neuronal parameters.
\newblock {\em Journal of Neurophysiology}, 98(6):3749--3758.

\bibitem[Padmanabhan and Urban, 2010]{padmanabhan2010intrinsic}
Padmanabhan, K. and Urban, N.~N. (2010).
\newblock Intrinsic biophysical diversity decorrelates neuronal firing while
increasing information content.
\newblock {\em Nature Neuroscience}, 13(10):1276--1282.

\bibitem[Ratnam and Nelson, 2000]{ratnam2000nonrenewal}
Ratnam, R. and Nelson, M.~E. (2000).
\newblock Nonrenewal statistics of electrosensory afferent spike trains:

@@ -79,6 +85,12 @@ Todd, B.~S. and Andrews, D.~C. (1999).
\newblock The identification of peaks in physiological signals.
\newblock {\em Computers and Biomedical Research}, 32(4):322--335.

\bibitem[Tripathy et~al., 2013]{tripathy2013intermediate}
Tripathy, S.~J., Padmanabhan, K., Gerkin, R.~C., and Urban, N.~N. (2013).
\newblock Intermediate intrinsic diversity enhances neural population coding.
\newblock {\em Proceedings of the National Academy of Sciences},
110(20):8248--8253.

\bibitem[Walz, 2013]{walz2013Phd}
Walz, H. (2013).
\newblock {\em Encoding of Communication Signals in Heterogeneous Populations
@@ -3,44 +3,44 @@ Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
The top-level auxiliary file: Masterthesis.aux
The style file: apalike.bst
Database file #1: citations.bib
You've used 17 entries,
You've used 19 entries,
1935 wiz_defined-function locations,
591 strings with 6859 characters,
and the built_in function-call counts, 7014 in all, are:
= -- 709
> -- 264
< -- 7
+ -- 88
- -- 86
* -- 645
:= -- 1213
add.period$ -- 51
call.type$ -- 17
change.case$ -- 126
chr.to.int$ -- 17
cite$ -- 17
duplicate$ -- 236
empty$ -- 502
format.name$ -- 112
if$ -- 1348
606 strings with 7343 characters,
and the built_in function-call counts, 7914 in all, are:
= -- 800
> -- 300
< -- 8
+ -- 100
- -- 98
* -- 731
:= -- 1365
add.period$ -- 57
call.type$ -- 19
change.case$ -- 142
chr.to.int$ -- 19
cite$ -- 19
duplicate$ -- 264
empty$ -- 566
format.name$ -- 128
if$ -- 1522
int.to.chr$ -- 1
int.to.str$ -- 0
missing$ -- 16
newline$ -- 88
num.names$ -- 51
pop$ -- 87
missing$ -- 18
newline$ -- 98
num.names$ -- 57
pop$ -- 98
preamble$ -- 1
purify$ -- 127
purify$ -- 143
quote$ -- 0
skip$ -- 178
skip$ -- 199
stack$ -- 0
substring$ -- 620
swap$ -- 17
substring$ -- 706
swap$ -- 19
text.length$ -- 0
text.prefix$ -- 0
top$ -- 0
type$ -- 102
type$ -- 114
warning$ -- 0
while$ -- 65
while$ -- 73
width$ -- 0
write$ -- 223
write$ -- 249

Binary file not shown.
@@ -3,6 +3,7 @@
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage{comment}
\usepackage{xcolor}
\usepackage[breaklinks=true,colorlinks=true,citecolor=blue!30!black,urlcolor=blue!30!black,linkcolor=blue!30!black]{hyperref}

@@ -120,6 +121,7 @@ Außerdem erkläre ich, dass die eingereichte Arbeit weder vollständig noch in
% Einleitung
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
\begin{comment}
\begin{enumerate}
\item sensory input important for all life etc.
@@ -145,7 +147,9 @@ Außerdem erkläre ich, dass die eingereichte Arbeit weder vollständig noch in
\end{enumerate}
\newpage

The environment of an organism holds important information that it needs to survive. Information about predators to avoid, food to find and potential mates. That means that the ability to sense and process this information is of vital importance for any organism. At the same time the environment also contains a lot of information that is irrelevant to an organism. \cite{barlow1961possible} suggested already that the sensory systems of an organism should be specialized to extract the information it needs while filtering out the noise and irrelevant information, to efficiently use the limited coding capacity of the sensory systems.
\end{comment}

The environment of an organism holds important information that it needs to survive: information about predators to avoid, food to find and potential mates. The ability to sense and process this information is of vital importance for any organism. At the same time the environment also contains a lot of information that is irrelevant to an organism. \cite{barlow1961possible} already suggested that the sensory systems of an organism should be specialized to extract the information it needs while filtering out the noise and irrelevant information, in order to use the limited coding capacity of the sensory systems efficiently.

One interesting model system for questions of adaptive signal processing is the electric fish \AptLepto (brown ghost knifefish).
\lepto generate a sinusoidal electric field with the electric organ in their tail, enabling active electroreception, which they use to find prey and communicate with each other (\cite{maciver2001prey}, \cite{zupanc2006electric}). The different use cases of this electric organ discharge (EOD) come with the necessity to detect a wide range of different amplitude modulations (AMs). Electrolocation of objects in the surrounding water, like small prey or rocks, causes small low-frequency AMs \citep{babineau2007spatial}. At the same time other electric fish can cause stronger and higher-frequency AMs through interference between the electric fields and through their communication signals, like chirps, short increases in their EOD frequency \citep{zupanc2006electric}. This means that the electroreceptors need to encode a wide range of changes in EOD amplitude, in speed as well as in strength.
@@ -153,24 +157,23 @@ The EOD and its AMs are encoded by electroreceptor organs in the skin. \lepto ha
When the fish's EOD is unperturbed, P units fire every few EOD periods, but they have a certain variability in their firing (fig. \ref{fig:p_unit_example} B) and show negative correlation between successive interspike intervals (ISIs) (fig. \ref{fig:p_unit_example} C). When presented with a step increase in EOD amplitude, P units show strong adaption behavior. After a strong increase in firing rate reacting to the onset of the step, the firing rate quickly decays back to a steady state (fig. \ref{fig:p_unit_example} D). For different step sizes both the onset and the steady-state response scale with the size and direction of the step (fig. \ref{fig:p_unit_example} E).


%

\begin{figure}[H]
{\caption{\label{fig:p_unit_example} Example behavior of a p-unit with a high baseline firing rate and an EODf of 744\,Hz. \textbf{A}: A 100\,ms voltage trace of the baseline recording with spikes marked by the black lines. \textbf{B}: The histogram of the ISI with the x-axis in EOD periods, showing the phase locking of the firing. \textbf{C}: The serial correlation of the ISI showing a negative correlation for lags one and two. \textbf{D}: The response of the p-unit to a step increase in EOD amplitude. In \todo{color} the averaged frequency over 10 trials. The P-unit strongly reacts to the onset of the stimulus but very quickly adapts to the new stimulus and then shows a steady state response. \textbf{E}: The f-I curve visualizes the onset and steady-state response of the neuron for different step sizes (contrasts). In \todo{color} the detected onset responses and the fitted Boltzmann, in \todo{color} the detected steady-state response and the linear fit.}}
{\includegraphics[width=1\textwidth]{figures/p_unit_example.pdf}}
{\caption{\label{fig:p_unit_example} Example behavior of a P-unit with a high baseline firing rate and an EOD frequency of 744\,Hz. \textbf{A}: A 100\,ms voltage trace of the baseline recording with spikes marked by the black strokes. \textbf{B}: ISI histogram showing the phase locking of the P-unit firing to the EOD period. \textbf{C}: The serial correlation of the ISIs showing the negative correlation at lag one of most P-units. \textbf{D}: The response of the P-unit to a step increase in EOD amplitude. In \todo{color} the firing frequency (1/ISI) averaged over 10 trials. The P-unit strongly reacts to the onset of the stimulus but quickly adapts to the new stimulus and then shows a reduced steady-state response. \textbf{E}: The onset \todo{color} and steady-state \todo{color} f-I curves of the neuron display the dependence of both responses on the stimulus contrast. The lines are fits with a Boltzmann function (eq. \ref{eq:Boltzmann}) and a rectified line (eq. \ref{eq:rectified_line}) for the onset and steady-state f-I curve, respectively.}}
{\includegraphics{figures/p_unit_example.pdf}}
\end{figure}
\newpage



\begin{figure}[H]
{\caption{\label{fig:heterogeneity_isi_hist} Variability in spiking behavior between P units under baseline conditions. \textbf{A--C} 100\,ms of cell membrane voltage and \textbf{D--F} interspike interval histograms, each for three different cells. \textbf{A} and \textbf{D}: A non bursting cell with a baseline firing rate of 133\,Hz (EODf: 806\,Hz), \textbf{B} and \textbf{E}: A cell with some bursts and a baseline firing rate of 235\,Hz (EODf: 682\,Hz) and \textbf{C} and \textbf{F}: A strongly bursting cell with longer breaks between bursts. Baseline rate of 153\,Hz and EODf of 670\,Hz }}
{\includegraphics[width=\textwidth]{figures/isi_hist_heterogeneity.pdf}}
{\caption{\label{fig:heterogeneity_isi_hist} Variability in spiking behavior between P-units under baseline conditions. \textbf{A--C} 100\,ms of cell membrane voltage and \textbf{D--F} interspike interval histograms, each for three different cells. \textbf{A} and \textbf{D}: A non-bursting cell with a baseline firing rate of 133\,Hz (EODf: 806\,Hz), \textbf{B} and \textbf{E}: A cell with some bursts and a baseline firing rate of 235\,Hz (EODf: 682\,Hz) and \textbf{C} and \textbf{F}: A strongly bursting cell with longer pauses between bursts (baseline rate of 153\,Hz and EOD frequency of 670\,Hz).}}
{\includegraphics{figures/isi_hist_heterogeneity.pdf}}
\end{figure}

\todo{heterogeneity more, bursts important for coding in other systems}

Furthermore show P units a pronounced heterogeneity in their spiking behavior (fig.~\ref{fig:heterogeneity_isi_hist}, \cite{gussin2007limits}). This is an important aspect one needs to consider when trying to understand what and how information is encoded in the spike trains of the neuron. A single neuron might be an independent unit from all other neurons but through different tuning curves a full picture of the stimulus can be encoded in the population even when a single neuron only encodes a small feature space. This type of encoding is ubiquitous in the nervous system and is used in the visual sense for color vision, PLUS MORE... \todo{refs}. Even though P units were already modelled based on a simple leaky integrate-and-fire neuron \citep{chacron2001simple} and conductance based \citep{kashimori1996model} and well studied (\cite{bastian1981electrolocation}, \cite{ratnam2000nonrenewal} \cite{benda2005spike}). There is up to this point no model that tries to cover the full breadth of heterogeneity of the P unit population. Having such a model could help shed light into the population code used in the electric sense, allow researchers gain a better picture how higher brain areas might process the information and get one step closer to the full path between sensory input and behavioral output.
Furthermore, P-units show a pronounced heterogeneity in their spiking behavior (fig.~\ref{fig:heterogeneity_isi_hist}, \cite{gussin2007limits}). This is an important aspect one needs to consider when trying to understand what and how information is encoded in the spike trains of the neuron (\cite{padmanabhan2010intrinsic}, \cite{tripathy2013intermediate}). A single neuron might be an independent unit from all other neurons, but through different tuning curves a full picture of the stimulus can be encoded in the population even when a single neuron only encodes a small feature space. This type of encoding is ubiquitous in the nervous system and is used in the visual sense for color vision, PLUS MORE... \todo{refs labeled line vs summation code}. Even though P-units have already been modeled with a simple leaky integrate-and-fire neuron \citep{chacron2001simple} and with conductance-based models \citep{kashimori1996model}, and are well studied (\cite{bastian1981electrolocation}, \cite{ratnam2000nonrenewal}, \cite{benda2005spike}), up to this point there is no model that tries to cover the full breadth of heterogeneity of the P-unit population. Having such a model could help shed light on the population code used in the electric sense, allow researchers to gain a better picture of how higher brain areas might process the information, and get one step closer to the full path between sensory input and behavioral output.
\todo{much more important: this is the perfect model system to investigate coding in a heterogeneous population of neurons, largely independent of the electric fish itself.}
@@ -229,9 +232,9 @@ V_{Stim}(t) = EOD(t)(1 + AM(t))


\begin{figure}[H]
\floatbox[{\capbeside\thisfloatsetup{capbesideposition={left, center}, capbesidewidth=0.45\textwidth}}]{figure}[\FBwidth]
\floatbox[{\capbeside\thisfloatsetup{capbesideposition={left, center}, capbesidewidth=0.33\textwidth}}]{figure}[\FBwidth]
{\caption{\label{fig:stim_examples} Example of the stimulus construction. At the top: a recording of the fish's EOD. In the middle: the EOD recording multiplied with the AM, with a step between 0 and 50\,ms to a contrast of 30\,\% (marked in \todo{color}). At the bottom: the resulting stimulus trace when the AM is added to the EOD. \todo{Reword; add figure labels A, B, C}}}
{\includegraphics[width=0.45\textwidth]{figures/amGeneration.pdf}}
{\includegraphics{figures/amGeneration.pdf}}
\end{figure}
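A minimal sketch of this stimulus construction, following the equation in the hunk context above, V_Stim(t) = EOD(t)(1 + AM(t)); the EOD frequency and sampling step are made-up values, and the 30 % contrast step between 0 and 50 ms mirrors the caption:

import numpy as np

eod_frequency = 744.0                      # Hz, assumed (matches the example P-unit)
contrast = 0.3                             # 30 % step
step = 0.00005                             # s
time = np.arange(-0.025, 0.075, step)

eod = np.sin(2 * np.pi * eod_frequency * time)
am = np.where((time >= 0.0) & (time <= 0.05), contrast, 0.0)   # step AM
v_stim = eod * (1.0 + am)                  # V_Stim(t) = EOD(t) * (1 + AM(t))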
@@ -281,7 +284,7 @@ Finally the ISI-histogram was calculated within a range of 0--50\,ms and a bin s
% trim={<left> <lower> <right> <upper>}
%\parbox[c][0mm][t]{80mm}{\hspace{-10.5mm}\large\sffamily A\hspace{50.5mm} \large\sffamily B}
%\raisebox{70mm}[10]{\large\sffamily A)}
\includegraphics[trim={10mm 5mm 10mm 5mm}, scale=0.8]{figures/f_point_detection.pdf}
\includegraphics[trim={10mm 5mm 10mm 5mm}]{figures/f_point_detection.pdf}

\caption{\label{fig:f_point_detection} \textbf{A}: The averaged response of a cell to a step in EOD amplitude. The step of the stimulus is marked by the black bar. The detected values for the onset ($f_0$) and steady-state ($f_{\infty}$) response are marked in \todo{color}. $f_0$ is detected as the highest deviation from the mean frequency before the stimulus, while $f_{\infty}$ is the average frequency in the 0.1\,s time window 25\,ms before the end of the stimulus. \textbf{B}: The f-I curve visualizes the onset and steady-state response of the neuron for different stimulus contrasts. In \todo{color} the detected onset responses and the fitted Boltzmann, in \todo{color} the detected steady-state responses and the linear fit.}
\end{figure}
@@ -293,10 +296,12 @@ The $f_{\infty}$ response was estimated as the average firing frequency in the 1
Afterwards a Boltzmann function:
\begin{equation}
f_{0}(I) = (f_{max}-f_{min}) \frac{1}{1 + e^{-k (I - I_0)}} + f_{min}
\label{eq:Boltzmann}
\end{equation}
was fitted to the onset response and a rectified line:
\begin{equation}
f_{\infty}(I) = \lfloor mI+c \rfloor_0
\label{eq:rectified_line}
\end{equation}
(with $\lfloor x \rfloor_0$ denoting the rectification operator) was fitted to the steady-state responses (fig.~\ref{fig:f_point_detection}~B).
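A hedged sketch of how these two fits can be done with scipy; full_boltzmann and clipped_line mirror the helper names used in the plotting code earlier in this diff, but the parameter order, the synthetic example data and the initial guesses here are assumptions, not the repo's implementation:

import numpy as np
from scipy.optimize import curve_fit

def full_boltzmann(I, f_max, f_min, k, I_0):
    # Boltzmann: f_0(I) = (f_max - f_min) / (1 + exp(-k (I - I_0))) + f_min
    return (f_max - f_min) / (1.0 + np.exp(-k * (I - I_0))) + f_min

def clipped_line(I, m, c):
    # rectified line: f_inf(I) = max(m I + c, 0)
    return np.maximum(m * I + c, 0.0)

# contrasts, f0, finf stand in for the detected onset and steady-state responses of one cell
contrasts = np.linspace(-0.2, 0.2, 9)
f0 = full_boltzmann(contrasts, 400, 50, 30, 0.0) + np.random.normal(0, 10, 9)
finf = clipped_line(contrasts, 300, 120) + np.random.normal(0, 10, 9)

f_zero_fit, _ = curve_fit(full_boltzmann, contrasts, f0, p0=(f0.max(), f0.min(), 10, 0))
f_inf_fit, _ = curve_fit(clipped_line, contrasts, finf, p0=(1.0, finf.mean()))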
@@ -352,7 +357,7 @@ Finally, noise and an absolute refractory period were added to the model. The no


\begin{figure}[H]
\includegraphics[scale=0.6]{figures/model_comparison.pdf}
\includegraphics{figures/model_comparison.pdf}
\caption{\label{fig:model_comparison} Comparison of different simple models normed to a spontaneous firing rate of about 10\,Hz and stimulated with a step stimulus. In the left column the y-axis shows the membrane voltage in mV, in the right column it shows the firing frequency in Hz. PIF: shows a continuously increasing membrane voltage with a fixed slope and therefore a constant frequency for a given stimulus strength. LIF: approaches a stimulus-dependent steady-state membrane voltage exponentially and also fires with a constant frequency for a fixed stimulus value. LIFAC: approaches its new membrane voltage value exponentially but additionally shows adaption: after a change in the stimulus the frequency takes some time to adapt and arrive at its new stable value.}
% LIFAC + ref: Very similar to LIFAC the added absolute refractory period keeps the voltage constant for a short time after the spike and limits high fire rates. \todo{how to deal with the parameters}
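To make the difference between the LIF and the LIFAC concrete, here is a hedged sketch of a LIFAC integrated with the Euler method; all parameter values are made up for illustration and are not the fitted model's values. Setting delta_a to zero turns it back into a plain LIF, which fires at a constant rate for a constant stimulus:

import numpy as np

def lifac_spikes(stimulus, step, mem_tau=0.01, tau_a=0.05, delta_a=0.1, threshold=1.0):
    # leaky integrate-and-fire with an adaptation current a
    v, a = 0.0, 0.0
    spike_times = []
    for i, s in enumerate(stimulus):
        v += step * (-v - a + s) / mem_tau   # membrane decays towards s - a
        a += step * (-a) / tau_a             # adaptation decays back to zero
        if v >= threshold:
            v = 0.0                          # reset after a spike
            a += delta_a / tau_a             # each spike increments the adaptation
            spike_times.append(i * step)
    return np.array(spike_times)

step = 0.00005
stim = np.concatenate([np.full(int(0.2 / step), 1.5), np.full(int(0.3 / step), 3.0)])
spikes = lifac_spikes(stim, step)   # firing rate jumps at the step (t = 0.2 s), then adapts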
@@ -363,7 +368,7 @@ Together this results in the dynamics seen in equations \ref{eq:full_model_dynam


\begin{figure}[H]
\includegraphics[scale=0.6]{figures/stimulus_development.pdf}
\includegraphics{figures/stimulus_development.pdf}
\caption{\label{fig:stim_development} The stimulus modification in the model. The fish's EOD is simulated with a sine wave. It is rectified at the synapse and then low-pass filtered in the dendrite.}
\end{figure}
@@ -387,8 +392,6 @@




\subsection{Fitting of the Model}
% transition!
The full model has, as described above, eight parameters that need to be fitted so that it can reproduce the behavior of the cell. During the fitting and the analysis all models were integrated with a time step of 0.05\,ms.
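The fitting routine itself lies outside this diff. As a hedged sketch of the overall structure — sum the normalized errors of all measured behaviours into one cost value and hand that cost function to a derivative-free optimizer such as scipy's Nelder-Mead simplex — the following outlines the idea; whether this matches the thesis' exact error weighting is an assumption, and simulate_and_measure is a hypothetical stand-in for integrating the model and measuring its behaviours:

import numpy as np
from scipy.optimize import minimize

def total_error(params, simulate_and_measure, cell_behaviour):
    # simulate_and_measure: run the model with `params` (0.05 ms time step) and
    # return the same behaviour measures as computed for the cell
    model_behaviour = simulate_and_measure(params)
    return sum(abs(model_behaviour[k] - cell_behaviour[k]) / max(abs(cell_behaviour[k]), 1e-9)
               for k in cell_behaviour)

# result = minimize(total_error, x0=start_params,
#                   args=(simulate_and_measure, cell_behaviour), method="Nelder-Mead")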
@ -446,104 +449,111 @@ All errors were then summed up for the full error. The fits were done with the N
|
||||
\section{Results}
|
||||
|
||||
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{figures/dend_ref_effect.pdf}
|
||||
\caption{\label{fig:dend_ref_effect} Effect of the addition of $\tau_{dend}$ and $t_{ref}$ to the model. Rows \textbf{1--3} different cells: not bursting, bursting or strongly bursting in \todo{color} the cell and in \todo{color} the model. The fits in each column (\textbf{A--C}) were done with different parameters to show their effect on the model. Column \textbf{A}: The cells were fit without $\tau_{dend}$. This causes the model to be unable to fit the vector strength correctly and the models are too strongly locked to the EOD phase. \textbf{B}: The models were fit without $t_{ref}$, because of that the model cannot match the burstiness of the cell. Visible in the missing high peak at the first EOD period. In column \textbf{C} the model all parameters. It can match the full spiking behavior of the cells for the different strengths of bursting.}
\includegraphics[trim={5mm 0mm 0mm 0mm}]{figures/dend_ref_effect.pdf}
\caption{\label{fig:dend_ref_effect} Effect of the dendritic filter and the refractory period on baseline firing. In each row data (blue) and model fits (orange) to three example cells are shown that differ in their burstiness as indicated on the left. \todo{Top: cell 2012-xxx, r=z Hz, b=y; center: cell 2014-xxx, r= b=...; bottom ...}
A: Without the dendritic filter ($\tau_{dend}$) the spikes are too strongly locked to the EOD, resulting in a very high vector strength and too narrow peaks in the baseline ISIH. B: Without the refractory period ($t_{ref}$) the model cannot capture burstiness. While this is no problem for the non-bursting cell (top), the peak in the ISIH at one EOD period cannot be reproduced without the refractory period. C: With both the dendritic filter and the refractory period the ISIHs can be faithfully reproduced for all three cells.
%Effect of the addition of $\tau_{dend}$ and $t_{ref}$ to the model. Rows \textbf{1--3} different cells: not bursting, bursting or strongly bursting in \todo{color} the cell and in \todo{color} the model. The fits in each column (\textbf{A--C}) were done with different parameters to show their effect on the model. Column \textbf{A}: The cells were fit without $\tau_{dend}$. This causes the model to be unable to fit the vector strength correctly and the models are too strongly locked to the EOD phase. \textbf{B}: The models were fit without $t_{ref}$, because of that the model cannot match the burstiness of the cell. Visible in the missing high peak at the first EOD period. In column \textbf{C} the model all parameters. It can match the full spiking behavior of the cells for the different strengths of bursting.
}
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/example_bad_isi_hist_fits.pdf}
\caption{\label{fig:example_bad_isi_fits} \todo{Add pointer arrows in plot?} Problem cases in which the model ISI histogram wasn't fit correctly to the cell. \textbf{A--C} ISI histograms of different cells (\todo{color}) and their corresponding model (\todo{color}). \textbf{A}: Strongly bursting cell with large pauses between bursts, where the Model doesn't manage to reproduce the long pauses. \textbf{B}: Bursting cell with a high probability of firing in the first and second following EOD period. Here the model can't reproduce the high probability on the second following EOD period. \textbf{C}: Cell with a higher order structure \todo{??} in its ISI histogram. It only has a high firing probability every second EOD period which is also not represented in the model.}
\includegraphics{figures/example_bad_isi_hist_fits.pdf}
\caption{\label{fig:example_bad_isi_fits} \todo{Add pointer arrows in plot?} Problem cases in which the model ISI histogram was not fit correctly to the cell. ISI histograms of different cells (\todo{color}) and their corresponding models (\todo{color}). \textbf{A}: Strongly bursting cell with large pauses between bursts, where the model does not manage to reproduce the long pauses. \textbf{B}: Bursting cell with a high probability of firing in the first and second following EOD period. Here the model cannot reproduce the high probability at the second following EOD period. \textbf{C}: Cell with a higher-order structure \todo{??} in its ISI histogram. It only has a high firing probability every second EOD period, which is also not represented in the model.}
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/example_good_fi_fits.pdf}
\includegraphics{figures/example_good_fi_fits.pdf}
\caption{\label{fig:example_good_fi_fits} Examples of good fits of the f-I curve. \textbf{A--C}: Three cells with different response patterns, all of which are well matched by their models. \todo{Color explanation} }
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/example_bad_fi_fits.pdf}
\includegraphics{figures/example_bad_fi_fits.pdf}
\caption{\label{fig:example_bad_fi_fits} Examples of bad fits of the f-I curve. \textbf{A--C}: Different cells. \todo{Color explanation}. \textbf{A}: A model that did not fit the negative-contrast part of the $f_0$ response well but was successful in the positive half. It was also not successful for the $f_\infty$ response and shows a wrong slope. \textbf{B}: A fit that was successful for the lower $f_0$ responses but overshoots the limit of the cell and fires too fast for high positive contrasts. It also has a slightly wrong $f_\infty$ response slope.}
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.5]{figures/fit_baseline_comparison.pdf}

\makebox[\textwidth][c]{\includegraphics{figures/fit_baseline_comparison.pdf}}
\caption{\label{fig:comp_baseline} Comparison of the cell behavior and the behavior of the corresponding fit: \textbf{A} baseline firing rate, \textbf{B} vector strength (VS) and \textbf{C} serial correlation (SC). The histograms compare the distributions of the cells (\todo{color}) and the models (\todo{color}). Below, \todo{what is this plot called} with the grey line marking equal cell and model values. \textbf{A}: The baseline firing rate of cell and model agrees nearly perfectly, as it is set to be equal within a margin of 2\,Hz during the fitting process. \textbf{B}: The vector strength agrees well for most cells, but for cells with a vector strength above 0.8 the models tend to show a weaker VS than the cell. \textbf{C}: Comparison of the SC at lag 1. Here the models cluster more strongly and do not cover quite the same range as the cells. Models of cells with a strongly negative SC often have a weaker negative SC, while the models in the opposite case show too strong negative correlations. In general the fitting of the SC is much more variable than the precise fitting of the VS.}
\end{figure}
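For reference, a short sketch of how the vector strength (\textbf{B}) and the serial correlation (\textbf{C}) are commonly computed from a baseline spike train (the spike-time array and the EOD period are hypothetical inputs):

import numpy as np

def vector_strength(spike_times, eod_period):
    # phase locking to the EOD: 1 = perfect locking, 0 = no locking
    phases = 2.0 * np.pi * (spike_times % eod_period) / eod_period
    return np.abs(np.mean(np.exp(1j * phases)))

def serial_correlation(spike_times, lag=1):
    # Pearson correlation between interspike intervals that are `lag` apart
    isis = np.diff(spike_times)
    return np.corrcoef(isis[:-lag], isis[lag:])[0, 1]
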

\begin{figure}[H]
\includegraphics[scale=0.5]{figures/fit_burstiness_comparison.pdf}
\includegraphics{figures/fit_burstiness_comparison.pdf}
\caption{\label{fig:comp_burstiness} Comparison of the cell behavior and the behavior of the corresponding fit: \textbf{A} burstiness, \textbf{B} coefficient of variation (CV). \textbf{A}: The model values for the burstiness agree well with the values of the cells, but again show a tendency to fall below the cell value the higher it is. \textbf{B}: The CV shows the same problem as the burstiness, but the values drift apart more slowly, starting around 0.6.}
\end{figure}


\begin{figure}[H]
\includegraphics[scale=0.5]{figures/fit_adaption_comparison.pdf}
\includegraphics{figures/fit_adaption_comparison.pdf}
\caption{\label{fig:comp_adaption} Comparison of the cell behavior and the behavior of the corresponding fit: \textbf{A} steady-state $f_\infty$ and \textbf{B} onset $f_0$ response slope. The grey line marks equal cell and model values. \todo{how many} value pairs from \textbf{B} lie outside of the shown area. They had slopes between \todo{}. \textbf{A}: The $f_\infty$ slope pairs. Cells and models show good agreement with little scatter in both directions. \textbf{B}: The $f_0$ values show a higher spread, and for steeper slopes the models more often have too flat slopes.}
\end{figure}


\begin{figure}[H]
\includegraphics[scale=0.6]{figures/behaviour_correlations.pdf}
\includegraphics{figures/behaviour_correlations.pdf}
\caption{\label{fig:behavior_correlations} Significant correlations ($p < 0.05$, Bonferroni corrected) between the behavior variables in the data and in the fitted models. The models show all the same correlations as the data except for the correlation between the baseline firing rate and the VS, but they also show four additional correlations not seen in the cells: bursting--base rate, SC--$f_\infty$ slope, $f_0$ slope--base rate, and SC--base rate.}
\end{figure}
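A sketch of the significance test behind such a correlation matrix, assuming the behavior values are arranged as columns of a two-dimensional array with one row per cell; this only illustrates the Bonferroni correction and is not the analysis code itself:

import numpy as np
from scipy.stats import pearsonr

def significant_correlations(values, alpha=0.05):
    # pairwise Pearson correlations with Bonferroni-corrected significance;
    # values has shape (n_cells, n_variables)
    n_vars = values.shape[1]
    n_tests = n_vars * (n_vars - 1) // 2
    corr = np.full((n_vars, n_vars), np.nan)
    significant = np.zeros((n_vars, n_vars), dtype=bool)
    for i in range(n_vars):
        for j in range(i + 1, n_vars):
            r, p = pearsonr(values[:, i], values[:, j])
            corr[i, j] = corr[j, i] = r
            significant[i, j] = significant[j, i] = p * n_tests < alpha
    return corr, significant
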

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/parameter_distributions.pdf}
\includegraphics{figures/parameter_distributions.pdf}
\caption{\label{fig:parameter_distributions} Distributions of all eight model parameters. \textbf{A}: input scaling $\alpha$, \textbf{B}: bias current $I_{Bias}$, \textbf{C}: membrane time constant $\tau_m$, \textbf{D}: noise strength $\sqrt{2D}$, \textbf{E}: adaption time constant $\tau_A$, \textbf{F}: adaption strength $\Delta_A$, \textbf{G}: time constant of the dendritic low-pass filter $\tau_{dend}$, \textbf{H}: refractory period $t_{ref}$.}
\end{figure}

\todo{image with rescaled time parameters to 800\,Hz, add to above figure?}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/parameter_correlations.pdf}
\includegraphics{figures/parameter_correlations.pdf}
\caption{\label{fig:parameter_correlations} Significant correlations between the model parameters ($p < 0.05$, Bonferroni corrected).}
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/parameter_distribution_with_gauss_fits.pdf}
\includegraphics{figures/parameter_distribution_with_gauss_fits.pdf}
\caption{\label{fig:parameter_dist_with_gauss_fits} Gaussian fits used as approximations of the parameter distributions. The fitted Gaussian is shown in black. All parameters except for $t_{ref}$ and $I_{Bias}$ were log-transformed to obtain a more Gaussian distribution. \textbf{A}: log input scaling $\alpha$, \textbf{B}: bias current $I_{Bias}$, \textbf{C}: log membrane time constant $\tau_m$, \textbf{D}: log noise strength $\sqrt{2D}$, \textbf{E}: log adaption time constant $\tau_A$, \textbf{F}: log adaption strength $\Delta_A$, \textbf{G}: log time constant of the dendritic low-pass filter $\tau_{dend}$, \textbf{H}: refractory period $t_{ref}$.}
\end{figure}
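A sketch of the log-transform and Gaussian approximation for a single parameter (the synthetic values below are made up; only $t_{ref}$ and $I_{Bias}$ stay on a linear scale, as in the figure):

import numpy as np
from scipy.stats import norm

def gaussian_approximation(parameter_values, log_transform=True):
    # fit a Gaussian to one model parameter across all fitted cells
    values = np.log(parameter_values) if log_transform else np.asarray(parameter_values)
    mu, sigma = norm.fit(values)   # maximum-likelihood mean and standard deviation
    return mu, sigma

# synthetic example: membrane time constants of 50 hypothetical fits
tau_m_values = np.random.lognormal(mean=np.log(0.005), sigma=0.5, size=50)
mu, sigma = gaussian_approximation(tau_m_values)
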


\begin{figure}[H]
\includegraphics[scale=0.6]{figures/compare_parameter_dist_random_models.pdf}
\includegraphics{figures/compare_parameter_dist_random_models.pdf}
\caption{\label{fig:drawn_parameter_dist} Parameter distributions of the randomly drawn models (orange, \todo{color}) and of the fitted ones (blue, \todo{color}). \textbf{A}: input scaling $\alpha$, \textbf{B}: bias current $I_{Bias}$, \textbf{C}: membrane time constant $\tau_m$, \textbf{D}: noise strength $\sqrt{2D}$, \textbf{E}: adaption time constant $\tau_A$, \textbf{F}: adaption strength $\Delta_A$, \textbf{G}: time constant of the dendritic low-pass filter $\tau_{dend}$, \textbf{H}: refractory period $t_{ref}$.}
\end{figure}
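A sketch of how such random parameter sets could be drawn (the matrix of fitted, partly log-transformed parameters is a hypothetical input):

import numpy as np

def draw_parameter_sets(fitted_parameters, n_models, log_columns):
    # fitted_parameters: array (n_cells, n_parameters); log_columns: indices of
    # parameters that are log-transformed before estimating the distribution
    values = fitted_parameters.astype(float).copy()
    values[:, log_columns] = np.log(values[:, log_columns])
    mean = values.mean(axis=0)
    cov = np.cov(values, rowvar=False)
    drawn = np.random.multivariate_normal(mean, cov, size=n_models)
    drawn[:, log_columns] = np.exp(drawn[:, log_columns])   # back-transform
    return drawn
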

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/rand_parameter_correlations_comparison.pdf}
\includegraphics{figures/rand_parameter_correlations_comparison.pdf}
\caption{\label{fig:drawn_parameter_corr} Comparison of the parameter correlations between the fitted parameters and the ones drawn from the multivariate normal distribution. There are four correlations that do not agree between the two, but those are inconsistent in the drawn models (see discussion).}
\end{figure}

\begin{figure}[H]
\includegraphics[scale=0.6]{figures/random_models_behaviour_dist.pdf}
\includegraphics{figures/random_models_behaviour_dist.pdf}
\caption{\label{fig:drawn_behavior_dist} Behavior distributions of the randomly drawn models (orange, \todo{color}) and of the original cells (blue, \todo{color}). The distributions of the seven behavior characteristics agree well for the most part, but especially the vector strength (VS) in \textbf{G} is offset from the distribution seen in the cells.}
\end{figure}

\newpage
\section{Discussion}

In this thesis a simple model based on the leaky integrate-and-fire (LIF) model was built to allow the simulation of a neuron population correctly representing the heterogeneity of P-units in the electrosensory pathway of the electric fish \textit{A. leptorhynchus}. The LIF model was extended by an adaption current, a refractory period and simulated the input synapses by rectifying and low pass filtering the input current, building on the model proposed by \cite{walz2013Phd}. This model was then fit to single in vivo recordings of P-units characterized by seven behavior parameters and the resulting models compared to the reference cell. Additionally estimations of the model parameter distributions and their covariances were used to draw random parameter sets and the generated population of P-units compared to the data set.
In this thesis a simple model based on the leaky integrate-and-fire (LIF) model was developed to allow the simulation of a neuron population that correctly represents the heterogeneity of P-units in the electrosensory pathway of the electric fish \textit{A. leptorhynchus}. The LIF model was extended by an adaption current and a refractory period, and the input synapses were simulated by rectifying and low-pass filtering the input current, building on the model proposed by \cite{walz2013Phd}. This model was then fit to in vivo recordings of single P-units characterized by seven firing properties, and the resulting models were compared to their respective reference cells. Additionally, estimates of the distributions and covariances of the model parameters were used to draw random parameter sets. Simulations of these generated populations were compared with the data.

\todo{comparison to the other already existing models}

It was shown in figure \ref{fig:dend_ref_effect} that the expansion of the model by the dendtritic low pass filter and the refractory period was necessary for the model to match the firing behavior of the P-units. The effect that without the low pass filter the model is not able to match the VS and locks to strongly to the EOD was confirmed from \cite{walz2013Phd}. While the new addition of the refractory period $t_{ref}$ is necessary for the model to deviate from the Gaussian firing probability and show bursting behavior.
The dendritic low-pass filter and the refractory period were necessary for the model to match the firing behavior of the P-units (fig.~\ref{fig:dend_ref_effect}). As \cite{walz2013Phd} demonstrated, a model without the low-pass filter is not able to match the VS and locks too strongly to the EOD. A refractory period $t_{ref}$ is necessary for the model to deviate from the Gaussian firing probability \todo{explain what that is} and to show bursting behavior, and it is flexible enough to match different strengths of burstiness.

With these additions behavior of the cells was generally matched well by the models with very similar final distributions but there were some limitations. For cells with a high burstiness or a high coefficient of variation the models could not fully match the cells. This may be caused by cells as seen in figure \ref{fig:example_bad_isi_fits} \textbf{A} and \textbf{B}. These cells high values In both burstiness and CV. In the case of fig. \ref{fig:example_bad_isi_fits} \textbf{A} the model can show this type firing behavior but it seems difficult to reach the parameter configuration needed with the fitting approach used here. In contrast to that the firing behavior of the cells in fig. \ref{fig:example_bad_isi_fits} \textbf{B} and \textbf{C} are not possible for the model in its current form. The addition of the refractory period $t_{ref}$ does not also allow for an increased firing probability at the 2nd EOD period and the cell \textbf{C} shows a higher order structure in its ISI histogram on a comparatively long timescale which this simple model cannot reproduce. % These kind of cells showing higher order structure in their ISI histogram are rare but might provide interesting insights in the physiological properties of P-units when further studied.
With these additions the behavior of the cells was generally matched well by the models, with very similar final distributions of the firing properties, but there were some limitations. Cells with a very high burstiness (long bursts with long pauses in between) or a high coefficient of variation could not be fully matched by the model \todo{burst or CV not really correct because of the correlation!}, as seen in figure \ref{fig:comp_burstiness}. The example of fig.~\ref{fig:example_bad_isi_fits}~\textbf{A} is a case where the model can show this type of firing behavior (long bursts and pauses), but it seems difficult to reach the needed parameter configuration with the fitting approach used. In contrast, the firing behavior of the cells in fig.~\ref{fig:example_bad_isi_fits}~\textbf{B} and \textbf{C} is not possible for the model in its current form. The addition of the refractory period $t_{ref}$ does not also allow for an increased firing probability at the second following EOD period, and the cell in \textbf{C} shows a higher-order structure in its ISI histogram on a comparatively long timescale which the proposed simple model cannot reproduce. These kinds of cells showing higher-order structure in their ISI histogram are rare but might provide interesting insights into the physiological properties of P-units when studied further.

Two firing properties had a high spread in the fitted models. In the serial correlation the models had a tendency to underestimate the cells SC. The second property was the slope of the $f_0$ response. Here one possible source is that the fitted Boltzmann function and its slope are quite sensitive to mis detections so especially for steep slopes a change in the detected frequency for a contrast can strongly influence the slope of the Boltzmann function. Also unlike the baseline firing properties there don't seem to be cases in which the model cannot fit the cell the cases shown in figure \ref{fig:example_bad_fi_fits} are both generally possible so improvements in the cost function and fitting routine should also further improve the model consistency for the adaption responses.
Two firing properties had a high spread in the fitted models. For the serial correlation the models had some tendency to underestimate the cell's SC. The second property was the slope of the $f_0$ response. Here one possible source is that the fitted Boltzmann function and its slope are quite sensitive to mis-detections of spikes: a wrong estimate of the firing frequency for a single contrast can strongly influence the slope of the fitted Boltzmann function. Unlike for the baseline firing properties, there do not seem to be cases in which the model cannot fit the f-I curves. The problematic cases shown in figure \ref{fig:example_bad_fi_fits} are both generally possible (fig.~\ref{fig:example_good_fi_fits}), so improvements in the cost function and fitting routine should further improve the model consistency for the adaption responses.

Comparing the correlation between the firing properties of the data and the models showed clear discrepancies (fig. \ref{fig:behavior_correlations}) with four additional and one missing significant correlation. The added correlation between bursts and baseline firing rate could be a result of the slightly stronger correlations of CV-base rate and bursts-CV but it may also be caused by the problems of fitting strongly bursting cells with a long pause between bursts that would have a lower firing rate even with a high burstiness. The correlation between $f_0$ slope-base rate might also be chain correlation caused by a slight increase in the correlations between $f_\infty$ slope-base rate and $f_\infty$ slope-$f_0$ slope. The other two added correlations are between the SC and the base rate as well as the $f_\infty$ slope, where the first may again cause the second because of the $f_\infty$-base rate correlation.
Comparing the correlations between the firing properties of the data and of the models showed clear discrepancies (fig.~\ref{fig:behavior_correlations}), with four additional and one missing significant correlation. The added correlation between bursting and baseline firing rate could be a result of the slightly stronger correlations between CV and base rate and between bursting and CV. The difficulties of the model in fitting strongly bursting cells with long pauses between bursts could also have introduced this correlation, as these cells would show a high burstiness and a low firing rate. The correlation between $f_0$ slope and base rate might also be caused by a slight increase in the correlations between $f_\infty$ slope and base rate and between $f_\infty$ slope and $f_0$ slope. The other two added correlations are between the SC and the base rate as well as the $f_\infty$ slope, where the former may again cause the latter because of the correlation between $f_\infty$ slope and base rate.
Finally, the one correlation missing in the models is the one between base rate and VS, which is an unexpected correlation.
This was also looked at in \cite{walz2013Phd} but only 23 cells were used and they were exclusively non bursting cells which makes a direct comparison difficult. The data showed the SC-base rate correlation which is shown by the models in this work which might indicate that the highest bursting cells that are not fitted well, "remove" this correlation from the population in the data or that there is not enough data to robustly define the correlation. The data there also only showed four correlations in total: SC-base rate, $f_\infty$ slope-CV, $f_0$ slope-CV and $f_\infty$ slope -$f_0$ slope.
This was also examined in \cite{walz2013Phd}, but only 23 cells were used and they were exclusively non-bursting cells, which makes a direct comparison difficult. The data there showed the correlation between SC and base rate that is shown by the models in this work. This might indicate that the most strongly bursting cells, which are not fitted well, ``remove'' this correlation from the population in the data, or that there is not enough data to robustly define the correlation.

The parameters of the fitted models also showed extensive correlations between each other. This is an indication of strong compensation effects between them \citep{olypher2007using}. Especially for the input gain $\alpha$ and the bias current $I_{Bias}$ that have a nearly perfect correlation and together control the models baseline firing rate. Notably is also that the refractory period $t_{ref}$ is the only completely independent variable. This might show a certain independence between the strength of the burstiness and the other firing characteristics, which could be more closely investigated by looking at the sensitivity of models firing properties to changes in $t_{ref}$.
The parameters of the fitted models also showed extensive correlations between each other. This is an indication of strong compensation effects between them \citep{olypher2007using}, which is especially clear for the input gain $\alpha$ and the bias current $I_{Bias}$: they have a nearly perfect correlation and together control the model's baseline firing rate. Note that the refractory period $t_{ref}$ is the only completely independent variable. This might show a certain independence between the strength of the burstiness and the other firing characteristics, which could be investigated more closely by looking at the sensitivity of the model's firing properties to changes in $t_{ref}$.

The correlations and the estimated parameter distributions were used form of their covariances to draw random parameter set from a multivariante normal distribution. The drawn parameters show the expected distributions but also show slightly different correlations. That could mean that the \todo{number} models used to calculate them were to few to give enough statistical power to the correct estimation of all correlations. Drawing more models and compensating for the increase in power showed that the involved correlations stay inconsistent, which points to an uncertainty already in the covariance matrix. This could also be further investigated with a robustness analysis, so the reliability of the calculated covariances can be estimated.
The correlations and the estimated parameter distributions were used in the form of their covariances to draw random parameter sets from a multivariate normal distribution. The drawn parameters show the expected distributions but different correlations. That could mean that the \todo{number} models used to calculate them were too few to give enough statistical power for the correct estimation of all correlations. Drawing more models and compensating for the increase in power showed that the involved correlations stay inconsistent, which points to an uncertainty already in the measured covariance matrix of the data. This could be further investigated with a robustness analysis estimating the reliability of the computed covariances.

The firing behavior shown by the drawn models, on the other hand, fits that of the data quite well, except for the VS, which is consistently underestimated compared to the data.

@ -551,11 +561,16 @@ The firing behavior shown by the drawn models of
% \cite{kashimori1996model} introduced a conductances based model but the estimation of variables is difficult because of the little experimental data that is available.
%In \cite{chacron2001simple} two P-units were modeled considering only the baseline behaviour with one bursty cell and one regularly firing one as representatives.

In general the model is the first that takes the burstiness as a continuum into account and seems to be able to accurately describe the firing behavior in a large part of the behavior space of the P-units. But further testing is required to get a clearer picture where and why discrepancies exist. In this work it wasn't possible to verify the models with a different type of stimulus. For this a stimulus with random or sinusoidal amplitude modulations could be used. The correlations also need further investigation a first step could be a robustness test to see if there are correlations that are not well characterized in both the cells and the models.
In general the model is the first that takes burstiness as a continuum into account and seems to be able to accurately describe the firing behavior in a large part of the behavior space of the P-units. But further testing is required to get a clearer picture of where and why discrepancies exist. An important next step is the verification of the models with a different type of stimulus. For this, a stimulus with random or sinusoidal amplitude modulations could be used. The correlations also need further investigation. As a first step, a robustness test could be done to estimate whether there are correlations that are not well characterized in the cells and the models.

\todo{Doesn't cover long timescales described in Gussin 2007??}
\todo{Why do we want such models - analysis of coding in heterogeneic neuron populations, possibility to "measure" responses from whole population at the same time to a single stimulus, separating the different types and analyzing their specific coding properties - finding out why the heterogeneity is necessary!}

\todo{comparison to existing models Chacron, Waltz, Kashimori what does this model add which the others "missed" don't deliver on.}

\newpage
\begin{comment}
\subsection*{Fitting quality}

\begin{itemize}
@ -584,7 +599,7 @@ In general the model is the first that takes the burstiness as a continuum into
\item Parameter correlations have 4--5 correlations whose significance is strongly inconsistent/random even when using 1000 drawn models (while compensating for higher power): thus acceptable result??
\item behavior distribution not perfect by any means but quite alright except for the VS. Which definitely needs improvement! Maybe possible with more tweaking of the gauss fits.
\end{itemize}

\end{comment}
\newpage
\bibliography{citations}
\bibliographystyle{apalike}

@ -284,6 +284,29 @@
publisher={American Physiological Society}
}

@article{padmanabhan2010intrinsic,
  title={Intrinsic biophysical diversity decorrelates neuronal firing while increasing information content},
  author={Padmanabhan, Krishnan and Urban, Nathaniel N},
  journal={Nature Neuroscience},
  volume={13},
  number={10},
  pages={1276--1282},
  year={2010},
  publisher={Nature Publishing Group}
}

@article{tripathy2013intermediate,
  title={Intermediate intrinsic diversity enhances neural population coding},
  author={Tripathy, Shreejoy J and Padmanabhan, Krishnan and Gerkin, Richard C and Urban, Nathaniel N},
  journal={Proceedings of the National Academy of Sciences},
  volume={110},
  number={20},
  pages={8248--8253},
  year={2013},
  publisher={National Academy of Sciences}
}

BIN thesis/sources/Tripathy2013IntrinsicDiversityEnhancesCoding.pdf Normal file
Binary file not shown.