one with everything

commit ce03ab68c5 (parent 189293ff0e)
Author: a.ott
Date: 2020-07-04 11:28:33 +02:00

34 changed files with 2102 additions and 260 deletions

.gitignore vendored

@@ -1,8 +1,13 @@
 *.dat
 /data/
+/invivo_data/
+/invivo_results/
+/rejected_cells/
 /figures/
+/cells/
+/rejected_cells/
 /venv/
 __pycache__/
 .idea/
 /results/
-/test_routines/*
+/test_routines/


@@ -53,11 +53,11 @@ class Adaption:
             end_idx = start_idx + int(used_length_of_fit/sampling_interval)
             y_values = mean_frequencies[i][start_idx:end_idx+1]
             x_values = time_axes[i][start_idx:end_idx+1]
-            # plt.title("f_zero {:.2f}, f_inf {:.2f}".format(f_zeros[i], f_infinities[i]))
-            # plt.plot(time_axes[i], mean_frequencies[i])
-            # plt.plot(x_values, y_values)
-            # plt.show()
-            # plt.close()
+            plt.title("f_zero {:.2f}, f_inf {:.2f}".format(f_zeros[i], f_infinities[i]))
+            plt.plot(time_axes[i], mean_frequencies[i])
+            plt.plot(x_values, y_values)
+            plt.show()
+            plt.close()
             tau = self.__approximate_tau_for_exponential_fit(x_values, y_values, i)

@@ -102,8 +102,18 @@ class Adaption:
     def __find_start_idx_for_exponential_fit(self, time, frequency, f_base, f_inf, f_zero):
+        # plt.plot(time, frequency)
+        # plt.plot((time[0], time[-1]), (f_base, f_base), "-.")
+        # plt.plot((time[0], time[-1]), (f_inf, f_inf), "-")
+        # plt.plot((time[0], time[-1]), (f_zero, f_zero))
         stimulus_start_idx = int((self.fi_curve.get_stimulus_start() - time[0]) / self.fi_curve.get_sampling_interval())
+        # plt.plot((time[stimulus_start_idx], ), (0, ), 'o')
+        #
+        # plt.show()
+        # plt.close()
         if f_inf > f_base * 1.1:
             # start setting starting variables for the fit
             # search for the start_index by searching for the max
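Note: the tau approximated in this hunk seeds an exponential fit of the adapting onset response. A minimal sketch of that kind of fit, assuming the standard adaptation model f(t) = f_inf + (f_zero - f_inf) * exp(-t/tau); the helper names here are illustrative, not the repository's API:

    import numpy as np
    from scipy.optimize import curve_fit

    def adapting_rate(t, tau, f_zero, f_inf):
        # firing rate relaxes from f_zero toward f_inf with time constant tau
        return f_inf + (f_zero - f_inf) * np.exp(-t / tau)

    def fit_tau(x_values, y_values, f_zero, f_inf, tau_guess=0.01):
        # fix f_zero/f_inf from the FI-curve points and fit only tau
        popt, _ = curve_fit(lambda t, tau: adapting_rate(t, tau, f_zero, f_inf),
                            x_values, y_values, p0=[tau_guess])
        return popt[0]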


@@ -5,15 +5,19 @@ from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus
 import helperFunctions as hF
 import numpy as np
 import matplotlib.pyplot as plt
+import pickle
+from os.path import join, exists


 class Baseline:
     def __init__(self):
+        self.save_file_name = "baseline_values.pkl"
         self.baseline_frequency = -1
         self.serial_correlation = []
         self.vector_strength = -1
         self.coefficient_of_variation = -1
+        self.burstiness = -1

     def get_baseline_frequency(self):
         raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

@@ -32,7 +36,8 @@ class Baseline:
     def __get_burstiness__(self, eod_freq):
         isis = np.array(self.get_interspike_intervals())
+        if len(isis) == 0:
+            return 0
         bursts = isis[isis < 1.5 * (1.0/eod_freq)]
         return len(bursts) / float(len(isis))

@@ -57,12 +62,14 @@ class Baseline:
     @staticmethod
     def _get_serial_correlation_given_data(max_lag, spikestimes):
         serial_cors = []
         for st in spikestimes:
             sc = hF.calculate_serial_correlation(st, max_lag)
             serial_cors.append(sc)
         serial_cors = np.array(serial_cors)
-        return np.mean(serial_cors, axis=0)
+        res = np.mean(serial_cors, axis=0)
+        return res

     @staticmethod
     def _get_vector_strength_given_data(times, eods, spiketimes, sampling_interval):

@@ -131,6 +138,26 @@ class Baseline:
         plt.close()

+    @staticmethod
+    def plot_isi_histogram_comparision(cell_isis, model_isis, save_path=None):
+        cell_isis = np.array(cell_isis) * 1000
+        model_isis = np.array(model_isis) * 1000
+        maximum = max(max(cell_isis), max(model_isis))
+        bins = np.arange(0, maximum * 1.01, 0.1)
+        plt.title('Baseline ISIs')
+        plt.xlabel('ISI in ms')
+        plt.ylabel('Count')
+        plt.hist(cell_isis, bins=bins, label="cell", alpha=0.5, density=True)
+        plt.hist(model_isis, bins=bins, label="model", alpha=0.5, density=True)
+        plt.legend()
+        if save_path is not None:
+            plt.savefig(save_path + "isi-histogram_comparision.png")
+        else:
+            plt.show()
+        plt.close()

     def plot_polar_vector_strength(self, save_path=None):
         phases = self.get_spiketime_phases()
         fig = plt.figure()

@@ -142,14 +169,29 @@ class Baseline:
         ax.hist(phases, bins=bins)
         if save_path is not None:
-            plt.savefig(save_path + "isi-histogram.png")
+            plt.savefig(save_path + "vector_strength_polar_plot.png")
         else:
             plt.show()
         plt.close()

     def plot_interspike_interval_histogram(self, save_path=None):
         isi = np.array(self.get_interspike_intervals()) * 1000  # change unit to milliseconds
+        if len(isi) == 0:
+            print("NON SPIKES IN BASELINE OF CELL/MODEL")
+            plt.title('Baseline ISIs - NO SPIKES!')
+            plt.xlabel('ISI in ms')
+            plt.ylabel('Count')
+            plt.hist(isi, bins=np.arange(0, 1, 0.1))
+            if save_path is not None:
+                plt.savefig(save_path + "isi-histogram.png")
+            else:
+                plt.show()
+            plt.close()
+            return
         maximum = max(isi)
         bins = np.arange(0, maximum * 1.01, 0.1)

@@ -179,6 +221,34 @@ class Baseline:
         plt.close()

+    def save_values(self, save_directory):
+        values = {}
+        values["baseline_frequency"] = self.get_baseline_frequency()
+        values["serial correlation"] = self.get_serial_correlation(max_lag=10)
+        values["vector strength"] = self.get_vector_strength()
+        values["coefficient of variation"] = self.get_coefficient_of_variation()
+        values["burstiness"] = self.get_burstiness()
+        with open(join(save_directory, self.save_file_name), "wb") as file:
+            pickle.dump(values, file)
+        print("Baseline: Values saved!")
+
+    def load_values(self, save_directory):
+        file_path = join(save_directory, self.save_file_name)
+        if not exists(file_path):
+            print("Baseline: No file to load")
+            return False
+        file = open(file_path, "rb")
+        values = pickle.load(file)
+        self.baseline_frequency = values["baseline_frequency"]
+        self.serial_correlation = values["serial correlation"]
+        self.vector_strength = values["vector strength"]
+        self.coefficient_of_variation = values["coefficient of variation"]
+        self.burstiness = values["burstiness"]
+        print("Baseline: Values loaded!")
+        return True


 class BaselineCellData(Baseline):

@@ -203,9 +273,9 @@ class BaselineCellData(Baseline):
         return self.vector_strength

     def get_serial_correlation(self, max_lag):
-        if len(self.serial_correlation) != max_lag:
+        if len(self.serial_correlation) < max_lag:
             self.serial_correlation = self._get_serial_correlation_given_data(max_lag, self.data.get_base_spikes())
-        return self.serial_correlation
+        return self.serial_correlation[:max_lag]

     def get_coefficient_of_variation(self):
         if self.coefficient_of_variation == -1:

@@ -232,7 +302,9 @@ class BaselineCellData(Baseline):
         return phase_list

     def get_burstiness(self):
-        return self.__get_burstiness__(self.data.get_eod_frequency())
+        if self.burstiness == -1:
+            self.burstiness = self.__get_burstiness__(self.data.get_eod_frequency())
+        return self.burstiness

     def plot_baseline(self, save_path=None, position=0.5, time_length=0.2):
         # eod, v1, spiketimes, frequency

@@ -293,7 +365,9 @@ class BaselineModel(Baseline):
         return self._get_interspike_intervals_given_data(self.spiketimes)

     def get_burstiness(self):
-        return self.__get_burstiness__(self.eod_frequency)
+        if self.burstiness == -1:
+            self.burstiness = self.__get_burstiness__(self.eod_frequency)
+        return self.burstiness

     def get_spiketime_phases(self):
         sampling_interval = self.model.get_sampling_interval()
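Note: save_values/load_values make Baseline a compute-once cache keyed on the cell's data directory. A usage sketch, assuming the class layout above (the path is illustrative):

    from CellData import CellData
    from Baseline import BaselineCellData

    cell_data = CellData("invivo_data/2014-07-17-ad-invivo-1/")
    baseline = BaselineCellData(cell_data)
    if not baseline.load_values(cell_data.get_data_path()):
        # first run: compute the statistics once, then cache them next to the raw data
        baseline.save_values(cell_data.get_data_path())
    print(baseline.get_baseline_frequency())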


@@ -1,20 +1,46 @@
 import DataParserFactory as dpf
 from warnings import warn
-from os import listdir
+import os
 import helperFunctions as hf
 import numpy as np

+COUNT = 0

-def icelldata_of_dir(base_path):
-    for item in sorted(listdir(base_path)):
+
+def icelldata_of_dir(base_path, test_for_v1_trace=True):
+    global COUNT
+    for item in sorted(os.listdir(base_path)):
         item_path = base_path + item
+        if not os.path.isdir(item_path) and not item.endswith(".nix"):
+            print("ignoring path: " + item_path)
+            print("It isn't expected to be cell data.")
+            continue
         try:
             data = CellData(item_path)
-            trace = data.get_base_traces(trace_type=data.V1)
-            if len(trace) == 0:
-                print("NO V1 TRACE FOUND: ", item_path)
-                continue
+            if test_for_v1_trace:
+                try:
+                    trace = data.get_base_traces(trace_type=data.V1)
+                    if len(trace) == 0:
+                        COUNT += 1
+                        print("NO V1 TRACE FOUND: ", item_path)
+                        print(COUNT)
+                        continue
+                except IndexError as e:
+                    COUNT += 1
+                    print(data.get_data_path(), "Threw Index error!")
+                    print(COUNT)
+                    print(str(e), "\n")
+                    continue
+                except ValueError as e:
+                    COUNT += 1
+                    print(data.get_data_path(), "Threw Value error!")
+                    print(COUNT)
+                    print(str(e), "\n")
+                yield data
             else:
                 yield data

@@ -22,6 +48,8 @@ def icelldata_of_dir(base_path):
         warn_msg = str(e)
         warn(warn_msg)
+    print("Currently throw errors: {}".format(COUNT))


 class CellData:
     # Class to capture all the data of a single cell across all experiments (base rate, FI-curve, .?.)

@@ -55,14 +83,18 @@ class CellData:
         self.sam_durations = None
         self.sam_trans_amplitudes = None

-        self.sampling_interval = None
-        self.recording_times = None
+        self.sampling_interval = self.parser.get_sampling_interval()
+        self.recording_times = self.parser.get_recording_times()

     def get_data_path(self):
         return self.data_path

+    def get_baseline_length(self):
+        return self.parser.get_baseline_length()
+
+    def get_fi_curve_contrasts_with_trial_number(self):
+        return self.parser.get_fi_curve_contrasts()

     def get_base_traces(self, trace_type=None):
         if self.base_traces is None:
             self.base_traces = self.parser.get_baseline_traces()

@@ -72,16 +104,32 @@ class CellData:
         else:
             return self.base_traces[trace_type]

-    def get_base_spikes(self):
-        if self.base_spikes is None:
+    def get_base_spikes(self, threshold=2.5, min_length=5000, split_step=1000, re_calculate=False, only_first=False):
+        if self.base_spikes is not None and not re_calculate:
+            return self.base_spikes
+
+        saved_spikes_file = "base_spikes_ndarray.npy"
+        full_path = os.path.join(self.data_path, saved_spikes_file)
+        if os.path.isdir(self.data_path) and os.path.exists(full_path) and not re_calculate:
+            self.base_spikes = np.load(full_path, allow_pickle=True)
+            print("Baseline spikes loaded from file.")
+            return self.base_spikes
+
+        if self.base_spikes is None or re_calculate:
+            print("Baseline spikes are being (re-)calculated...")
             times = self.get_base_traces(self.TIME)
-            eods = self.get_base_traces(self.EOD)
             v1_traces = self.get_base_traces(self.V1)
             spiketimes = []
             for i in range(len(times)):
-                spiketimes.append(hf.detect_spiketimes(times[i], v1_traces[i]))
-            self.base_spikes = spiketimes
+                if only_first and i > 0:
+                    break
+                spiketimes.append(hf.detect_spiketimes(times[i], v1_traces[i], threshold=threshold, min_length=min_length, split_step=split_step))
+            self.base_spikes = np.array(spiketimes)
+
+            if os.path.isdir(self.data_path):
+                np.save(full_path, self.base_spikes)
+                print("Calculated spikes saved to file")
         return self.base_spikes

     def get_base_isis(self):

@@ -169,30 +217,48 @@ class CellData:
         return np.median(base_freqs)

     def get_sampling_interval(self) -> float:
-        if self.sampling_interval is None:
-            self.sampling_interval = self.parser.get_sampling_interval()
         return self.sampling_interval

     def get_recording_times(self) -> list:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times

     def get_time_start(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times[0]

     def get_delay(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return abs(self.recording_times[0])

     def get_time_end(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times[2] + self.recording_times[3]

     def get_stimulus_start(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times[1]

     def get_stimulus_duration(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times[2]

     def get_stimulus_end(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.get_stimulus_start() + self.get_stimulus_duration()

     def get_after_stimulus_duration(self) -> float:
-        if self.recording_times is None:
-            self.recording_times = self.parser.get_recording_times()
         return self.recording_times[3]

     def get_eod_frequency(self):

@@ -207,10 +273,10 @@ class CellData:
     def __read_fi_spiketimes_info__(self):
         if self.fi_spiketimes is None:
-            trans_amplitudes, intensities, spiketimes = self.parser.get_fi_curve_spiketimes()
+            self.fi_trans_amplitudes, self.fi_intensities, self.fi_spiketimes = self.parser.get_fi_curve_spiketimes()
-            self.fi_intensities, self.fi_spiketimes, self.fi_trans_amplitudes = hf.merge_similar_intensities(
-                intensities, spiketimes, trans_amplitudes)
+            # self.fi_intensities, self.fi_spiketimes, self.fi_trans_amplitudes = hf.merge_similar_intensities(
+            #     intensities, spiketimes, trans_amplitudes)

     def __read_sam_info__(self):
         if self.sam_spiketimes is None:
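Note: get_base_spikes now persists detected spikes as base_spikes_ndarray.npy next to the raw data; allow_pickle=True is required because trials of unequal length are stored as an object array. The generic shape of this cache pattern, as a sketch (names illustrative):

    import os
    import numpy as np

    def load_or_compute(path, compute, re_calculate=False):
        # path should end in ".npy"; np.save would otherwise append the suffix
        if os.path.exists(path) and not re_calculate:
            return np.load(path, allow_pickle=True)
        result = np.array(compute(), dtype=object)  # ragged trials -> object array
        np.save(path, result)
        return result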


@@ -16,6 +16,12 @@ class AbstractParser:
     # def cell_get_metadata(self):
     #     raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

+    def get_baseline_length(self):
+        raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")
+
+    def get_fi_curve_contrasts(self):
+        raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

     def get_baseline_traces(self):
         raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

@@ -63,6 +69,32 @@ class DatParser(AbstractParser):
         self.fi_recording_times = []
         self.sampling_interval = -1

+    def get_baseline_length(self):
+        lengths = []
+        for metadata, key, data in Dl.iload(self.baseline_file):
+            if len(metadata) != 0:
+                lengths.append(float(metadata[0]["duration"][:-3]))
+        return lengths
+
+    def get_fi_curve_contrasts(self):
+        """
+        :return: list of tuples [(contrast, #_of_trials), ...]
+        """
+        contrasts = []
+        contrast = [-1, float("nan")]
+        for metadata, key, data in Dl.iload(self.fi_file):
+            if len(metadata) != 0:
+                if contrast[0] != -1:
+                    contrasts.append(contrast)
+                contrast = [-1, 1]
+                contrast[0] = float(metadata[-1]["intensity"][:-2])
+            else:
+                contrast[1] += 1
+        return np.array(contrasts)

     def traces_available(self) -> bool:
         return True

@@ -129,12 +161,24 @@ class DatParser(AbstractParser):
             else:
                 skip = True
                 continue
+        else:
+            if "preduration" in metadata[0].keys():
+                pre_duration = float(metadata[0]["preduration"][:-2])
+                trans_amplitude = float(metadata[0]["trans. amplitude"][:-2])
+                if pre_duration == 0:
+                    skip = False
+                else:
+                    skip = True
+                    continue

         if skip:
             continue

+        if 'intensity' in metadata[metadata_index].keys():
             intensity = float(metadata[metadata_index]['intensity'][:-2])
             pre_intensity = float(metadata[metadata_index]['preintensity'][:-2])
+        else:
+            intensity = float(metadata[1-metadata_index]['intensity'][:-2])
+            pre_intensity = float(metadata[1-metadata_index]['preintensity'][:-2])

         intensities.append(intensity)
         pre_durations.append(pre_duration)

@@ -151,6 +195,7 @@ class DatParser(AbstractParser):
             spike_time_data = data[:, 0]/1000
             if len(spike_time_data) < 10:
+                print("# ignoring spike-train that contains less than 10 spikes.")
                 continue
             if spike_time_data[-1] < 1:
                 print("# ignoring spike-train that ends before one second.")

@@ -158,6 +203,19 @@ class DatParser(AbstractParser):
             spiketimes[index].append(spike_time_data)

+        # TODO Check if sorting works!
+        new_order = np.arange(0, len(intensities), 1)
+        intensities, new_order = zip(*sorted(zip(intensities, new_order)))
+        intensities = list(intensities)
+        spiketimes = [spiketimes[i] for i in new_order]
+        trans_amplitudes = [trans_amplitudes[i] for i in new_order]
+
+        for i in range(len(intensities)-1, -1, -1):
+            if len(spiketimes[i]) < 3:
+                del intensities[i]
+                del spiketimes[i]
+                del trans_amplitudes[i]

         return trans_amplitudes, intensities, spiketimes

     def get_sam_info(self):

@@ -252,6 +310,11 @@ class DatParser(AbstractParser):
                 stim_key = "----- Test-Intensities -----------------------------------------------"
                 stim_duration.append(float(metadata[0][stim_key]["duration"][:-2])/1000)

+            if "pause" in metadata[0].keys():
+                delays.append(float(metadata[0]["delay"][:-2]) / 1000)
+                pause.append(float(metadata[0]["pause"][:-2]) / 1000)
+                stim_duration.append(float(metadata[0]["duration"][:-2]) / 1000)

         for l in [delays, stim_duration, pause]:
             if len(l) == 0:
                 raise RuntimeError("DatParser:__read_fi_recording_times__:\n" +

@@ -298,11 +361,14 @@ class DatParser(AbstractParser):
             raise RuntimeError(self.stimuli_file + " file doesn't exist!")
         if not exists(self.fi_file):
             raise RuntimeError(self.fi_file + " file doesn't exist!")
-        if not exists(self.sam_file):
-            raise RuntimeError(self.sam_file + " file doesn't exist!")
+        if not exists(self.baseline_file):
+            raise RuntimeError(self.baseline_file + " file doesn't exist!")
+        # if not exists(self.sam_file):
+        #     raise RuntimeError(self.sam_file + " file doesn't exist!")


 # MODEL PARSER: ------------------------------

 class ModelParser(AbstractParser):

     def __init__(self, model: AbstractModel):
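Note on the "TODO Check if sorting works!" above: the idiom does keep the three lists aligned, because sorted() orders the (intensity, original_index) pairs by intensity and the resulting index order is reused for the companion lists. A self-contained check:

    import numpy as np

    intensities = [0.2, -0.1, 0.05]
    spiketimes = [["i0"], ["i1"], ["i2"]]
    trans_amplitudes = ["a0", "a1", "a2"]

    new_order = np.arange(0, len(intensities), 1)
    intensities, new_order = zip(*sorted(zip(intensities, new_order)))
    spiketimes = [spiketimes[i] for i in new_order]
    trans_amplitudes = [trans_amplitudes[i] for i in new_order]

    assert list(intensities) == [-0.1, 0.05, 0.2]
    assert spiketimes == [["i1"], ["i2"], ["i0"]]
    assert trans_amplitudes == ["a1", "a2", "a0"]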


@@ -7,11 +7,15 @@ import matplotlib.pyplot as plt
 from warnings import warn
 import functions as fu
 import helperFunctions as hF
+from os.path import join, exists
+import pickle
+from sys import stderr


 class FICurve:

-    def __init__(self, stimulus_values):
+    def __init__(self, stimulus_values, save_dir=None):
+        self.save_file_name = "fi_curve_values.pkl"
         self.stimulus_values = stimulus_values
         self.f_baseline_frequencies = []

@@ -23,7 +27,13 @@ class FICurve:
         # f_max, f_min, k, x_zero
         self.f_zero_fit = []

-        self.initialize()
+        if save_dir is None:
+            self.initialize()
+        else:
+            if not self.load_values(save_dir):
+                self.initialize()
+                self.save_values(save_dir)

     def initialize(self):
         self.calculate_all_frequency_points()

@@ -219,12 +229,48 @@ class FICurve:
     def plot_f_point_detections(self, save_path=None):
         raise NotImplementedError("NOT YET OVERRIDDEN FROM ABSTRACT CLASS")

+    def save_values(self, save_directory):
+        values = {}
+        values["stimulus_values"] = self.stimulus_values
+        values["f_baseline_frequencies"] = self.f_baseline_frequencies
+        values["f_inf_frequencies"] = self.f_inf_frequencies
+        values["f_zero_frequencies"] = self.f_zero_frequencies
+        values["f_inf_fit"] = self.f_inf_fit
+        values["f_zero_fit"] = self.f_zero_fit
+        with open(join(save_directory, self.save_file_name), "wb") as file:
+            pickle.dump(values, file)
+        print("Fi-Curve: Values saved!")
+
+    def load_values(self, save_directory):
+        file_path = join(save_directory, self.save_file_name)
+        if not exists(file_path):
+            print("Fi-Curve: No file to load")
+            return False
+        file = open(file_path, "rb")
+        values = pickle.load(file)
+        if set(values["stimulus_values"]) != set(self.stimulus_values):
+            stderr.write("Fi-Curve:load_values() - Given stimulus values are different to the loaded ones!:\n "
+                         "given: {}\n loaded: {}".format(str(self.stimulus_values), str(values["stimulus_values"])))
+        self.stimulus_values = values["stimulus_values"]
+        self.f_baseline_frequencies = values["f_baseline_frequencies"]
+        self.f_inf_frequencies = values["f_inf_frequencies"]
+        self.f_zero_frequencies = values["f_zero_frequencies"]
+        self.f_inf_fit = values["f_inf_fit"]
+        self.f_zero_fit = values["f_zero_fit"]
+        print("Fi-Curve: Values loaded!")
+        return True


 class FICurveCellData(FICurve):
-    def __init__(self, cell_data: CellData, stimulus_values):
+    def __init__(self, cell_data: CellData, stimulus_values, save_dir=None):
         self.cell_data = cell_data
-        super().__init__(stimulus_values)
+        super().__init__(stimulus_values, save_dir)

     def calculate_all_frequency_points(self):
         mean_frequencies = self.cell_data.get_mean_fi_curve_isi_frequencies()

@@ -353,7 +399,6 @@ class FICurveModel(FICurve):
     def calculate_all_frequency_points(self):
         sampling_interval = self.model.get_sampling_interval()
         self.f_inf_frequencies = []
         self.f_zero_frequencies = []

@@ -377,7 +422,7 @@ class FICurveModel(FICurve):
             if len(time) == 0 or min(time) > self.stim_start \
                     or max(time) < self.stim_start + self.stim_duration:
-                print("Too few spikes to calculate f_inf, f_0 and f_base")
+                # print("Too few spikes to calculate f_inf, f_0 and f_base")
                 self.f_inf_frequencies.append(0)
                 self.f_zero_frequencies.append(0)
                 self.f_baseline_frequencies.append(0)

@@ -456,9 +501,9 @@ class FICurveModel(FICurve):
         plt.close()


-def get_fi_curve_class(data, stimulus_values, eod_freq=None, trials=5) -> FICurve:
+def get_fi_curve_class(data, stimulus_values, eod_freq=None, trials=5, save_dir=None) -> FICurve:
     if isinstance(data, CellData):
-        return FICurveCellData(data, stimulus_values)
+        return FICurveCellData(data, stimulus_values, save_dir)
     if isinstance(data, LifacNoiseModel):
         if eod_freq is None:
             raise ValueError("The FiCurveModel needs the eod variable to work")


@@ -8,6 +8,7 @@ from AdaptionCurrent import Adaption
 import numpy as np
 from warnings import warn
 from scipy.optimize import minimize
+import time


 class Fitter:

@@ -42,7 +43,7 @@ class Fitter:
         self.f_zero_straight_contrast = 0
         self.f_zero_fit = []

-        self.tau_a = 0
+        # self.tau_a = 0

         # counts how often the cost_function was called
         self.counter = 0

@@ -51,13 +52,14 @@ class Fitter:
         self.eod_freq = cell_data.get_eod_frequency()

         data_baseline = get_baseline_class(cell_data)
+        data_baseline.load_values(cell_data.get_data_path())
         self.baseline_freq = data_baseline.get_baseline_frequency()
         self.vector_strength = data_baseline.get_vector_strength()
         self.serial_correlation = data_baseline.get_serial_correlation(self.sc_max_lag)
         self.coefficient_of_variation = data_baseline.get_coefficient_of_variation()
         self.burstiness = data_baseline.get_burstiness()

-        fi_curve = get_fi_curve_class(cell_data, cell_data.get_fi_contrasts())
+        fi_curve = get_fi_curve_class(cell_data, cell_data.get_fi_contrasts(), save_dir=cell_data.get_data_path())
         self.fi_contrasts = fi_curve.stimulus_values
         self.f_inf_values = fi_curve.f_inf_frequencies
         self.f_inf_slope = fi_curve.get_f_inf_slope()

@@ -71,8 +73,8 @@ class Fitter:
         # around 1/3 of the value at straight
         # self.f_zero_slope = fi_curve.get_fi_curve_slope_at(fi_curve.get_f_zero_and_f_inf_intersection())

-        adaption = Adaption(fi_curve)
-        self.tau_a = adaption.get_tau_real()
+        # adaption = Adaption(fi_curve)
+        # self.tau_a = adaption.get_tau_real()

     def fit_model_to_data(self, data: CellData, start_parameters, fit_routine_func: callable):
         self.set_data_reference_values(data)

@@ -83,13 +85,13 @@ class Fitter:
         # fit only v_offset, mem_tau, input_scaling, dend_tau
         x0 = np.array([start_parameters["mem_tau"], start_parameters["noise_strength"],
-                       start_parameters["input_scaling"], self.tau_a, start_parameters["delta_a"],
+                       start_parameters["input_scaling"], start_parameters["tau_a"], start_parameters["delta_a"],
                        start_parameters["dend_tau"], start_parameters["refractory_period"]])
         initial_simplex = create_init_simples(x0, search_scale=2)

         # error_list = [error_bf, error_vs, error_sc, error_cv,
         #               error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
-        error_weights = (0, 1, 1, 1, 1, 1, 1, 1, 1)
+        error_weights = (0, 1, 1, 1, 1, 1, 1, 1, 0)
         fmin = minimize(fun=self.cost_function_all,
                         args=(error_weights,), x0=x0, method="Nelder-Mead",
                         options={"initial_simplex": initial_simplex, "xatol": 0.001, "maxfev": 200, "maxiter": 400})

@@ -153,12 +155,15 @@ class Fitter:
         # find right v-offset
         test_model = self.base_model.get_model_copy()
         test_model.set_variable("noise_strength", 0)
+        time1 = time.time()
         v_offset = test_model.find_v_offset(self.baseline_freq, base_stimulus)
         self.base_model.set_variable("v_offset", v_offset)
+        time2 = time.time()
+        # print("time taken for finding v_offset: {:.2f}s".format(time2-time1))

         # [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
         error_list = self.calculate_errors(error_weights)
+        # print(sum(error_list))
         return sum(error_list)

     def cost_function_without_ref_period(self, X, error_weights=None):

@@ -178,7 +183,6 @@ class Fitter:
         # [error_bf, error_vs, error_sc, error_f_inf, error_f_inf_slope, error_f_zero, error_f_zero_slope]
         error_list = self.calculate_errors(error_weights)
         return sum(error_list)

     def cost_function_all_without_noise(self, X, error_weights=None):

@@ -285,14 +289,18 @@ class Fitter:
         if model is None:
             model = self.base_model

+        time1 = time.time()
         model_baseline = get_baseline_class(model, self.eod_freq)
         baseline_freq = model_baseline.get_baseline_frequency()
         vector_strength = model_baseline.get_vector_strength()
         serial_correlation = model_baseline.get_serial_correlation(self.sc_max_lag)
         coefficient_of_variation = model_baseline.get_coefficient_of_variation()
         burstiness = model_baseline.get_burstiness()
+        time2 = time.time()
+        # print("Time taken for all baseline parameters: {:.2f}".format(time2-time1))

+        time1 = time.time()
         fi_curve_model = get_fi_curve_class(model, self.fi_contrasts, self.eod_freq)
         f_zeros = fi_curve_model.get_f_zero_frequencies()
         f_infinities = fi_curve_model.get_f_inf_frequencies()

@@ -300,6 +308,10 @@ class Fitter:
         # f_zero_slopes = [fi_curve_model.get_f_zero_fit_slope_at_stimulus_value(x) for x in self.fi_contrasts]
         f_zero_slope_at_straight = fi_curve_model.get_f_zero_fit_slope_at_stimulus_value(self.f_zero_straight_contrast)
+        time2 = time.time()
+        # print("Time taken for all fi-curve parameters: {:.2f}".format(time2 - time1))

         # calculate errors with reference values
         error_bf = abs((baseline_freq - self.baseline_freq) / self.baseline_freq)
         error_vs = abs((vector_strength - self.vector_strength) / 0.1)

@@ -329,7 +341,11 @@ class Fitter:
                 error_list[i] = error_list[i] * error_weights[i]
         elif error_weights is not None:
             warn("Error: weights had different length than errors and were ignored!")
+        if np.isnan(sum(error_list)):
+            print("--------SOME ERROR VALUE(S) IS/ARE NaN:")
+            print(error_list)
+            return [50 for e in error_list]
+            # raise ValueError("Some error value(s) is/are NaN!")

         return error_list
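Note: scipy's Nelder-Mead expects initial_simplex to be an (n+1) x n array of vertices, which is the contract create_init_simples has to satisfy. A minimal sketch of that contract with a toy cost function (values illustrative):

    import numpy as np
    from scipy.optimize import minimize

    def cost(x):
        return np.sum((x - 1.0) ** 2)

    x0 = np.array([0.5, 2.0, 3.0])
    # n parameters need n+1 simplex vertices; perturb x0 once per dimension
    simplex = np.vstack([x0] + [x0 + 0.5 * np.eye(len(x0))[i] for i in range(len(x0))])
    fmin = minimize(fun=cost, x0=x0, method="Nelder-Mead",
                    options={"initial_simplex": simplex, "xatol": 0.001, "maxfev": 200})
    print(fmin.x)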

ModelFit.py Normal file

@@ -0,0 +1,83 @@
import os
from models.LIFACnoise import LifacNoiseModel
import numpy as np


def get_best_fit(folder_path):
    min_err = np.inf
    min_item = ""
    for item in os.listdir(folder_path):
        err = float(item.split("_")[-1])
        if err < min_err:
            min_err = err
            min_item = item
    return ModelFit(os.path.join(folder_path, min_item))


class ModelFit:
    def __init__(self, folder_path):
        self.path = folder_path
        self.parameter_file_name = "parameters_info.txt"
        self.value_file = "value_comparision.tsv"
        self.fi_comp_img = "fi_curve_comparision.png"
        self.isi_hist_img = "isi-histogram.png"
        self.isi_hist_comp_img = "isi-histogram_comparision.png"

    def get_final_parameters(self):
        par_file_path = os.path.join(self.path, self.parameter_file_name)
        with open(par_file_path, 'r') as par_file:
            for line in par_file:
                line = line.strip().split('\t')
                if line[0] == "final_parameters:":
                    return dict(line[1])
        print("Final parameters not found! - ", self.path)
        return {}

    def get_start_parameters(self):
        par_file_path = os.path.join(self.path, self.parameter_file_name)
        with open(par_file_path, 'r') as par_file:
            for line in par_file:
                line = line.strip().split('\t')
                if line[0] == "start_parameters:":
                    return dict(line[1])
        print("Start parameters not found! - ", self.path)
        return {}

    def get_behaviour_values(self):
        values_file_path = os.path.join(self.path, self.value_file)
        cell_values = {}
        model_values = {}
        with open(values_file_path, 'r') as val_file:
            line = val_file.readline()  # ignore headers
            for line in val_file:
                line = line.strip().split('\t')
                cell_values[line[0]] = float(line[1])
                model_values[line[0]] = float(line[2])
        return cell_values, model_values

    def get_fi_curve_comparision_image(self):
        path = os.path.join(self.path, self.fi_comp_img)
        if os.path.exists(path):
            return path
        else:
            raise FileNotFoundError("Fi-curve comparision image is missing. - " + self.path)

    def get_isi_histogram_image(self):
        path = os.path.join(self.path, self.isi_hist_img)
        if os.path.exists(path):
            return path
        else:
            raise FileNotFoundError("Isi-histogram image is missing. - " + self.path)

    def get_error_value(self):
        return self.path.split("_")[-1]

    def get_model(self):
        return LifacNoiseModel(self.get_final_parameters())
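Note: ModelFit assumes every fit run lives in a folder whose name ends in the achieved error value, which is what get_best_fit's float(item.split("_")[-1]) relies on. A usage sketch (folder names illustrative):

    # run folders like "start_par_4_2.1331" (trailing token = summed error)
    best = get_best_fit("results/invivo_results/2014-07-17-ad-invivo-1/")
    cell_values, model_values = best.get_behaviour_values()
    print(best.get_error_value(), cell_values, model_values)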

cell_overview.py Normal file

@@ -0,0 +1,116 @@
from CellData import icelldata_of_dir
from Baseline import BaselineCellData
from FiCurve import FICurveCellData
import os


def main():
    # plot_visualizations("cells/")
    full_overview("cells/master_table.csv", "cells/")


def move_rejected_cell_data():
    count = 0
    jump_to = 0
    negative_contrast_rel = 0
    cell_list = []
    for d in icelldata_of_dir("invivo_data/"):
        count += 1
        if count < jump_to:
            continue
        print(d.get_data_path())
        base = BaselineCellData(d)
        base.load_values(d.get_data_path())
        ficurve = FICurveCellData(d, d.get_fi_contrasts(), d.get_data_path())
        if ficurve.get_f_inf_slope() < 0:
            negative_contrast_rel += 1
            print("negative f_inf slope")
            cell_list.append(os.path.abspath(d.get_data_path()))

    for c in cell_list:
        if os.path.exists(c):
            print("Source: ", c)
            destination = os.path.abspath("rejected_cells/negative_slope_f_inf/" + os.path.basename(c))
            print("destination: ", destination)
            print()
            os.rename(c, destination)
    print("Number: " + str(negative_contrast_rel))


def plot_visualizations(folder_path):
    for cell_data in icelldata_of_dir("invivo_data/"):
        name = os.path.split(cell_data.get_data_path())[-1]
        print(name)
        save_path = folder_path + name + "/"
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        baseline = BaselineCellData(cell_data)
        baseline.plot_baseline(save_path)
        baseline.plot_serial_correlation(10, save_path)
        baseline.plot_polar_vector_strength(save_path)
        baseline.plot_interspike_interval_histogram(save_path)

        ficurve = FICurveCellData(cell_data, cell_data.get_fi_contrasts())
        ficurve.plot_fi_curve(save_path)


def full_overview(save_path_table, folder_path):
    with open(save_path_table, "w") as table:
        table.write("Name, Path, Baseline Frequency Hz,Vector Strength, serial correlation lag=1,"
                    " serial correlation lag=2, burstiness, coefficient of variation,"
                    " fi-curve inf slope, fi-curve zero slope at straight, contrast at fi-curve zero straight\n")
        # add contrasts, f-inf values, f_zero_values
        count = 0
        start = 0
        for cell_data in icelldata_of_dir("invivo_data/"):
            count += 1
            if count < start:
                continue
            save_dir = cell_data.get_data_path()
            name = os.path.split(cell_data.get_data_path())[-1]
            line = name + ","
            line += cell_data.get_data_path() + ","

            baseline = BaselineCellData(cell_data)
            if not baseline.load_values(save_dir):
                baseline.save_values(save_dir)
            line += "{:.1f},".format(baseline.get_baseline_frequency())
            line += "{:.2f},".format(baseline.get_vector_strength())
            sc = baseline.get_serial_correlation(2)
            line += "{:.2f},".format(sc[0])
            line += "{:.2f},".format(sc[1])
            line += "{:.2f},".format(baseline.get_burstiness())
            line += "{:.2f},".format(baseline.get_coefficient_of_variation())

            ficurve = FICurveCellData(cell_data, cell_data.get_fi_contrasts(), save_dir)
            line += "{:.2f},".format(ficurve.get_f_inf_slope())
            line += "{:.2f}\n".format(ficurve.get_f_zero_fit_slope_at_straight())
            line += "{:.2f}\n".format(ficurve.f_zero_fit[3])
            table.write(line)

            name = os.path.split(cell_data.get_data_path())[-1]
            print(name)
            save_path = folder_path + name + "/"
            if not os.path.exists(save_path):
                os.mkdir(save_path)

            baseline.plot_baseline(save_path)
            baseline.plot_serial_correlation(10, save_path)
            baseline.plot_polar_vector_strength(save_path)
            baseline.plot_interspike_interval_histogram(save_path)

            ficurve.plot_fi_curve(save_path)


if __name__ == '__main__':
    main()

collect_results.py Normal file

@@ -0,0 +1,42 @@
import argparse
import os
import numpy as np
from ModelFit import ModelFit


if __name__ == '__main__':
    # parser = argparse.ArgumentParser()
    # parser.add_argument("dir", help="folder containing the cell folders with the fit results")
    # args = parser.parse_args()

    dir_path = "results/invivo_results/"  # args.dir

    # if not os.path.isdir(dir_path):
    #     print("Argument dir is not a directory.")
    #     parser.print_usage()
    #     exit(0)

    for item in os.listdir(dir_path):
        cell_folder = os.path.join(dir_path, item)
        if not os.path.isdir(cell_folder):
            continue
        min_err = np.inf
        min_run = ""
        for run in os.listdir(cell_folder):
            err = float(run.split("_")[-1])
            if err < min_err:
                min_err = err
                min_run = os.path.join(cell_folder, run)
        results = ModelFit(min_run)
        quit()

    # search folders for one with min error
    # gather images + info about parameters, behaviour
    pass
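Note: the per-cell minimum search above re-implements what ModelFit.get_best_fit already does; the trailing stub could be finished along these lines (a sketch, same assumptions about folder naming):

    import os
    from ModelFit import get_best_fit

    dir_path = "results/invivo_results/"
    for item in sorted(os.listdir(dir_path)):
        cell_folder = os.path.join(dir_path, item)
        if os.path.isdir(cell_folder):
            fit = get_best_fit(cell_folder)
            print(item, fit.get_error_value())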

data_files.txt Normal file

@@ -0,0 +1,151 @@
/mnt/invivo_data/2014-07-17-ad-invivo-1
/mnt/invivo_data/2014-12-03-ag-invivo-1
/mnt/invivo_data/2014-12-11-aa-invivo-1
/mnt/invivo_data/2014-06-06-ag-invivo-1
/mnt/invivo_data/2018-03-22-ac-invivo-1
/mnt/invivo_data/2014-03-19-aa-invivo-1
/mnt/invivo_data/2014-06-06-aa-invivo-1
/mnt/invivo_data/2012-01-17-ap
/mnt/invivo_data/2018-01-10-ai
/mnt/invivo_data/2015-01-15-ab-invivo-1
/mnt/invivo_data/2014-12-03-ab-invivo-1
/mnt/invivo_data/2013-04-11-ad-invivo-1
/mnt/invivo_data/2018-05-08-ae-invivo-1
/mnt/invivo_data/2013-04-11-aa-invivo-1
/mnt/invivo_data/2012-01-11-ah
/mnt/invivo_data/2013-04-17-ac-invivo-1
/mnt/invivo_data/2018-01-12-ai
/mnt/invivo_data/2011-10-25-aa-invivo-1
/mnt/invivo_data/2014-12-03-aa-invivo-1
/mnt/invivo_data/2014-12-03-an-invivo-1
/mnt/invivo_data/2014-06-06-ah-invivo-1
/mnt/invivo_data/2013-04-16-ac-invivo-1
/mnt/invivo_data/2013-04-16-af-invivo-1
/mnt/invivo_data/2015-01-20-af-invivo-1
/mnt/invivo_data/2013-04-12-ab-invivo-1
/mnt/invivo_data/2012-04-20-ad-invivo-1
/mnt/invivo_data/2013-01-08-ab-invivo-1
/mnt/invivo_data/2013-02-21-ad-invivo-1
/mnt/invivo_data/2018-05-08-ad-invivo-1
/mnt/invivo_data/2014-12-03-am-invivo-1
/mnt/invivo_data/2018-01-12-af
/mnt/invivo_data/2011-10-25-ac-invivo-1
/mnt/invivo_data/2013-04-18-ac-invivo-1
/mnt/invivo_data/2017-07-18-ak-invivo-1
/mnt/invivo_data/2013-04-16-ab-invivo-1
/mnt/invivo_data/2012-05-15-ac-invivo-1
/mnt/invivo_data/2014-12-11-ad-invivo-1
/mnt/invivo_data/2014-01-10-af-invivo-1
/mnt/invivo_data/2018-05-08-af-invivo-1
/mnt/invivo_data/2013-04-09-ab-invivo-1
/mnt/invivo_data/2014-01-10-ac-invivo-1
/mnt/invivo_data/2014-11-26-ae-invivo-1
/mnt/invivo_data/2017-07-18-ah-invivo-1
/mnt/invivo_data/2012-05-24-aa-invivo-1
/mnt/invivo_data/2015-01-20-aa-invivo-1
/mnt/invivo_data/2018-05-08-aa-invivo-1
/mnt/invivo_data/2014-03-19-aj-invivo-1
/mnt/invivo_data/2012-05-07-ac-invivo-1
/mnt/invivo_data/2012-04-20-ak-invivo-1
/mnt/invivo_data/2013-04-10-af-invivo-1
/mnt/invivo_data/2013-04-16-ag-invivo-1
/mnt/invivo_data/2017-11-08-aa-invivo-1
/mnt/invivo_data/2017-11-10-aa-invivo-1
/mnt/invivo_data/2014-06-06-ac-invivo-1
/mnt/invivo_data/2014-01-16-ak-invivo-1
/mnt/invivo_data/2013-02-21-aa-invivo-1
/mnt/invivo_data/2013-02-21-ag-invivo-1
/mnt/invivo_data/2018-01-09-ab
/mnt/invivo_data/2018-05-08-ac-invivo-1
/mnt/invivo_data/2013-04-09-aa-invivo-1
/mnt/invivo_data/2013-04-17-ab-invivo-1
/mnt/invivo_data/2013-02-21-ae-invivo-1
/mnt/invivo_data/2018-05-08-ai-invivo-1
/mnt/invivo_data/2018-01-10-aa
/mnt/invivo_data/2014-11-26-aa-invivo-1
/mnt/invivo_data/2013-04-18-aa-invivo-1
/mnt/invivo_data/2014-01-10-ae-invivo-1
/mnt/invivo_data/2014-12-03-ae-invivo-1
/mnt/invivo_data/2014-03-19-ah-invivo-1
/mnt/invivo_data/2010-11-26-am-invivo-1
/mnt/invivo_data/2014-03-25-ag-invivo-1
/mnt/invivo_data/2017-07-18-aj-invivo-1
/mnt/invivo_data/2013-02-21-af-invivo-1
/mnt/invivo_data/2012-04-20-af-invivo-1
/mnt/invivo_data/2015-01-15-aa-invivo-1
/mnt/invivo_data/2013-01-08-ac-invivo-1
/mnt/invivo_data/2018-01-10-al
/mnt/invivo_data/2014-12-11-ac-invivo-1
/mnt/invivo_data/2013-04-11-ab-invivo-1
/mnt/invivo_data/2014-12-03-ad-invivo-1
/mnt/invivo_data/2014-01-23-ab-invivo-1
/mnt/invivo_data/2013-04-09-ac-invivo-1
/mnt/invivo_data/2013-04-18-ab-invivo-1
/mnt/invivo_data/2014-01-16-aa-invivo-1
/mnt/invivo_data/2014-03-19-ai-invivo-1
/mnt/invivo_data/2014-12-11-ag-invivo-1
/mnt/invivo_data/2018-06-26-ah-invivo-1
/mnt/invivo_data/2014-01-16-ai-invivo-1
/mnt/invivo_data/2014-12-11-af-invivo-1
/mnt/invivo_data/2014-01-16-aj-invivo-1
/mnt/invivo_data/2014-03-25-aa-invivo-1
/mnt/invivo_data/2013-04-10-aa-invivo-1
/mnt/invivo_data/2012-04-20-aj-invivo-1
/mnt/invivo_data/2013-04-11-ac-invivo-1
/mnt/invivo_data/2013-04-16-ad-invivo-1
/mnt/invivo_data/2012-01-11-ae
/mnt/invivo_data/2014-12-18-aa-invivo-1
/mnt/invivo_data/2018-05-08-ab-invivo-1
/mnt/invivo_data/2012-04-20-ah-invivo-1
/mnt/invivo_data/2012-01-11-ad
/mnt/invivo_data/2011-10-25-ab-invivo-1
/mnt/invivo_data/2015-01-20-ab-invivo-1
/mnt/invivo_data/2014-01-10-ab-invivo-1
/mnt/invivo_data/2014-12-11-ai-invivo-1
/mnt/invivo_data/2013-04-16-aa-invivo-1
/mnt/invivo_data/2018-01-12-ae
/mnt/invivo_data/2018-01-10-af
/mnt/invivo_data/2017-07-18-ao-invivo-1
/mnt/invivo_data/2015-01-20-ad-invivo-1
/mnt/invivo_data/2014-03-19-af-invivo-1
/mnt/invivo_data/2018-01-12-ac
/mnt/invivo_data/2013-04-10-ac-invivo-1
/mnt/invivo_data/2011-10-25-ad-invivo-1
/mnt/invivo_data/2018-06-25-ad-invivo-1
/mnt/invivo_data/2012-05-15-aa-invivo-1
/mnt/invivo_data/2015-01-20-ag-invivo-1
/mnt/invivo_data/2014-03-19-ae-invivo-1
/mnt/invivo_data/2013-01-08-aa-invivo-1
/mnt/invivo_data/2018-01-10-ak
/mnt/invivo_data/2014-12-03-af-invivo-1
/mnt/invivo_data/2014-05-21-aa-invivo-1
/mnt/invivo_data/2014-03-19-ad-invivo-1
/mnt/invivo_data/2014-03-25-af-invivo-1
/mnt/invivo_data/2011-10-17-ag
/mnt/invivo_data/2015-01-20-ac-invivo-1
/mnt/invivo_data/2015-01-20-ae-invivo-1
/mnt/invivo_data/2018-01-10-ae
/mnt/invivo_data/2013-04-16-ae-invivo-1
/mnt/invivo_data/2012-04-20-ab-invivo-1
/mnt/invivo_data/2013-04-17-ad-invivo-1
/mnt/invivo_data/2014-11-26-af-invivo-1
/mnt/invivo_data/2013-04-17-ae-invivo-1
/mnt/invivo_data/2015-01-20-ah-invivo-1
/mnt/invivo_data/2010-11-08-al-invivo-1
/mnt/invivo_data/2017-07-18-ai-invivo-1
/mnt/invivo_data/2012-05-10-ad-invivo-1
/mnt/invivo_data/2012-04-20-ae-invivo-1
/mnt/invivo_data/2012-05-24-ab-invivo-1
/mnt/invivo_data/2014-12-03-ac-invivo-1
/mnt/invivo_data/2012-05-30-ab-invivo-1
/mnt/invivo_data/2012-05-07-aa-invivo-1
/mnt/invivo_data/2013-01-08-ad-invivo-1
/mnt/invivo_data/2017-08-11-ab-invivo-1
/mnt/invivo_data/2014-01-10-aa-invivo-1
/mnt/invivo_data/2011-06-20-ao
/mnt/invivo_data/2014-12-03-ai-invivo-1
/mnt/invivo_data/2015-01-13-aa-invivo-1
/mnt/invivo_data/2012-04-20-ag-invivo-1
/mnt/invivo_data/2014-11-19-aa-invivo-1
/mnt/invivo_data/2014-01-23-af-invivo-1
/mnt/invivo_data/2014-12-11-ah-invivo-1


@@ -0,0 +1,140 @@
from CellData import CellData, icelldata_of_dir
from os import listdir
import os


def main():
    # choose_thresholds()
    precalculate_baseline_spiketimes()


def precalculate_baseline_spiketimes():
    threshold_file_path = "invivo_data/thresholds.tsv"

    thresholds_dict = {}
    if os.path.exists(threshold_file_path):
        with open(threshold_file_path, "r") as threshold_file:
            for line in threshold_file:
                line = line.strip()
                line = line.split('\t')
                name = line[0]
                thresh = float(line[1])
                min_length = int(line[2])
                step_size = int(line[3])
                thresholds_dict[name] = [thresh, min_length, step_size]

    for cell_data in icelldata_of_dir("invivo_data/"):
        name = os.path.basename(cell_data.get_data_path())
        if name not in thresholds_dict.keys():
            print("key missing: {}".format(name))
            continue
        thresh = thresholds_dict[name][0]
        min_length = thresholds_dict[name][1]
        split_step_size = thresholds_dict[name][2]
        cell_data.get_base_spikes(threshold=thresh, min_length=min_length, split_step=split_step_size)


def choose_thresholds():
    base_path = "invivo_data/"
    threshold_file_path = "invivo_data/thresholds.tsv"
    re_choose_thresholds = False

    thresholds_dict = {}
    if os.path.exists(threshold_file_path):
        with open(threshold_file_path, "r") as threshold_file:
            for line in threshold_file:
                line = line.strip()
                line = line.split('\t')
                name = line[0]
                thresh = float(line[1])
                if len(line) > 2:
                    min_length = int(line[2])
                    step_size = int(line[3])
                    thresholds_dict[name] = [thresh, min_length, step_size]
                else:
                    thresholds_dict[name] = [thresh]

    for item in sorted(listdir(base_path)):
        # starting assumptions:
        thresh = 2.5
        min_split_length = 5000
        split_step_size = 1000

        if "thresholds" in item:
            continue
        if item in thresholds_dict.keys() and thresholds_dict[item][0] != 99 and not re_choose_thresholds:
            if len(thresholds_dict[item]) == 1:
                thresholds_dict[item] = [thresholds_dict[item][0], min_split_length, split_step_size]
            continue
        print(item)
        item_path = base_path + item
        data = CellData(item_path)

        trace = data.get_base_traces(trace_type=data.V1)
        if len(trace) == 0:
            print("NO V1 TRACE FOUND: ", item_path)
            continue

        data.get_base_spikes(thresh, min_length=min_split_length, split_step=split_step_size, re_calculate=True,
                             only_first=True)
        stop = False
        print("Threshold was {:.2f}, Min Length was {:.0f}, Split step size was {:.0f}".format(thresh, min_split_length,
                                                                                               split_step_size))
        response = input(
            "Choose: 'ok', 'stop', or a number (threshold) or three numbers (threshold, minlength, step_size) seperated with commas")
        while response != "ok":
            if response == "stop":
                stop = True
                break
            try:
                parts = response.split(",")
                if len(parts) == 1:
                    thresh = float(response)
                else:
                    thresh = float(parts[0])
                    min_split_length = int(parts[1])
                    split_step_size = int(parts[2])
            except ValueError as e:
                print("{} could not be parsed as number or ok please try again.".format(response))
                print("Threshold was {:.2f}, Min Length was {:.0f}, Split step size was {:.0f}".format(thresh,
                                                                                                       min_split_length,
                                                                                                       split_step_size))
                response = input(
                    "Choose: 'ok', 'stop', or a number (threshold) or three numbers (threshold, minlength, step_size) seperated with commas")
                continue

            data.get_base_spikes(thresh, min_length=min_split_length, split_step=split_step_size, re_calculate=True,
                                 only_first=True)
            print(
                "Threshold was {:.2f}, Min Length was {:.0f}, Split step size was {:.0f}".format(thresh,
                                                                                                 min_split_length,
                                                                                                 split_step_size))
            response = input(
                "Choose: 'ok', 'stop', or a number (threshold) or three numbers (threshold, minlength, step_size) seperated with commas")

        if stop:
            break
        thresholds_dict[item] = [thresh, min_split_length, split_step_size]

    with open(threshold_file_path, "w") as threshold_file:
        for name in sorted(thresholds_dict.keys()):
            if len(thresholds_dict[name]) == 1:
                threshold_file.write(name + "\t" + str(thresholds_dict[name][0]) + "\n")
            else:
                line = name + "\t"
                line += str(thresholds_dict[name][0]) + "\t"
                line += str(thresholds_dict[name][1]) + "\t"
                line += str(thresholds_dict[name][2]) + "\n"
                threshold_file.write(line)


if __name__ == '__main__':
    main()


@@ -39,7 +39,8 @@ def full_boltzmann_straight_slope(f_max, f_min, k, x_zero=0):

 def derivative_full_boltzmann(x, f_max, f_min, k, x_zero):
-    return (f_max - f_min) * k * np.power(np.e, -k * (x - x_zero)) / (1 + np.power(np.e, -k * (x - x_zero))**2)
+    res = (f_max - f_min) * k * np.power(np.e, -k * (x - x_zero)) / (1 + np.power(np.e, -k * (x - x_zero))**2)
+    return res


 def inverse_full_boltzmann(x, f_max, f_min, k, x_zero):
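For reference, with the full Boltzmann used throughout functions.py,

    f(x) = f_{min} + \frac{f_{max} - f_{min}}{1 + e^{-k(x - x_0)}}

the analytic derivative is

    f'(x) = \frac{(f_{max} - f_{min})\, k\, e^{-k(x - x_0)}}{\left(1 + e^{-k(x - x_0)}\right)^2}

Note that the committed expression groups the denominator as 1 + (e^{-k(x - x_0)})^2 rather than (1 + e^{-k(x - x_0)})^2; assuming the standard Boltzmann form is intended, that parenthesisation is worth double-checking (this hunk itself only split the return into an assignment).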


@ -1,6 +1,6 @@
import numpy as np import numpy as np
from warnings import warn from warnings import warn
from thunderfish.eventdetection import detect_peaks, threshold_crossing_times, threshold_crossings from thunderfish.eventdetection import threshold_crossing_times, threshold_crossings, detect_peaks
from scipy.optimize import curve_fit from scipy.optimize import curve_fit
import functions as fu import functions as fu
from numba import jit from numba import jit
@ -235,15 +235,8 @@ def mean_freq_of_spiketimes_after_time_x(spiketimes, time_x, time_in_ms=False):
if len(relevant_spikes) <= 1: if len(relevant_spikes) <= 1:
return 0 return 0
if time_in_ms:
relevant_spikes = relevant_spikes / 1000
isis = np.diff(relevant_spikes)
isi_freqs = 1 / isis
weights = isis / min(isis)
mean_freq = sum(isi_freqs * weights) / sum(weights)
return mean_freq return calculate_mean_isi_freq(relevant_spikes, time_in_ms)
def calculate_mean_isi_freq(spiketimes, time_in_ms=False): def calculate_mean_isi_freq(spiketimes, time_in_ms=False):
@ -262,6 +255,9 @@ def calculate_mean_isi_freq(spiketimes, time_in_ms=False):
# @jit(nopython=True) # only faster at around 30 000 calls # @jit(nopython=True) # only faster at around 30 000 calls
def calculate_coefficient_of_variation(spiketimes: np.ndarray) -> float: def calculate_coefficient_of_variation(spiketimes: np.ndarray) -> float:
# CV (stddev of ISI divided by mean ISI (np.diff(spiketimes)) # CV (stddev of ISI divided by mean ISI (np.diff(spiketimes))
if len(spiketimes) <= 2:
return 0
isi = np.diff(spiketimes) isi = np.diff(spiketimes)
std = np.std(isi) std = np.std(isi)
mean = np.mean(isi) mean = np.mean(isi)
@ -272,7 +268,7 @@ def calculate_coefficient_of_variation(spiketimes: np.ndarray) -> float:
# @jit(nopython=True) # maybe faster with more than ~60 000 calls # @jit(nopython=True) # maybe faster with more than ~60 000 calls
def calculate_serial_correlation(spiketimes: np.ndarray, max_lag: int) -> np.ndarray: def calculate_serial_correlation(spiketimes: np.ndarray, max_lag: int) -> np.ndarray:
isi = np.diff(spiketimes) isi = np.diff(spiketimes)
if len(spiketimes) < max_lag + 1: if len(spiketimes) < max_lag + 1 or len(spiketimes) < 20:
warn("Cannot compute serial correlation with list shorter than max lag...") warn("Cannot compute serial correlation with list shorter than max lag...")
return np.zeros(max_lag) return np.zeros(max_lag)
# raise ValueError("Given list to short, with given max_lag") # raise ValueError("Given list to short, with given max_lag")
@ -302,29 +298,6 @@ def calculate_eod_frequency(time, eod):
return 1/mean_duration return 1/mean_duration
def calculate_vector_strength_from_v1_trace(times, eods, v1_traces):
# Vectorstaerke (use EOD frequency from header (metadata)) VS > 0.8
# dl.iload_traces(repro='BaselineActivity')
relative_spike_times = []
eod_durations = []
if len(times) == 0:
print("-----LENGTH OF TIMES = 0")
for recording in range(len(times)):
spiketime_idices = detect_spike_indices(v1_traces[recording])
rel_spikes, eod_durs = eods_around_spikes(times[recording], eods[recording], spiketime_idices)
relative_spike_times.extend(rel_spikes)
eod_durations.extend(eod_durs)
# print(__vector_strength__(np.array(rel_spikes), np.array(eod_durs)))
relative_spike_times = np.array(relative_spike_times)
eod_durations = np.array(eod_durations)
return __vector_strength__(relative_spike_times, eod_durations)
def calculate_vector_strength_from_spiketimes(time, eod, spiketimes, sampling_interval): def calculate_vector_strength_from_spiketimes(time, eod, spiketimes, sampling_interval):
spiketime_indices = np.array(np.around((np.array(spiketimes) + time[0]) / sampling_interval), dtype=int) spiketime_indices = np.array(np.around((np.array(spiketimes) + time[0]) / sampling_interval), dtype=int)
rel_spikes, eod_durs = eods_around_spikes(time, eod, spiketime_indices) rel_spikes, eod_durs = eods_around_spikes(time, eod, spiketime_indices)
@ -332,28 +305,10 @@ def calculate_vector_strength_from_spiketimes(time, eod, spiketimes, sampling_in
return __vector_strength__(rel_spikes, eod_durs) return __vector_strength__(rel_spikes, eod_durs)
def detect_spike_indices(v1, split=20, threshold=3.0): def detect_spike_indices_automatic_split(v1, threshold, min_length=5000, split_step=1000):
total = len(v1)
all_peaks = []
for n in range(split):
length = int(total / split)
first_index = n * length
last_index = (n + 1) * length
std = np.std(v1[first_index:last_index])
peaks, _ = detect_peaks(v1[first_index:last_index], std * threshold)
peaks = peaks + first_index
all_peaks.extend(peaks)
all_peaks = np.array(all_peaks)
return all_peaks
def detect_spike_indices_automatic_split(v1, min_length=3000, threshold=3.0):
split_start = 0 split_start = 0
step_size = 250 step_size = split_step
break_threshold = 0.1 break_threshold = 0.25
splits = [] splits = []
if len(v1) < min_length: if len(v1) < min_length:
@@ -373,8 +328,8 @@ def detect_spike_indices_automatic_split(v1, min_length=3000, threshold=3.0):
             # print("last_max: {:.2f}, current_max: {:.2f}".format(last_max, max(v1[idx:idx+step_size])))
             # print("max_dif: {:.2f}, min_dif: {:.2f}".format(max_dif, min_dif))
-            max_similar = abs((max(v1[idx:idx+step_size]) / last_max) - 1) < break_threshold
-            min_similar = abs((min(v1[idx:idx+step_size]) / last_min) - 1) < break_threshold
+            max_similar = abs((max(v1[idx:idx+step_size]) - last_max) / last_max) < break_threshold
+            min_similar = abs((min(v1[idx:idx+step_size]) - last_min) / last_min) < break_threshold
             if not max_similar or not min_similar:
                 # print("new split")
@@ -395,10 +350,9 @@ def detect_spike_indices_automatic_split(v1, min_length=3000, threshold=3.0):
         splits.append((split_start, len(v1)))
     # plt.plot(v1)
-    #
     # for s in splits:
     #     plt.plot(s, (max(v1[s[0]:s[1]]), max(v1[s[0]:s[1]])))
-    # plt.show()
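The change turns the split criterion into a relative deviation of the window extrema from the previous window. A standalone illustration with made-up values:

# new criterion: start a new split when the window maximum deviates by more than 25 %
last_max, current_max = 2.0, 2.4
break_threshold = 0.25
max_similar = abs((current_max - last_max) / last_max) < break_threshold
print(max_similar)  # True: a 20 % deviation stays below the 0.25 threshold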
     all_peaks = []
     for s in splits:
@@ -407,31 +361,22 @@ def detect_spike_indices_automatic_split(v1, min_length=3000, threshold=3.0):
         std = np.std(v1[first_index:last_index])
         peaks, _ = detect_peaks(v1[first_index:last_index], std * threshold)
         peaks = peaks + first_index
-        # plt.plot(peaks, [np.mean(v1[first_index:last_index]) for _ in peaks], 'o')
+        # plt.plot(peaks, [np.max(v1[first_index:last_index]) for _ in peaks], 'o')
         all_peaks.extend(peaks)
     # plt.show()
-    all_peaks = np.array(all_peaks)
+    # plt.close()
+    # all_peaks = np.array(all_peaks)
     return all_peaks
-def detect_spiketimes(time, v1, split=80, threshold=2.8):
-    # all_peak_indicies = detect_spikes_indices(v1, split, threshold)
-    all_peak_indicies = detect_spike_indices_automatic_split(v1, threshold=threshold)
+def detect_spiketimes(time, v1, threshold=2.0, min_length=5000, split_step=1000):
+    all_peak_indicies = detect_spike_indices_automatic_split(v1, threshold=threshold, min_length=min_length, split_step=split_step)

     return [time[p_idx] for p_idx in all_peak_indicies]
-# def calculate_phases(relative_spike_times, eod_durations):
-#     phase_times = np.zeros(len(relative_spike_times))
-#
-#     for i in range(len(relative_spike_times)):
-#         phase_times[i] = (relative_spike_times[i] / eod_durations[i]) * 2 * np.pi
-#
-#     return phase_times
 def eods_around_spikes(time, eod, spiketime_idices):
     eod_durations = []
     relative_spike_times = []


@@ -1,6 +1,7 @@
 from stimuli.AbstractStimulus import AbstractStimulus
 from warnings import warn
+from collections import OrderedDict
 class AbstractModel:
@@ -8,7 +9,7 @@ class AbstractModel:
     # TODO what information about the model does the ModelParser need to be able to simulate the right kind of data
     # for further analysis in cell_data/fi_curve etc.
-    DEFAULT_VALUES = {}
+    DEFAULT_VALUES = OrderedDict([])

     def __init__(self, params: dict = None):
         self.parameters = {}
@@ -77,11 +78,12 @@ class AbstractModel:
     def set_variable(self, key, value):
         if key not in self.DEFAULT_VALUES.keys():
             raise ValueError("Given key is unknown!\n"
-                             "Please check spelling and refer to list LIFAC.KEYS.")
+                             "Please check spelling and refer to list DEFAULT_VALUES.keys().")
         if "tau" in key and value <= 0:
             warn("Time constants cannot be zero or negative! Setting " + str(key) + " to 0.05ms")
             self.parameters[key] = 0.00005
             return
         self.parameters[key] = value

     def _set_default_parameters(self):
@@ -93,6 +95,6 @@ class AbstractModel:
                 err_msg = "Unknown key in the given parameters:" + str(k)
                 raise ValueError(err_msg)
-            if "tau" in k and params[k] < 0:
-                warn("Time constants cannot be negative setting" + str(k) + "0.5ms")
+            if "tau" in k and params[k] < 0.0000001:
+                warn("Time constants cannot be negative or smaller than 0.0001ms! Setting " + str(k) + " to 0.05ms")
                 params[k] = 0.00005
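A quick, hypothetical illustration of the clamping behaviour after this change (get_parameters() is used elsewhere in this commit, so it is assumed to return the parameter dict):

from models.LIFACnoise import LifacNoiseModel

model = LifacNoiseModel()
model.set_variable("mem_tau", -0.01)      # warns and clamps to 0.00005 s
print(model.get_parameters()["mem_tau"])  # 5e-05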


@@ -8,24 +8,25 @@ from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus
 from scipy.optimize import curve_fit
 from warnings import warn
 import matplotlib.pyplot as plt
+from collections import OrderedDict
 class LifacNoiseModel(AbstractModel):
     # all times in milliseconds
     # possible mem_res: 100 * 1000000 exact value unknown in p-units
-    DEFAULT_VALUES = {"mem_tau": 0.015,
-                      "v_base": 0,
-                      "v_zero": 0,
-                      "threshold": 1,
-                      "v_offset": -10,
-                      "input_scaling": 60,
-                      "delta_a": 0.08,
-                      "tau_a": 0.1,
-                      "a_zero": 2,
-                      "noise_strength": 0.05,
-                      "step_size": 0.00005,
-                      "dend_tau": 0.001,
-                      "refractory_period": 0.001}
+    DEFAULT_VALUES = OrderedDict([("mem_tau", 0.015),
+                                  ("v_base", 0),
+                                  ("v_zero", 0),
+                                  ("threshold", 1),
+                                  ("v_offset", -10),
+                                  ("input_scaling", 60),
+                                  ("delta_a", 0.08),
+                                  ("tau_a", 0.1),
+                                  ("a_zero", 2),
+                                  ("noise_strength", 0.05),
+                                  ("step_size", 0.00005),
+                                  ("dend_tau", 0.001),
+                                  ("refractory_period", 0.001)])
     def __init__(self, params: dict = None):
         super().__init__(params)
@@ -287,8 +288,10 @@ def simulate_fast(rectified_stimulus_array, total_time_s, parameters: np.ndarray):
         input_voltage[i] = input_voltage[i - 1] + (
                 (-input_voltage[i - 1] + rectified_stimulus_array[i]) / dend_tau) * step_size
         output_voltage[i] = output_voltage[i - 1] + ((v_base - output_voltage[i - 1] + v_offset + (
                 input_voltage[i] * input_scaling) - adaption[i - 1] + noise) / mem_tau) * step_size
         adaption[i] = adaption[i - 1] + ((-adaption[i - 1]) / tau_a) * step_size
+        if len(spiketimes) > 0 and time[i] - spiketimes[-1] < ref_period + step_size/2:
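For readability, the continuous-time dynamics that this Euler scheme discretizes, reconstructed from the update rules above (the exact scaling of the noise term is an assumption):

\begin{align*}
\tau_{dend}\,\dot{V}_d &= -V_d + s(t) \\
\tau_m\,\dot{V} &= v_{base} - V + v_{offset} + \alpha V_d - A + \xi(t) \\
\tau_A\,\dot{A} &= -A
\end{align*}

with s(t) the rectified stimulus, \alpha the input scaling, and A incremented at each spike (controlled by delta_a); V is reset after threshold crossing and held during the refractory period.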


@@ -5,10 +5,10 @@ from Baseline import get_baseline_class
 from FiCurve import get_fi_curve_class
 from Fitter import Fitter

-import numpy as np
 import time
 import os
 import copy
+import argparse
 import multiprocessing as mp
@@ -18,34 +18,65 @@ FIT_ROUTINE = ""

 def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--cell", help="folder (with .dat files) containing the cell data")
+    args = parser.parse_args()
+    if args.cell is not None:
+        cell_data = CellData(args.cell)
+        start_parameters = [p for p in iget_start_parameters()]
+        fit_cell_parallel(cell_data, start_parameters)
+        quit()
+
+    # test_single_cell("invivo_data/2012-01-17-ap/")
+    #
+    # quit()
+    start_parameters = [p for p in iget_start_parameters()]
+    start_data = 8
+    count = 0
+    for cell_data in icelldata_of_dir("./invivo_data/"):
+        count += 1
+        if count < start_data:
+            continue
+        fit_cell_parallel(cell_data, start_parameters)
-    test_effect_of_refractory_period()
-    quit()
-    cells = [data for data in icelldata_of_dir("./data/")]
-    start_parameter = [p for p in iget_start_parameters()]
-    fit_all_cells_parallel_sync(cells, start_parameter, )
+
+
+def test_single_cell(path):
+    cell_data = CellData(path)
+    start_parameters = [p for p in iget_start_parameters()]
+    for i, p in enumerate(start_parameters):
+        fitter = Fitter()
+        fitter.set_data_reference_values(cell_data)
+        fmin, res_par = fitter.fit_routine_1(p)
+        cell_path = os.path.basename(cell_data.get_data_path())
+        error = fitter.calculate_errors(model=LifacNoiseModel(res_par))
+        save_path = "results/invivo_results/" + cell_path + "/start_parameter_{:}_err_{:.2f}/".format(i, sum(error))
+        save_fitting_run_info(cell_data, res_par, p, plot=True, save_path=save_path)
+        print("Done with start parameters {}".format(str(i)))
-def fit_cell_base(parameter):
+def fit_cell_base(parameters):
     # parameter = (cell_data, start_parameter_index, start_parameter, results_base_folder)
     time1 = time.time()
     fitter = Fitter()
-    fitter.set_data_reference_values(parameter[0])
-    fmin, res_par = fitter.fit_routine_const_ref_period(parameter[2])
+    fitter.set_data_reference_values(parameters[0])
+    fmin, res_par = fitter.fit_routine_1(parameters[2])

-    cell_data = parameter[0]
+    cell_data = parameters[0]
     cell_path = os.path.split(cell_data.get_data_path())[-1]
     error = fitter.calculate_errors(model=LifacNoiseModel(res_par))
-    save_path = parameter[3] + "/" + cell_path + "/start_parameter_{:}_err_{:.2f}/".format(parameter[1], sum(error))
-    save_fitting_run_info(parameter[0], res_par, parameter[2], plot=True, save_path=save_path)
+    save_path = parameters[3] + "/" + cell_path + "/start_parameter_{:}_err_{:.2f}/".format(parameters[1], sum(error))
+    save_fitting_run_info(parameters[0], res_par, parameters[2], plot=True, save_path=save_path)
     time2 = time.time()
+    del fitter
     print("Time taken for " + cell_path +
-          "\n and start parameters ({:}): {:.2f}s thread time".format(parameter[1]+1, time2 - time1) +
+          "\n and start parameters ({:}): {:.2f}s thread time".format(parameters[1]+1, time2 - time1) +
           "\n error: {:.2f}".format(sum(error)))
@@ -58,7 +89,7 @@ def fit_all_cells_parallel_sync(cells, start_parameters, thread_pool, results_base_folder):
     time1 = time.time()
     thread_pool.map(fit_cell_base, parameter)
     time2 = time.time()
-    print("Time taken for all cells and start parameters ({:}): {:.2f}s".format(len(parameter), time2 - time1))
+    print("Time taken for all ({:}): {:.2f}s".format(len(parameter)*len(cells), time2 - time1))
 def fit_cell_parallel(cell_data, start_parameters):
@@ -67,17 +98,16 @@ def fit_cell_parallel(cell_data, start_parameters):
     core_count = mp.cpu_count()
     pool = mp.Pool(core_count - 1)

-    fitter = Fitter()
-    fitter.set_data_reference_values(cell_data)
+    parameters = []
+    for i, p in enumerate(start_parameters):
+        parameters.append((cell_data, i, p, "./results/invivo_results/"))
     time1 = time.time()
-    outputs = pool.map(fitter.fit_routine_1, start_parameters)
+    pool.map(fit_cell_base, parameters)
     time2 = time.time()
     print("Time taken for all start parameters ({:}): {:.2f}s".format(len(start_parameters), time2-time1))
-    for i, (fmin, fin_pars) in enumerate(outputs):
-        error = fitter.calculate_errors(model=LifacNoiseModel(fin_pars))
-        save_path = "./test_routines/" + cell_path + "/start_parameter_{:}_err_{:.2f}/".format(i+1, sum(error))
-        save_fitting_run_info(cell_data, fin_pars, start_parameters[i],
-                              plot=True, save_path=save_path)
+    del pool
+    del cell_data
 def test_fit_routines():
@@ -130,6 +160,7 @@ def iget_start_parameters():
     noise_strength_list = [0.03]  # [0.02, 0.06]
     dend_tau_list = [0.001, 0.002]
     delta_a_list = [0.035, 0.065]
+    tau_a_list = [0.1, 0.4]
     ref_time_list = [0.00065]

     for mem_tau in mem_tau_list:
@@ -137,10 +168,11 @@ def iget_start_parameters():
             for noise_strength in noise_strength_list:
                 for dend_tau in dend_tau_list:
                     for delta_a in delta_a_list:
-                        for ref_time in ref_time_list:
-                            yield {"mem_tau": mem_tau, "input_scaling": input_scaling,
-                                   "noise_strength": noise_strength, "dend_tau": dend_tau,
-                                   "delta_a": delta_a, "refractory_period": ref_time}
+                        for tau_a in tau_a_list:
+                            for ref_time in ref_time_list:
+                                yield {"mem_tau": mem_tau, "input_scaling": input_scaling,
+                                       "noise_strength": noise_strength, "dend_tau": dend_tau,
+                                       "delta_a": delta_a, "tau_a": tau_a, "refractory_period": ref_time}
 def run_with_real_data(fitter, fit_routine_func, parallel=False):
@@ -264,6 +296,8 @@ def save_fitting_run_info(cell_data, parameters, start_parameters, plot=False, save_path=None):
         # plot model images
         model_baseline.plot_baseline(save_path)
         model_baseline.plot_interspike_interval_histogram(save_path)
+        model_baseline.plot_isi_histogram_comparision(data_baseline.get_interspike_intervals(),
+                                                      model_baseline.get_interspike_intervals(), save_path)
         model_baseline.plot_serial_correlation(6, save_path)
         model_ficurve.plot_fi_curve(save_path)

run_cellwise_fitting.sh Executable file

@@ -0,0 +1,6 @@
for file in invivo_data/*; do
    if [ -d "$file" ]; then
        nice python3 run_Fitter.py --cell "$file"
    fi
done


@@ -1,5 +1,7 @@
 from stimuli.SinusAmplitudeModulation import SinusAmplitudeModulationStimulus as SAM
+from Baseline import get_baseline_class
+from FiCurve import FICurveModel
 from models.LIFACnoise import LifacNoiseModel
 import numpy as np
 import matplotlib.pyplot as plt
@@ -8,51 +10,94 @@ from CellData import CellData
 def main():
-    # 2012-12-13_ao fit and eod frequency:
-    parameters = {'step_size': 5e-05, 'mem_tau': 0.009946816831208656, 'v_base': 0, 'v_zero': 0, 'threshold': 1,
-                  'v_offset': -9.375, 'input_scaling': 85.90592189374783, 'delta_a': 0.11098554500597714,
-                  'tau_a': 0.04533432159583689, 'a_zero': 2, 'noise_strength': 0.02947375332925044,
-                  'dend_tau': 0.001154822221492827, 'refractory_period': 0.0006}
-    eod_freq = 658
-    cell_data = CellData("./data/2012-12-13-ao-invivo-1/")
+    # 2012-07-12-ag-invivo-1 fit and eod frequency:
+    # parameters = {'refractory_period': 0.00080122694889117, 'v_base': 0, 'v_zero': 0, 'a_zero': 20, 'step_size': 5e-05,
+    #               'delta_a': 0.23628384937392385, 'threshold': 1, 'input_scaling': 100.66894113671654,
+    #               'mem_tau': 0.012388673630113763, 'tau_a': 0.09106579031822526, 'v_offset': -6.25,
+    #               'noise_strength': 0.0404417932620334, 'dend_tau': 0.00122153436141022}
+    # cell_data = CellData("./data/2012-07-12-ag-invivo-1/")
+
+    parameters = {'delta_a': 0.08820130374685671, 'refractory_period': 0.0006, 'a_zero': 15, 'step_size': 5e-05,
+                  'v_base': 0, 'noise_strength': 0.03622523883042496, 'v_zero': 0, 'threshold': 1,
+                  'input_scaling': 77.75367190909581, 'tau_a': 0.07623731247799118, 'v_offset': -10.546875,
+                  'mem_tau': 0.008741976196676469, 'dend_tau': 0.0012058986118892773}
+    cell_data = CellData("./data/2012-12-13-an-invivo-1/")
+    eod_freq = cell_data.get_eod_frequency()

     model = LifacNoiseModel(parameters)

-    mean_duration = np.mean(cell_data.get_sam_durations())
-    contrasts = cell_data.get_sam_contrasts()
-    spiketimes = cell_data.get_sam_spiketimes()
-    delta_freqs = np.unique(cell_data.get_sam_delta_frequencies())
-    for i, m_freq in enumerate(delta_freqs):
-        stimulus = SAM(eod_freq, contrasts[i]/100, m_freq)
-        v1, spikes_model = model.simulate_fast(stimulus, mean_duration)
-        prob_density_function_model = spiketimes_calculate_pdf(spikes_model, model.get_sampling_interval())
-        for spikes_cell in spiketimes[i]:
-            prob_density_cell = spiketimes_calculate_pdf(spikes_cell, cell_data.get_sampling_interval())
-            plt.plot(prob_density_function_model)
-            plt.plot(prob_density_cell)
-            plt.show()
-            plt.close()
-
-    # # __init__(carrier_frequency, contrast, modulation_frequency, start_time=0, duration=np.inf, amplitude=1)
-    # mod_freqs = np.arange(-60, eod_freq*4 + 61, 10)
-    # sigma_of_pdfs = []
-    # for m_freq in mod_freqs:
-    #     print(m_freq, "max: {:.2f}".format(mod_freqs[-1]))
-    #     stimulus = SAM(eod_freq, 0.2, m_freq)
-    #
-    #     prob_density_function = generate_pdf(model, stimulus)
-    #     buffer = 0.25
-    #     buffer_idx = int(buffer / model.get_parameters()["step_size"])
-    #
-    #     sigma_of_pdfs.append(np.std(prob_density_function[buffer_idx:-buffer_idx]))
-    #
-    # normed_mod_freqs = (mod_freqs + eod_freq) / eod_freq
-    # plt.plot(normed_mod_freqs, sigma_of_pdfs)
-    # plt.savefig("./figures/sam/test.png")
-    # plt.close()
-    pass
+    # base_cell = get_baseline_class(cell_data)
+    # base_model = get_baseline_class(model, cell_data.get_eod_frequency())
+    # isis_cell = np.array(base_cell.get_interspike_intervals()) * 1000
+    # isi_model = np.array(base_model.get_interspike_intervals()) * 1000
+    # bins = np.arange(0, 20, 0.1)
+    # plt.hist(isi_model, bins=bins, alpha=0.5)
+    # plt.hist(isis_cell, bins=bins, alpha=0.5)
+    # plt.show()
+    # plt.close()
+
+    # ficurve = FICurveModel(model, np.arange(-1, 1.1, 0.1), eod_freq)
+    #
+    # ficurve.plot_fi_curve()
+
+    durations = cell_data.get_sam_durations()
+    u_durations = np.unique(durations)
+    mean_duration = np.mean(durations)
+    contrasts = cell_data.get_sam_contrasts()
+    contrast = contrasts[0]  # all contrasts are the same in this test case
+    spiketimes = cell_data.get_sam_spiketimes()
+    delta_freqs = cell_data.get_sam_delta_frequencies()
+    step_size = cell_data.get_sampling_interval()
+
+    spikes_dictionary = {}
+    for i, m_freq in enumerate(delta_freqs):
+        if m_freq in spikes_dictionary:
+            spikes_dictionary[m_freq].append(spiketimes[i])
+        else:
+            spikes_dictionary[m_freq] = [spiketimes[i]]
+
+    for m_freq in sorted(spikes_dictionary.keys()):
+        if mean_duration < 2*1/float(m_freq):
+            continue
+        stimulus = SAM(eod_freq, contrast/100, m_freq)
+        v1, spikes_model = model.simulate_fast(stimulus, mean_duration*4)
+        prob_density_function_model = spiketimes_calculate_pdf(spikes_model, step_size)
+        # plt.plot(prob_density_function_model)
+        # plt.show()
+        # plt.close()
+        fig, axes = plt.subplots(1, 4)
+        cuts = cut_pdf_into_periods(prob_density_function_model, 1/float(m_freq), step_size)
+        for c in cuts:
+            axes[0].plot(c, color="gray", alpha=0.2)
+        axes[0].set_title("model")
+        mean_model = np.mean(cuts, axis=0)
+        axes[0].plot(mean_model, color="black")
+
+        means_cell = []
+        for spikes_cell in spikes_dictionary[m_freq]:
+            prob_density_cell = spiketimes_calculate_pdf(spikes_cell[0], step_size)
+            cuts_cell = cut_pdf_into_periods(prob_density_cell, 1/float(m_freq), step_size)
+            for c in cuts_cell:
+                axes[1].plot(c, color="gray", alpha=0.15)
+            print(cuts_cell.shape)
+            means_cell.append(np.mean(cuts_cell, axis=0))
+
+        means_cell = np.array(means_cell)
+        total_mean_cell = np.mean(means_cell, axis=0)
+        axes[1].set_title("cell")
+        axes[1].plot(total_mean_cell, color="black")
+
+        axes[2].set_title("difference")
+        diff = [(total_mean_cell[i]-mean_model[i]) for i in range(len(total_mean_cell))]
+        axes[2].plot(diff)
+
+        axes[3].plot(total_mean_cell)
+        axes[3].plot(mean_model)
+        plt.show()
+        plt.close()
 def generate_pdf(model, stimulus, trials=4, sim_length=3, kernel_width=0.005):
@@ -78,7 +123,7 @@ def generate_pdf(model, stimulus, trials=4, sim_length=3, kernel_width=0.005):
 def spiketimes_calculate_pdf(spikes, step_size, kernel_width=0.005):
-    length = int(spikes[-1] / step_size)+1
+    length = int(spikes[len(spikes)-1] / step_size)+1
     binary = np.zeros(length)
     spikes = [int(s / step_size) for s in spikes]
     for s_idx in spikes:
@@ -90,6 +135,33 @@ def spiketimes_calculate_pdf(spikes, step_size, kernel_width=0.005):
     return rate
+def cut_pdf_into_periods(pdf, period, step_size, factor=1.5):
+    idx_period_length = int(period/float(step_size))
+    offset_per_step = period/float(step_size) - idx_period_length
+    cut_length = int(period / float(step_size) * factor)
+    cuts = []
+
+    num_of_cuts = int(len(pdf) / idx_period_length)
+    if len(pdf) - (num_of_cuts * idx_period_length + (num_of_cuts * offset_per_step)) < cut_length - idx_period_length:
+        num_of_cuts -= 1
+
+    if num_of_cuts <= 0:
+        raise RuntimeError("Probability density function too short to cut.")
+
+    for i in np.arange(0, num_of_cuts, 1):
+        offset_correction = int(offset_per_step * i)
+        start_idx = i*idx_period_length + offset_correction
+        end_idx = (i*idx_period_length)+cut_length + offset_correction
+        cuts.append(np.array(pdf[start_idx: end_idx]))
+    cuts = np.array(cuts)
+
+    if len(cuts.shape) < 2:
+        print("Fishy....")
+    return cuts
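A usage sketch for the new helper with made-up numbers (assumes cut_pdf_into_periods as defined above; a 50 Hz modulation sampled at the model's 5e-05 s step):

import numpy as np

step_size = 5e-05
pdf = np.sin(2 * np.pi * 50 * np.arange(0, 10, step_size))  # stand-in for a firing-rate PDF
cuts = cut_pdf_into_periods(pdf, 1 / 50, step_size)
print(cuts.shape)  # (number_of_cuts, samples_per_cut); each cut spans 1.5 periods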
 def gaussian_kernel(sigma, dt):
     x = np.arange(-4. * sigma, 4. * sigma, dt)
     y = np.exp(-0.5 * (x / sigma) ** 2) / np.sqrt(2. * np.pi) / sigma


@@ -1,6 +1,7 @@
 from stimuli.AbstractStimulus import AbstractStimulus
 import numpy as np
 from numba import jit, njit
+from warnings import warn
 class SinusAmplitudeModulationStimulus(AbstractStimulus):
@@ -67,8 +68,9 @@ def convert_to_array(carrier_freq, amplitude, modulation_freq, contrast, start_time, ...)
     idx_end = (am_end - time_start) / step_size_s

     if idx_start != round(idx_start) or idx_end != round(idx_end):
-        raise ValueError("Didn't calculate integers when searching the start and end index. start:", idx_start, "end:", idx_end)
-        # print("am_start: {:.0f}, am_end: {:.0f}, length: {:.0f}".format(am_start, am_end, am_end-am_start))
+        warn("Didn't calculate integers when searching the start and end index. start: {} end: {}".format(idx_start, idx_end))
+        # raise ValueError("Didn't calculate integers when searching the start and end index. start:", idx_start, "end:", idx_end)
+        # print("am_start: {:.0f}, am_end: {:.0f}, length: {:.0f}".format(am_start, am_end, am_end-am_start))
     idx_start = int(idx_start)
     idx_end = int(idx_end)
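The warning fires when the AM start/end times are not integer multiples of the step size; a quick numeric illustration (made-up values):

time_start, am_start, step_size_s = 0.0, 0.001, 3e-05
idx_start = (am_start - time_start) / step_size_s
print(idx_start)                      # 33.333... -> no integer index exists
print(idx_start != round(idx_start))  # True -> the warn() branch triggers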

test.py Normal file

@@ -0,0 +1,68 @@
from Baseline import get_baseline_class
from CellData import CellData
from models.LIFACnoise import LifacNoiseModel
from Baseline import BaselineCellData, BaselineModel
from os import listdir
from IPython import embed
import pyrelacs.DataLoader as Dl

for meep in Dl.load("invivo_data/2011-10-25-aa-invivo-1/info.dat"):
    print(meep)

quit()


def icelldata_of_dir(base_path):
    global COUNT
    for item in sorted(listdir(base_path)):
        item_path = base_path + item

        try:
            data = CellData(item_path)
            yield data
        except TypeError as e:
            print(str(e))
        except IndexError as e:
            print(str(e), "\n")
        except ValueError as e:
            print(str(e), "\n")

    print("Currently throw errors: {}".format(COUNT))


for data in icelldata_of_dir("invivo_data/"):
    v1 = data.get_base_traces(data.V1)
    if len(v1) == 0:
        embed()
        quit()

quit()

parameter_bursty_model = {'step_size': 5e-05, 'mem_tau': 0.0066693150193490695, 'v_base': 0, 'v_zero': 0,
                          'threshold': 1, 'v_offset': -45.703125, 'input_scaling': 172.13861987237314,
                          'delta_a': 0.06148215166012024, 'tau_a': 0.03391674075000068, 'a_zero': 2,
                          'noise_strength': 0.0684136549210377, 'dend_tau': 0.0013694103932013805,
                          'refractory_period': 0.001}
eod = 752
model = LifacNoiseModel(parameter_bursty_model)
baseline_model = get_baseline_class(model, 752, trials=2)
baseline_model.get_burstiness()
quit()

for cell_data in icelldata_of_dir("data/"):
    baseline = get_baseline_class(cell_data)
    baseline.get_burstiness()

test_for_cells.py Normal file

@@ -0,0 +1,130 @@
from CellData import icelldata_of_dir, CellData
from DataParserFactory import DatParser
import numpy as np
import os
import matplotlib.pyplot as plt

data_save_path = "test_routines/test_files/"
read = False

if read:
    directory = "/mnt/invivo_data/"
    fi_curve_min_contrasts = 7
    fi_curve_min_trials = 7
    baseline_min_duration = 30

    files = []
    baseline = []
    ficurve = []
    accepted = []
    count = 0
    for data_dir in os.listdir(directory):
        data_dir = os.path.join(directory, data_dir)
        if not os.path.isdir(data_dir):
            continue
        try:
            parser = DatParser(data_dir)
            print(data_dir)
            baseline_lengths = parser.get_baseline_length()
            baseline_good = max(baseline_lengths) >= baseline_min_duration

            contrasts = parser.get_fi_curve_contrasts()
            if len(contrasts) < fi_curve_min_contrasts:
                fi_curve_good = False
            else:
                intensities_with_enough_trials = contrasts[:, 0][contrasts[:, 1] >= fi_curve_min_trials]
                fi_curve_good = len(intensities_with_enough_trials) >= fi_curve_min_contrasts

            if fi_curve_good and baseline_good:
                count += 1
                print("good")
                accepted.append(True)
            else:
                print("bad")
                accepted.append(False)
            files.append(data_dir)
            baseline.append(baseline_lengths)
            ficurve.append(contrasts)
        except RuntimeError as e:
            print(data_dir)
            print("bad")
            accepted.append(False)
            files.append(data_dir)
            baseline.append([])
            ficurve.append([])

    files = np.array(files)
    baseline = np.array(baseline)
    ficurve = np.array(ficurve)
    accepted = np.array(accepted)
    np.save(data_save_path + "files", files)
    np.save(data_save_path + "baseline", baseline)
    np.save(data_save_path + "ficurve", ficurve)
    np.save(data_save_path + "accepted", accepted)
    print("Total good:", count)
else:
    files = np.load(data_save_path + "files.npy", allow_pickle=True)
    baseline = np.load(data_save_path + "baseline.npy", allow_pickle=True)
    ficurve = np.load(data_save_path + "ficurve.npy", allow_pickle=True)
    accepted = np.load(data_save_path + "accepted.npy", allow_pickle=True)

print(np.sum(accepted))
with open("test_routines/data_files.txt", "w") as file:
    for i in range(len(files)):
        if accepted[i]:
            file.write(files[i] + "\n")

quit()

min_contrasts = 7
min_trials = 7
min_baseline = 30
print("min_baseline: {:}, min_contrasts: {:}, min_trials: {:}".format(min_baseline, min_contrasts, min_trials))
# bins = np.arange(0, 100, 1)
# plt.hist([max(x) for x in baseline if len(x) > 0], bins=bins)
# plt.show()
# plt.close()

good_cells = []
ints_with_enough_trials = []
for i, contrasts in enumerate(ficurve):
    if len(baseline[i]) <= 0 or max(baseline[i]) < min_baseline:
        continue
    count = 0
    if len(contrasts) == 0:
        continue
    for intensity in contrasts:
        if intensity[1] >= min_trials:
            count += 1
    ints_with_enough_trials.append(count)

bins = np.arange(0.5, 20.5, 1)
points = plt.hist(ints_with_enough_trials, bins=bins)
print(sum(points[0][min_contrasts-1:]))
# plt.show()
# plt.close()

count = 0
all_cells = 0
for cell_data in icelldata_of_dir("data/", False):
    all_cells += 1
    if max(cell_data.get_baseline_length()) < min_baseline:
        continue
    contrasts = cell_data.get_fi_curve_contrasts_with_trial_number()
    c_count = 0
    for c in contrasts:
        if c[1] >= min_trials:
            c_count += 1
    if c_count < min_contrasts:
        continue
    count += 1

print("Fulfilled by {:} of {:} test cells".format(count, all_cells))


@@ -254,23 +254,6 @@ def rectify_stimulus_array(stimulus_array: np.ndarray):

 if __name__ == '__main__':
-    # # X = [0.05, 0.02, 50, 0.1, 0.03]
-    # model = LifacNoiseModel()
-    # # model.set_variable("mem_tau", X[0])
-    # # model.set_variable("noise_strength", X[1])
-    # # model.set_variable("input_scaling", X[2])
-    # # model.set_variable("tau_a", X[3])
-    # # model.set_variable("delta_a", X[4])
-    # stim = SinusoidalStepStimulus(700, 0.2, start_time=1, duration=1)
-    # bf, vs, sc = model.calculate_baseline_markers(700)
-    # print("baseline freq:{:.2f}\nVector strength: {:.3f}\nSerial cor:".format(bf, vs), sc)
-    # contrasts = np.arange(-0.3, 0.31, 0.05)
-    # model.calculate_fi_curve(contrasts, 700)
-    # f_infinities, slope = model.calculate_fi_markers(contrasts, 700)
-    # print("FI-Curve\nSlope: {:.2f}\nValues:".format(slope), f_infinities)
-    # plot_model_during_stimulus(model, stim, 3)
-    # quit()
     model_parameters = {'v_offset': -15.234375, 'input_scaling': 64.94152780134829, 'step_size': 5e-05, 'a_zero': 2,
                         'threshold': 1, 'v_base': 0, 'delta_a': 0.04763179657857666, 'tau_a': 0.07891848949732623,
@@ -288,7 +271,7 @@ if __name__ == '__main__':
     # test_baseline_polar_plot()
     # time_test_function()
-    # test_cell_data()
+    test_cell_data()
     # test_peak_detection()
     # test_simulation_speed()
     # test_parameters()


@@ -0,0 +1,40 @@
import pyrelacs.DataLoader as Dl


def main():
    traces_missing()


# Index / Value error depending on cell:
def throw_error():
    cell_folder = "../invivo_data/2014-06-06-ah-invivo-1/"
    repro = "BaselineActivity"
    for info, key, time, x in Dl.iload_traces(cell_folder, repro=repro):
        continue


def traces_missing():
    cell_folder = "../invivo_data/2011-10-25-ab-invivo-1"
    repro = "BaselineActivity"
    time_traces = []
    v1_traces = []
    eod_traces = []
    local_eod_traces = []
    stimulus_traces = []
    for info, key, time, x in Dl.iload_traces(cell_folder, repro=repro):
        time_traces.append(time)
        v1_traces.append(x[0])
        eod_traces.append(x[1])
        local_eod_traces.append(x[2])
        stimulus_traces.append(x[3])
    print("num of v1 traces: {:}\nnum of eod traces: {:}\nnum of local eod traces: {:}\nnum of stim traces: {:}".format(
        len(v1_traces), len(eod_traces), len(local_eod_traces), len(stimulus_traces)))


if __name__ == '__main__':
    main()


@@ -1,27 +0,0 @@
-import pyrelacs.DataLoader as Dl
-import matplotlib.pyplot as plt
-import numpy as np
-from DataParserFactory import get_parser
-import pprint
-from Baseline import get_baseline_class
-from FiCurve import get_fi_curve_class
-from CellData import icelldata_of_dir
-from models.LIFACnoise import LifacNoiseModel
-
-parameter_bursty_model = {'step_size': 5e-05, 'mem_tau': 0.0066693150193490695, 'v_base': 0, 'v_zero': 0,
-                          'threshold': 1, 'v_offset': -45.703125, 'input_scaling': 172.13861987237314,
-                          'delta_a': 0.06148215166012024, 'tau_a': 0.03391674075000068, 'a_zero': 2,
-                          'noise_strength': 0.0684136549210377, 'dend_tau': 0.0013694103932013805,
-                          'refractory_period': 0.001}
-eod = 752
-model = LifacNoiseModel(parameter_bursty_model)
-baseline_model = get_baseline_class(model, 752, trials=2)
-baseline_model.get_burstiness()
-quit()
-
-for cell_data in icelldata_of_dir("../data/"):
-    baseline = get_baseline_class(cell_data)
-    baseline.get_burstiness()

thesis/Masterthesis.aux Executable file

@@ -0,0 +1,28 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\select@language{english}
\@writefile{toc}{\select@language{english}}
\@writefile{lof}{\select@language{english}}
\@writefile{lot}{\select@language{english}}
\@writefile{toc}{\contentsline {section}{\numberline {1}Abstract}{2}{section.1}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Introduction}{2}{section.2}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Materials and Methods}{2}{section.3}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Notes:}{2}{subsection.3.1}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Results}{3}{section.4}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Discussion}{3}{section.5}}

thesis/Masterthesis.log Executable file

@@ -0,0 +1,530 @@
This is pdfTeX, Version 3.14159265-2.6-1.40.16 (TeX Live 2015/Debian) (preloaded format=pdflatex 2018.11.12) 22 JUN 2020 14:03
[... 528 lines of routine pdfTeX build output omitted: package loading (geometry, graphicx, amsmath, natbib, hyperref, inputenc/utf8x, babel english, floatrow, listings), page geometry report, font info, and memory statistics ...]
Output written on Masterthesis.pdf (4 pages, 47088 bytes).

thesis/Masterthesis.out Executable file

@@ -0,0 +1,6 @@
\BOOKMARK [1][]{section.1}{Abstract}{}% 1
\BOOKMARK [1][]{section.2}{Introduction}{}% 2
\BOOKMARK [1][]{section.3}{Materials and Methods}{}% 3
\BOOKMARK [2][]{subsection.3.1}{Notes:}{section.3}% 4
\BOOKMARK [1][]{section.4}{Results}{}% 5
\BOOKMARK [1][]{section.5}{Discussion}{}% 6

thesis/Masterthesis.pdf Executable file

Binary file not shown.


thesis/Masterthesis.tex Executable file

@@ -0,0 +1,198 @@
\documentclass[12pt,a4paper,pdftex]{article}
\usepackage[left=25mm, right=25mm, top=20mm, bottom=25mm]{geometry}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage[breaklinks=true,bookmarks=true,bookmarksopen=true,pdfpagemode=UseNone,pdfstartview=FitH,colorlinks=false,citecolor=blue]{hyperref}
\usepackage[utf8x]{inputenc}
\usepackage[english]{babel}
%\usepackage{float}
\usepackage{floatrow}
\usepackage{listings}  % for the code listing at the end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The actual text starts here:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Title page
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{titlepage}
\begin{center}
{\Huge TITLE \par}
\vspace{0.75cm}
{\Large Masterthesis \par}
\vspace{0.25cm}
{of the Faculty of Science \par} {of the Eberhard Karls Universität Tübingen \par}
\vspace{0.75cm}
{First examiner: \\
Second examiner: Prof.~Dr.~Jan Benda \par}
\vspace{0.25cm}
{Department of Neuroethology}
\vfill
\large submitted by \par
\large Alexander Mathias Ott \par
Submission date: 30.11.2017
\end{center}
\end{titlepage}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Declaration
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Declaration of Authorship}
\vspace{0.5cm}
I hereby declare that I have written the submitted thesis independently and have used no sources or aids other than those indicated.
\vspace{2mm}
\noindent
Furthermore, I declare that the submitted work has not been the subject of any other examination procedure, either in its entirety or in substantial parts.
\vfill
\begin{tabular}{ll}
$\overline{\text{Signature}\hspace{6cm}}$ & $\overline{\text{Place, date}\hspace{4cm}}$ \\
\end{tabular}
\newpage\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Table of contents
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
{
\hypersetup{linkcolor=black}
\tableofcontents
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Abstract
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Abstract}
%Einleitung + Ergebnisse der Diskussion in kurz
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Introduction
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
\begin{enumerate}
\item electric fish
\begin{enumerate}
\item general: habitat,
\item as model animal for ethology
\item electric organ + eod
\item sensory neurons p- and t(?)-type
\end{enumerate}
\item sensory perception
\begin{enumerate}
\item receptors -> heterogeneous population
\item further analysis limited by what receptors code for
\item p-type neurons code AMs
\end{enumerate}
\item goal: be able to simulate a heterogeneous population to analyse its full coding properties -> many cells at the same time needed -> only possible in vitro / with model simulations
\item Possible to draw representative values for model parameters to generate a population ?
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Methods
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Materials and Methods}
\subsection{Notes:}
\begin{enumerate}
\item Construction of model
\begin{enumerate}
\item Explain general LIF
\item parameters explanation, dif. equations
\item Explain addition of adaption current
\item note addition of noise
\item check between alpha in fire-rate model adaption and a-delta in LIFAC
\item check for noise independence from step-size (?)
\end{enumerate}
\item Data generation
\begin{enumerate}
\item How data was measured / which data used
\item How data was chosen -> at least 30s baseline, 7 contrasts with 7 trials
\item experimental protocols were approved by XYZ (before 2012: All experimental protocols were approved and complied with national and regional laws (file no. 55.2-1-54-2531-135-09); between 2013--2016: ZP 1/13 Regierungspräsidium Tübingen; after 2016: ZP 1/16 Regierungspräsidium Tübingen)
\end{enumerate}
\item behavior parameters:
\begin{enumerate}
\item which behaviors were looked at / calculated and why (bf, vs, sc, cv, fi-curve...)
\item how exactly were they calculated in the cell and model
\end{enumerate}
\item Fitting of model to data
\begin{enumerate}
\item which variables where determined beforehand (None, just for start parameters)
\item which variables where fit
\item What method was used (Nelder-Mead) and why/(how it works?)
\item fit routine ? (currently just all at the same time)
\end{enumerate}
\end{enumerate}
\section{Results}
\begin{enumerate}
\item how well does the fitting work?
\item distribution of behavior parameters (cells and models)
\item distributions of parameters
\item correlations: between parameters between parameters and behavior
\item correlation between final error and behavior parameters of the cell -> hard to fit cell types
\item (response to SAM stimuli)
\end{enumerate}
\section{Discussion}
\section{Possible Sources}
\subsection{Henriette Walz - Thesis}
\subsubsection{Nervous system - Signal encoding}
\begin{enumerate}
\item single neurons are the building blocks of the nervous system (Cajal 1899)
\item encoding of information in spike frequency -- rate code (first description(?) Adrian 1928); also find examples! (light flash intensity: Barlow et al. 1971)
\item encoding of information in inter-spike intervals (Singer and Gray 1995)
\item encoding time window (Theunissen and Miller 1995): ``This time window is the time scale in which the encoding is assumed to take place within the nervous system''
\item encoding is noisy (Mainen and Sejnowski 1995, Tolhurst et al. 1983, Tomko and Crapper 1974 -> review Faisal et al. 2008), in part because of stimulus properties but also cell properties (ion channel stochasticity (van Rossum et al., 2003))
\item noise can be beneficial to encoding -> ``stochastic resonance'' (weak stimuli on thresholding devices like neurons; noise allows coding of sub-threshold stimuli) (Benzi et al., 1981)
\end{enumerate}
\end{document}

thesis/Masterthesis.toc Executable file

@@ -0,0 +1,7 @@
\select@language {english}
\contentsline {section}{\numberline {1}Abstract}{2}{section.1}
\contentsline {section}{\numberline {2}Introduction}{2}{section.2}
\contentsline {section}{\numberline {3}Materials and Methods}{2}{section.3}
\contentsline {subsection}{\numberline {3.1}Notes:}{2}{subsection.3.1}
\contentsline {section}{\numberline {4}Results}{3}{section.4}
\contentsline {section}{\numberline {5}Discussion}{3}{section.5}

Binary file not shown.


@@ -78,7 +78,7 @@ class HelperFunctionsTester(unittest.TestCase):
             print(cell_data.get_data_path())
             v1 = cell_data.get_base_traces(cell_data.V1)[0]
-            hF.detect_spike_indices_automatic_split(v1)
+            hF.detect_spike_indices_automatic_split(v1, 2.8)

 # todo
 # search_eod_start_and_end_times ? (not used anymore ?)