diff --git a/CellData.py b/CellData.py index 5db7872..6afbf7a 100644 --- a/CellData.py +++ b/CellData.py @@ -4,6 +4,8 @@ import os import helperFunctions as hf import numpy as np +import matplotlib.pyplot as plt + COUNT = 0 @@ -127,6 +129,14 @@ class CellData: if only_first and i > 0: break spiketimes.append(hf.detect_spiketimes(times[i], v1_traces[i], threshold=threshold, min_length=min_length, split_step=split_step)) + + # plt.plot(times[0], v1_traces[0]) + # idx_pos = np.array(spiketimes) / self.get_sampling_interval() + # idx_pos = np.array(np.rint(idx_pos), np.int) + # + # plt.plot(spiketimes[0], np.array(v1_traces[0])[idx_pos][0, :], 'o') + # plt.show() + self.base_spikes = np.array(spiketimes) if os.path.isdir(self.data_path): diff --git a/DataParserFactory.py b/DataParserFactory.py index 301b35d..6f71f8a 100644 --- a/DataParserFactory.py +++ b/DataParserFactory.py @@ -60,6 +60,7 @@ class DatParser(AbstractParser): def __init__(self, dir_path): self.base_path = dir_path + self.info_file = self.base_path + "/info.dat" self.fi_file = self.base_path + "/fispikes1.dat" self.baseline_file = self.base_path + "/basespikes1.dat" self.sam_file = self.base_path + "/samallspikes1.dat" @@ -77,6 +78,39 @@ class DatParser(AbstractParser): return lengths + def get_species(self): + species = "" + for metadata in Dl.load(self.info_file): + if "Species" in metadata.keys(): + species = metadata["Species"] + elif "Subject" in metadata.keys(): + if isinstance(metadata["Subject"], dict) and "Species" in metadata["Subject"].keys(): + species = metadata["Subject"]["Species"] + + return species + + def get_quality(self): + quality = "" + for metadata in Dl.load(self.info_file): + if "Recording quality" in metadata.keys(): + quality = metadata["Recording quality"] + elif "Recording" in metadata.keys(): + if isinstance(metadata["Recording"], dict) and "Recording quality" in metadata["Recording"].keys(): + quality = metadata["Recording"]["Recording quality"] + return quality + + def get_cell_type(self): + type = "" + for metadata in Dl.load(self.info_file): + if len(metadata.keys()) < 3: + return "" + if "CellType" in metadata.keys(): + type = metadata["CellType"] + elif "Cell" in metadata.keys(): + if isinstance(metadata["Cell"], dict) and "CellType" in metadata["Cell"].keys(): + type = metadata["Cell"]["CellType"] + return type + def get_fi_curve_contrasts(self): """ @@ -358,11 +392,11 @@ class DatParser(AbstractParser): def __test_data_file_existence__(self): if not exists(self.stimuli_file): - raise RuntimeError(self.stimuli_file + " file doesn't exist!") + raise FileNotFoundError(self.stimuli_file + " file doesn't exist!") if not exists(self.fi_file): - raise RuntimeError(self.fi_file + " file doesn't exist!") + raise FileNotFoundError(self.fi_file + " file doesn't exist!") if not exists(self.baseline_file): - raise RuntimeError(self.baseline_file + " file doesn't exist!") + raise FileNotFoundError(self.baseline_file + " file doesn't exist!") # if not exists(self.sam_file): # raise RuntimeError(self.sam_file + " file doesn't exist!") diff --git a/Figures_Model.py b/Figures_Model.py index 5f2817c..bd6df1a 100644 --- a/Figures_Model.py +++ b/Figures_Model.py @@ -10,66 +10,75 @@ import models.smallModels as sM def main(): # stimulus_development() - model_adaption_example() - # model_comparison() + # model_adaption_example() + model_comparison() pass def isi_development(): model_params = consts.model_cell_1 - pass def model_comparison(): step = 0.00001 - duration = 0.5 + duration = 1.3 stimulus = np.arange(0, 
duration, step) - stimulus[0:8000] = 2 - stimulus[8000:20000] = 0 - stimulus[20000:] = 1 + stimulus[0:int(0.7/step)] = 0.5 + stimulus[int(0.7/step):int(0.9/step)] = 1 + stimulus[int(0.9/step):int(1.1/step)] = 0 + stimulus[int(1.1/step):int(1.3/step)] = 0.5 fig, axes = plt.subplots(5, 2, sharex=True, sharey="col", figsize=consts.FIG_SIZE_LARGE) - axes[1, 0].set_title("Voltage") - axes[1, 1].set_title("Frequency") - - axes[0, 0].plot(np.arange(0, duration, step)[:len(stimulus)], stimulus) - axes[0, 0].set_ylabel("Stimulus") + axes[0, 0].plot(np.arange(-0.5, duration, step)[:len(stimulus)], stimulus) + axes[0, 0].set_title("Stimulus") + axes[0, 0].set_ylabel("V in mV") axes[0, 1].set_frame_on(False) axes[0, 1].set_axis_off() + axes[0, 0].set_ylim((-0.1, 1.5)) + axes[0, 0].set_xlim((0, duration-0.5)) v1, spikes = sM.pif_simulation(stimulus, step) - axes[1, 0].plot(np.arange(0, duration, step)[:len(v1)], v1) + spikes = np.array(spikes)-0.5 + axes[1, 0].plot(np.arange(-0.5, duration, step)[:len(v1)], v1) axes[1, 0].eventplot(spikes, lineoffsets=1.2, linelengths=0.2, colors="black") time, freq = hF.calculate_time_and_frequency_trace(spikes, step) axes[1, 1].plot(time, freq) - axes[1, 0].set_ylabel("PIF") + axes[1, 0].set_title("PIF") + axes[1, 0].set_ylabel("V in mV") v1, spikes = sM.lif_simulation(stimulus, step) - axes[2, 0].plot(np.arange(0, duration, step)[:len(v1)], v1) + spikes = np.array(spikes)-0.5 + axes[2, 0].plot(np.arange(-0.5, duration, step)[:len(v1)], v1) axes[2, 0].eventplot(spikes, lineoffsets=1.2, linelengths=0.2, colors="black") time, freq = hF.calculate_time_and_frequency_trace(spikes, step) axes[2, 1].plot(time, freq) - axes[2, 0].set_ylabel("LIF") + axes[2, 0].set_title("LIF") + axes[2, 0].set_ylabel("V in mV") v1, spikes = sM.lifac_simulation(stimulus, step) - axes[3, 0].plot(np.arange(0, duration, step)[:len(v1)], v1) + spikes = np.array(spikes) - 0.5 + axes[3, 0].plot(np.arange(-0.5, duration, step)[:len(v1)], v1) axes[3, 0].eventplot(spikes, lineoffsets=1.2, linelengths=0.2, colors="black") time, freq = hF.calculate_time_and_frequency_trace(spikes, step) axes[3, 1].plot(time, freq) - axes[3, 0].set_ylabel("LIFAC") + axes[3, 0].set_title("LIFAC") + axes[3, 0].set_ylabel("V in mV") v1, spikes = sM.lifac_ref_simulation(stimulus, step) - axes[4, 0].plot(np.arange(0, duration, step)[:len(v1)], v1) + spikes = np.array(spikes) - 0.5 + axes[4, 0].plot(np.arange(-0.5, duration, step)[:len(v1)], v1) axes[4, 0].eventplot(spikes, lineoffsets=1.2, linelengths=0.2, colors="black") time, freq = hF.calculate_time_and_frequency_trace(spikes, step) axes[4, 1].plot(time, freq) - axes[4, 0].set_ylabel("LIFAC + ref") + axes[4, 0].set_title("LIFAC + ref") + axes[4, 0].set_ylabel("V in mV") axes[4, 0].set_xlabel("Time [s]") axes[4, 1].set_xlabel("Time [s]") + # v1, spikes = sM.lifac_ref_noise_simulation(stimulus, step) # axes[5, 0].plot(np.arange(0, duration, step)[:len(v1)], v1) # axes[5, 0].eventplot(spikes, lineoffsets=1.2, linelengths=0.2, colors="black") diff --git a/Figures_Stimuli.py b/Figures_Stimuli.py index 8b442a0..8733a0d 100644 --- a/Figures_Stimuli.py +++ b/Figures_Stimuli.py @@ -3,16 +3,56 @@ from stimuli.SinusoidalStepStimulus import SinusoidalStepStimulus from stimuli.SinusAmplitudeModulation import SinusAmplitudeModulationStimulus import numpy as np import matplotlib.pyplot as plt +import Figure_constants as consts def main(): stimuli_protocol_examples() + am_generation() pass +def am_generation(): + fig, axes = plt.subplots(3, 1, sharey=True, sharex=True, 
figsize=consts.FIG_SIZE_MEDIUM) + + start = 0 + end = 1 + + time_start = -0.2 + time_end = 1.2 + step_size = 0.00005 + + frequency = 55 + contrast = 0.5 + + base_stim = SinusoidalStepStimulus(frequency, 0, start, end - start) + + values = base_stim.as_array(time_start, time_end - time_start, step_size) + time = np.arange(time_start, time_end, step_size) + + axes[0].set_title("Fish EOD") + axes[0].plot(time, values) + + am = np.zeros(len(values)) + am[int((start-time_start)/step_size):int((end-time_start)/step_size)] = contrast + + axes[1].set_title("Amplitude modulation") + axes[1].plot(time, values*am) + axes[1].plot(time, am) + axes[1].set_ylabel("Voltage [mV]") + + axes[2].set_title("Resulting stimulus") + axes[2].plot(time, (values*am) + values) + axes[2].set_xlabel("Time [s]") + axes[2].set_xlim((time_start, time_end)) + plt.tight_layout() + plt.savefig("thesis/figures/amGeneration.pdf") + plt.close() + + def stimuli_protocol_examples(): - fig, axes = plt.subplots(2, 1) + fig, axes = plt.subplots(3, 1, sharey=True, sharex=True, figsize=consts.FIG_SIZE_MEDIUM) start = 0 end = 1 @@ -24,6 +64,15 @@ def stimuli_protocol_examples(): frequency = 55 contrast = 0.5 + base_stim = SinusoidalStepStimulus(frequency, 0, start, end - start) + + values = base_stim.as_array(time_start, time_end - time_start, step_size) + time = np.arange(time_start, time_end, step_size) + + axes[0].set_title("Baseline Stimulus") + axes[0].plot(time, values) + # axes[1].set_ylabel("Voltage [mV]") + # Sinusoidal STEP stimulus # frequency, contrast, start_time=0, duration=np.inf, amplitude=1 step_stim = SinusoidalStepStimulus(frequency, contrast, start, end - start) @@ -31,8 +80,9 @@ def stimuli_protocol_examples(): values = step_stim.as_array(time_start, time_end - time_start, step_size) time = np.arange(time_start, time_end, step_size) - axes[0].plot(time, values) - axes[0].set_ylabel("Voltage [mV]") + axes[1].set_title("Step Stimulus") + axes[1].plot(time, values) + axes[1].set_ylabel("Voltage [mV]") # SAM Stimulus: mod_freq = 10 @@ -44,14 +94,17 @@ def stimuli_protocol_examples(): beat_time = np.arange(start, end, step_size) beat_values = np.sin(beat_time * 2 * np.pi * mod_freq) * contrast + 1 - axes[1].plot(beat_time, beat_values) + axes[2].plot(beat_time, beat_values) - axes[1].set_xlabel("Time [s]") - axes[1].set_ylabel("Voltage [mV]") + axes[2].set_title("SAM Stimulus") + axes[2].set_xlabel("Time [s]") + # axes[2].set_ylabel("Voltage [mV]") + axes[2].set_xlim((time_start, time_end)) + plt.tight_layout() plt.savefig("thesis/figures/stimuliExamples.pdf") plt.close() if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/Fitter.py b/Fitter.py index 138dde3..11d63c1 100644 --- a/Fitter.py +++ b/Fitter.py @@ -140,6 +140,9 @@ class Fitter: print("tried impossible value") return 1000 + abs(X[i]) * 10000 + if X[6] > 1.05/self.eod_freq: # refractory period shouldn't be larger than one eod period + print("tried too large ref period") + return 1000 + abs(X[6]) * 10000 self.base_model.set_variable("mem_tau", X[0]) self.base_model.set_variable("noise_strength", X[1]) self.base_model.set_variable("input_scaling", X[2]) @@ -235,7 +238,7 @@ class Fitter: # raise ValueError("Some error value(s) is/are NaN!") return error_list - def calculate_f0_curve_error(self, model, fi_curve_model): + def calculate_f0_curve_error(self, model, fi_curve_model, dendritic_delay=0.005): buffer = 0.00 test_duration = 0.05 @@ -251,6 +254,11 @@ class Fitter: model_start_idx = int((stimulus_start - buffer) / 
fi_curve_model.get_sampling_interval()) model_end_idx = int((stimulus_start + buffer + test_duration) / model.get_sampling_interval()) + idx_offset = int(dendritic_delay / model.get_sampling_interval()) + + model_start_idx += idx_offset + model_end_idx += idx_offset + if len(time_prediction) == 0 or len(time_prediction) < model_end_idx \ or time_prediction[0] > fi_curve_model.get_stimulus_start(): error_f0_curve = 200 diff --git a/ModelFit.py b/ModelFit.py index 5306b08..d496917 100644 --- a/ModelFit.py +++ b/ModelFit.py @@ -252,7 +252,7 @@ class ModelFit: cell_times, cell_freqs = fi_curve_cell.get_mean_time_and_freq_traces() axes[2].plot(cell_times[f_zero_curve_contrast_idx], cell_freqs[f_zero_curve_contrast_idx]) - axes[2].plot(time, freq) + axes[2].plot(np.array(time) + 0.005, freq) axes[2].set_title("blue: cell, orange: model") axes[2].set_xlim(-0.15, 0.35) diff --git a/analysis.py b/analysis.py index fba8043..ecf7db5 100644 --- a/analysis.py +++ b/analysis.py @@ -26,22 +26,32 @@ def main(): # exit(0) # sensitivity_analysis(dir_path, max_models=3) - behaviour_keys = ["burstiness", "coefficient_of_variation", "serial_correlation", - "vector_strength", "f_inf_slope", "f_zero_slope", "f_zero_middle"] + behaviour_keys = ["Burstiness", "coefficient_of_variation", "serial_correlation", + "vector_strength", "f_inf_slope", "f_zero_slope", "baseline_frequency"] fits_info = get_fit_info(dir_path) + total_fits = len(fits_info) for cell in sorted(fits_info.keys()): model_values = fits_info[cell][1] - if model_values["vector_strength"] < 0.4: - del fits_info[cell] - print("excluded because of vs") - - elif model_values["f_inf_slope"] / fits_info[cell][2]["f_inf_slope"] > 2: - del fits_info[cell] - print("f_inf bad") - + # if model_values["vector_strength"] < 0.4: + # del fits_info[cell] + # print("excluded because of vs") + # + # elif model_values["f_inf_slope"] / fits_info[cell][2]["f_inf_slope"] > 2: + # del fits_info[cell] + # print("f_inf bad") + # + # elif abs((model_values["baseline_frequency"] / fits_info[cell][2]["baseline_frequency"]) - 1) > 0.05: + # del fits_info[cell] + # print("baseline freq bad") + # + # elif fits_info[cell][2]["Burstiness"] == 0 or abs((model_values["Burstiness"] / fits_info[cell][2]["Burstiness"]) - 1) > 0.65: + # del fits_info[cell] + # print("burstiness bad") + + print("'good' fits of total fits: {} / {}".format(len(fits_info), total_fits)) errors = calculate_percent_errors(fits_info) create_boxplots(errors) labels, corr_values, corrected_p_values = behaviour_correlations(fits_info, model_values=False) diff --git a/cell_overview.py b/cell_overview.py index 666c3eb..b5572cd 100644 --- a/cell_overview.py +++ b/cell_overview.py @@ -5,12 +5,13 @@ from FiCurve import FICurveCellData import os import numpy as np + def main(): # plot_visualizations("cells/") # full_overview("cells/master_table.csv", "cells/") - recalculate_saved_preanalysis("data/test_data/") + recalculate_saved_preanalysis("data/final/") def recalculate_saved_preanalysis(data_folder): diff --git a/find_thresholds_for_spike_detection.py b/find_thresholds_for_spike_detection.py index 75f4df9..e9fd296 100644 --- a/find_thresholds_for_spike_detection.py +++ b/find_thresholds_for_spike_detection.py @@ -8,8 +8,9 @@ def main(): # choose_thresholds() precalculate_baseline_spiketimes() + def precalculate_baseline_spiketimes(): - threshold_file_path = "invivo_data/thresholds.tsv" + threshold_file_path = "data/thresholds.tsv" thresholds_dict = {} if os.path.exists(threshold_file_path): @@ -24,7 +25,7 @@ def 
precalculate_baseline_spiketimes():
             thresholds_dict[name] = [thresh, min_length, step_size]
 
-    for cell_data in icelldata_of_dir("invivo_data/"):
+    for cell_data in icelldata_of_dir("data/final/"):
         name = os.path.basename(cell_data.get_data_path())
 
         if name not in thresholds_dict.keys():
@@ -39,8 +40,8 @@ def precalculate_baseline_spiketimes():
 
 
 def choose_thresholds():
-    base_path = "invivo_data/"
-    threshold_file_path = "invivo_data/thresholds.tsv"
+    base_path = "data/final/"
+    threshold_file_path = "data/thresholds.tsv"
     re_choose_thresholds = False
 
     thresholds_dict = {}
@@ -55,8 +56,18 @@ def choose_thresholds():
             min_length = int(line[2])
             step_size = int(line[3])
             thresholds_dict[name] = [thresh, min_length, step_size]
+            print("Already done:", name)
         else:
             thresholds_dict[name] = [thresh]
+            print("Already done (no len/step): ", name)
+
+    count = 0
+    for item in sorted(listdir(base_path)):
+        if item in thresholds_dict.keys() and thresholds_dict[item][0] != 99 and not re_choose_thresholds:
+            continue
+
+        count += 1
+    print("cells to do:", count)
 
     for item in sorted(listdir(base_path)):
         # starting assumptions:
@@ -81,11 +92,12 @@ def choose_thresholds():
             continue
 
         data.get_base_spikes(thresh, min_length=min_split_length, split_step=split_step_size, re_calculate=True,
-                            only_first=True)
+                             only_first=True)
         stop = False
         print("Threshold was {:.2f}, Min Length was {:.0f}, Split step size was {:.0f}".format(thresh, min_split_length, split_step_size))
+
         response = input(
             "Choose: 'ok', 'stop', or a number (threshold) or three numbers (threshold, minlength, step_size) separated with commas")
diff --git a/run_Fitter.py b/run_Fitter.py
index 11d07d2..4b6c461 100644
--- a/run_Fitter.py
+++ b/run_Fitter.py
@@ -16,9 +16,9 @@ import multiprocessing as mp
 
 # SAVE_DIRECTORY = "./results/invivo_results/"
-SAVE_DIRECTORY = "./results/invivo-1/"
+SAVE_DIRECTORY = "./results/final_1/"
 # SAVE_DIRECTORY_BEST = "./results/invivo_best/"
-SAVE_DIRECTORY_BEST = "./results/invivo-1_best/"
+SAVE_DIRECTORY_BEST = "./results/final_1_best/"
 
 # [bf, vs, sc, cv, isi_hist, bursty, f_inf, f_inf_slope, f_zero, f_zero_slope, f0_curve]
 ERROR_WEIGHTS = (2, 2, 1, 1, 0, 1, 1, 1, 0, 1)
diff --git a/run_cellwise_fitting.sh b/run_cellwise_fitting.sh
index 9e4cca6..66f9042 100755
--- a/run_cellwise_fitting.sh
+++ b/run_cellwise_fitting.sh
@@ -1,5 +1,5 @@
-for file in data/invivo_bursty/*; do
+for file in data/final/*; do
     if [ -d "$file" ]; then
         nice python3 run_Fitter.py --cell $file
     fi
diff --git a/test.py b/test.py
index 379b098..50f06b8 100644
--- a/test.py
+++ b/test.py
@@ -20,7 +20,9 @@ from thunderfish.eventdetection import threshold_crossing_times, threshold_cross
 folder = "./results/invivo-1/"
 
 for cell in os.listdir(folder):
-    fit = get_best_fit(os.path.join(folder, cell))
+    fit = get_best_fit(os.path.join(folder, cell), use_comparable_error=False)
+    fit.generate_master_plot()
+    continue
     cell_data = fit.get_cell_data()
     model = fit.get_model()
     fi = FICurveModel(model, np.arange(-0.5, 0.6, 0.1), cell_data.get_eod_frequency())
@@ -99,6 +101,7 @@ def rms(array):
     square = np.array(array)**2
     return np.sqrt(np.mean(square))
 
+
 def perc_smaller_value(isis, value):
     isis = np.array(isis)
     fullfilled = isis < value
@@ -119,8 +122,6 @@ for cell_data in cell_datas:
 cell_data_idx = np.arange(0, len(cell_datas), 1)
 
 burstiness, cell_data_idx = (list(t) for t in zip(*sorted(zip(burstiness, cell_data_idx))))
-
-
 for i in range(len(burstiness)):
     base = BaselineCellData(cell_datas[cell_data_idx[i]])
     isis = np.array(base.get_interspike_intervals()) * 1000
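
The burstiness sort at the end of the test.py hunk above keeps two parallel lists in step by zipping, sorting, and unzipping them. An equivalent formulation with numpy's argsort may read more directly; the snippet below is only an illustrative sketch with made-up values, not part of the patch:

import numpy as np

# Hypothetical stand-ins for the burstiness values and cell indices built in test.py.
burstiness = [3.2, 0.4, 1.7]
cell_data_idx = np.arange(len(burstiness))

# Sort both sequences by ascending burstiness in one step.
order = np.argsort(burstiness)
burstiness = list(np.asarray(burstiness)[order])
cell_data_idx = list(cell_data_idx[order])
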
diff --git a/test_for_cells.py b/test_for_cells.py
index 6feb68d..df31207 100644
--- a/test_for_cells.py
+++ b/test_for_cells.py
@@ -6,6 +6,7 @@ import os
 import matplotlib.pyplot as plt
 import pyrelacs.DataLoader as Dl
 from Baseline import BaselineCellData
+from FiCurve import FICurveCellData
 
 data_save_path = "test_routines/test_files/"
 read = False
@@ -14,7 +15,157 @@ read = False
 def main():
     # test_kraken_files()
     # test_for_species()
-    test_for_vector_strength()
+    # test_for_vector_strength()
+
+    # test_cells()
+
+    # read_test_cells_tsv()
+    inspect_fi_curve()
+
+
+def inspect_fi_curve():
+    data = "data/final/"
+    skip = True
+    for cell in sorted(os.listdir(data)):
+        if cell == "2015-01-15-ab-invivo-1":
+            skip = False
+        if skip:
+            continue
+        cell_path = data + cell
+
+        cell_data = CellData(cell_path)
+        print(cell_path)
+        ficurve = FICurveCellData(cell_data, cell_data.get_fi_contrasts())
+
+        ficurve.plot_fi_curve()
+
+
+def read_test_cells_tsv():
+    file_path = "data/test_cells.tsv"
+    data_path = "data/final/"
+    rejected = os.listdir("rejected_cells/")
+    cells = os.listdir(data_path)
+    count_rejected = 0
+    count_accepted = 0
+    missing_cells = []
+    with open(file_path, 'r') as file:
+        for line in file:
+            parts = line.strip().split('\t')
+
+            if parts[0] == "True":
+                # print(parts[0])
+                count_accepted += 1
+
+                cell_name = parts[1].split("/")[-1]
+                if cell_name in cells:
+                    pass
+                elif cell_name not in rejected:
+                    missing_cells.append(cell_name)
+                else:
+                    print("already thrown:", cell_name)
+
+
+
+            else:
+                count_rejected += 1
+
+    print("accepted:", count_accepted)
+    print("rejected:", count_rejected)
+    print("Total:", count_rejected + count_accepted)
+
+    for c in sorted(missing_cells):
+        print(c)
+
+
+def test_cells():
+    directory = "/mnt/invivo_data/"
+
+    fi_curve_min_contrasts = 7
+    fi_curve_min_trials = 7
+
+    baseline_min_duration = 29.9
+
+    accepted_ls = []
+    files = []
+    baseline_lengths = []
+    ficurve_contrasts = []
+    count_errors = 0
+    for cell in os.listdir(directory):
+        accepted = True
+        cell_path = os.path.join(directory, cell)
+        if not os.path.isdir(cell_path):
+            continue
+
+        if not os.path.exists(cell_path + "/info.dat"):
+            count_errors += 1
+            continue
+        print(cell_path)
+        try:
+            parser = DatParser(cell_path)
+
+            # Test length of baseline recording
+            cell_baseline_lengths = parser.get_baseline_length()
+            long_enough = False
+            for baseline_len in cell_baseline_lengths:
+                if baseline_len >= baseline_min_duration:
+                    long_enough = True
+                    break
+
+            if not long_enough:
+                accepted = False
+
+            # test for recording quality
+            quality = parser.get_quality()
+            if quality.lower() not in ["good", "bursty", "fair"]:
+                print("bad quality")
+                accepted = False
+
+            # test for species
+            if "lepto" not in parser.get_species().lower():
+                accepted = False
+
+            if not parser.get_cell_type().lower() == "p-unit":
+                print(parser.get_cell_type())
+                accepted = False
+            # Test for enough trials and tested contrasts
+            contrasts = parser.get_fi_curve_contrasts()
+            count = 0
+            for c in contrasts:
+                if c[1] >= fi_curve_min_trials:
+                    count += 1
+
+            if count < fi_curve_min_contrasts:
+                accepted = False
+
+            accepted_ls.append(accepted)
+            files.append(cell_path)
+            baseline_lengths.append(max(cell_baseline_lengths))
+            ficurve_contrasts.append(count)
+
+        except FileNotFoundError as e:
+            print("parser didn't work: File not found")
+            # accepted_ls.append(False)
+            # files.append(cell_path)
+            # baseline_lengths.append(-1)
+            # ficurve_contrasts.append(-1)
+            # count_errors += 1
+        except UnicodeDecodeError as e:
+            print("parser didn't work: UnicodeError")
+            # accepted_ls.append(False)
+            # files.append(cell_path)
+            # baseline_lengths.append(-1)
+            # ficurve_contrasts.append(-1)
+            count_errors += 1
+    print("Error:", count_errors)
+
+    with open("data/test_cells.tsv", 'w') as file:
+
+        for i in range(len(accepted_ls)):
+            file.write("{}\t{}\t{}\t{}\n".format(accepted_ls[i], files[i], baseline_lengths[i], ficurve_contrasts[i]))
+
+
+
+
 
 def test_for_vector_strength():
     directory = "invivo_data/"
@@ -98,6 +249,10 @@ def test_for_species():
     # for cell in sorted_cells["Apteronotus albifrons"]:
     #     print(cell)
 
+
+# temp ssh mount command:
+# sudo sshfs -o allow_other,default_permissions efish@kraken:/home/efish/ephys/invivo-1/intra/ /mnt/invivo_data/
+
 def test_kraken_files():
     if read:
         directory = "/mnt/invivo_data/"
diff --git a/thesis/sources/Golowasch2002.pdf b/thesis/sources/Golowasch2002.pdf
new file mode 100644
index 0000000..fd204df
Binary files /dev/null and b/thesis/sources/Golowasch2002.pdf differ
diff --git a/thesis/to-read/dennis/papers b/thesis/to-read/dennis/papers
new file mode 100644
index 0000000..e521f60
--- /dev/null
+++ b/thesis/to-read/dennis/papers
@@ -0,0 +1,25 @@
+What is Stochastic Resonance? Definitions, Misconceptions, Debates and its relevance to Biology
+Mark McDonnell, Derek Abbott
+
+Limits on reliable information flows through stochastic populations
+Lucas Boczkowski, Emanuele Natale, Ofer Feinerman, Amos Korman
+
+Effects of Synaptic Noise and Filtering on the frequency Response of spiking neurons
+Nicolas Brunel, Frances Chance, Nicolas Fourcaud, L.F. Abbott
+
+Intermediate intrinsic diversity enhances neural population coding
+Shreejoy Tripathy, Krishnan Padmanabhan ...
+
+Intrinsic biophysical diversity decorrelates neuronal firing while increasing information content
+Krishnan Padmanabhan, Nathaniel Urban
+
+Neuromodulation of Spike-timing precision in sensory neurons
+Cyrus Billimoria, Ralph DiCaprio, John Birmingham, L.F. Abbott, Eve Marder
+
+Adaptive Exponential integrate-and-fire Model as an Effective description of neuronal activity
+
+The competing benefits of Noise and Heterogeneity in neural coding
+Eric Hunsberger, Matthew Scott, Chris Eliasmith
+
+More is different - Broken symmetry and the nature of the hierarchical structure of science
+P.W. Anderson
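
For reference, the per-cell screening that test_cells() performs can be collapsed into a single predicate. The sketch below restates those criteria under the assumption of a DatParser-like object exposing the getters added in DataParserFactory.py; the helper name cell_is_accepted is hypothetical and not part of the patch:

def cell_is_accepted(parser, min_baseline=29.9, min_contrasts=7, min_trials=7):
    # Baseline recording must be at least ~30 s long.
    if max(parser.get_baseline_length()) < min_baseline:
        return False
    # Recording quality noted in info.dat must be acceptable.
    if parser.get_quality().lower() not in ("good", "bursty", "fair"):
        return False
    # Species must be Apteronotus leptorhynchus and the cell a p-unit.
    if "lepto" not in parser.get_species().lower():
        return False
    if parser.get_cell_type().lower() != "p-unit":
        return False
    # The FI protocol needs at least min_contrasts contrasts with min_trials trials each.
    tested = sum(1 for c in parser.get_fi_curve_contrasts() if c[1] >= min_trials)
    return tested >= min_contrasts

With such a helper, the accepted/rejected bookkeeping in test_cells() reduces to accepted_ls.append(cell_is_accepted(parser)).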