# susceptibility1/utils_all_down.py

import math
import os
import pathlib
import shutil
import subprocess
import time
from textwrap import wrap
import matplotlib as mpl
import numpy
import paramiko
import psutil
from matplotlib.ticker import NullFormatter, ScalarFormatter, StrMethodFormatter
from scipy.signal import vectorstrength
# from numba import jit
#from utils_test import compare_all_eod_creations, plot_test, plt_adaptation_convergence_rate, powerspectraallfish, \
# test_fish, test_wave
try:
    import rlxnix as rlx
except ImportError:
    rlx = None
try:
    from numba import jit
except ImportError:
    # fall back to a no-op decorator when numba is not installed
    def jit(*args, **kwargs):
        def decorator_jit(func):
            return func
        return decorator_jit
import scipy
import scipy as sp
from matplotlib import gridspec as gridspec, mlab as ml, pyplot as plt, ticker as ticker
from scipy.integrate import odeint
from thunderfish import eodanalysis, fakefish, harmonics
try:
    from utils_invis_calc import *
except ImportError:
    # utils_invis_calc only exists on the original development machine
    pass
import numpy as np
from IPython import embed
from scipy.ndimage import gaussian_filter
from scipy.interpolate import interp1d
import pandas as pd
import nixio as nix
import inspect
try:
    from credencials import credencials0
except ImportError:
    # the credentials module lives one directory up in some checkouts
    import sys
    sys.path.insert(0, '..')
    from credencials import credencials0
def load_four_durations(mt, mt_group, mt_idx, mt_nr,forth_cut = []):
if 'fish2alone.StartTime' in mt_group.keys():
first_cut = 0
try:
second_cut = mt_group['fish2alone.StartTime'].iloc[mt_idx] # .loc[indices[ll]]
except:
print('index problem')
embed()
third_cut = mt_group['fish1.StartTime'].iloc[mt_idx] # .loc[indices[ll]]
forth_cut = mt_group['fish1.StartTime'].iloc[mt_idx] + mt_group['fish1.Duration'].iloc[
mt_idx] # .loc[indices[ll]]
# check for problems in the saved nix files
if (forth_cut < third_cut) or (third_cut < second_cut) or (second_cut < first_cut):
print('something wrong with length 1')
cont = False
else:
cont = True
delay = find_delay(mt, mt_nr, second_cut, forth_cut)
zeroth_cut = -delay
fish_cuts = [zeroth_cut, first_cut, second_cut, third_cut,
forth_cut]
fish_number = ['base_0', 'control_01', 'control_02', '012']
whole_duration = mt_group['fish1.Duration'].iloc[mt_idx] + \
mt_group['fish2alone.Duration'].iloc[mt_idx] + \
mt_group['fish1alone.Duration'].iloc[mt_idx] + delay
else: # if 'Duration' in mt_group
first_cut = 0
# mt.metadata.pprint(max_depth = -1)
try:
second_cut = mt_group['Duration'].iloc[mt_idx] - mt_group['fish2.Duration'].iloc[mt_idx]
except:
print('duration problem')
embed()
third_cut = mt_group['Duration'].iloc[
mt_idx] # .loc[indices[ll]] # times_features.loc[position_test, 'Duration']
delay = find_delay(mt, mt_nr, second_cut, third_cut)
fish_number = ['base_0', 'control_01', '012']
zeroth_cut = - delay
fish_cuts = [zeroth_cut, first_cut, second_cut, third_cut]
whole_duration = mt_group['Duration'].iloc[mt_idx] # times_features.loc[position_test, 'Duration']
cont = True
    # note: control_02 does not always exist here! It is not needed for the subtraction, but it is needed for the remaining analyses.
return zeroth_cut, first_cut, second_cut, third_cut, fish_number, fish_cuts, whole_duration, delay, cont
def find_delay(mt, mt_nr, second_cut, last_cut):
features, delay_name = feature_extract(mt)
    # there is a pre-saved delay, but it seems to always be 0.5 s
delay = mt.features[delay_name].data[:][mt_nr][0]
    # check whether the cuts are all close to 1 s; in that case the delay should also be about 1 s,
    # and if the stored delay is implausibly short we re-derive it from the multi-tag positions
if (second_cut > 0.75) & (second_cut < 1.25) & (delay < 0.75):
delay = delay_and_reality_alignment(mt, second_cut, mt_nr, last_cut)
elif (second_cut > 0.25) & (second_cut < 0.75) & (delay < 0.25):
delay = delay_and_reality_alignment(mt, second_cut, mt_nr, last_cut)
    # and the same check for segments of about 0.5 s
return delay
def feature_extract(mt):
features = []
delay_name = []
for ff, f in enumerate(mt.features):
features.append(f.data.name)
if 'delay' in f.data.name:
delay_name = f.data.name
return features, delay_name
def delay_and_reality_alignment(mt, second_cut, mt_nr, last_cut):
    previous = mt_nr - 1
    # check whether there have been multi-tags before this one
    if previous >= 0:
        delay = (mt.positions[:][mt_nr] - (mt.positions[:][previous] + last_cut))[0]
else:
if len(mt.positions[:]) == 1:
            # if this is the only multi-tag, assume we can go back by the same duration
delay = second_cut
else:
            # otherwise estimate the maximal possible delay between two subsequent multi-tags
delay = (mt.positions[:][mt_nr + 1] - mt.positions[:][mt_nr] - last_cut)[0]
# and we restrict the delay maximally to the first cut after zero
if delay > second_cut * 1.02:
delay = second_cut * 1.02
return delay
def calc_hist(spikes_mt, bin_rate=0.001):
bin_nr = int((spikes_mt[-1] - spikes_mt[0]) / bin_rate)
hist = np.histogram(spikes_mt, bins=bin_nr)
time = hist[1][0:-1] - np.diff(hist[1])[0] / 2
sampling = np.diff(time)[0]
hist = hist[0] / sampling
return hist, time, sampling
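# Minimal usage sketch (not part of the original analysis pipeline; the spike
# train below is invented): convert spike times into a rate histogram whose
# values are counts per bin divided by the bin width, i.e. a rate in Hz.
def _example_calc_hist():
    spikes = np.sort(np.random.rand(200))  # 200 hypothetical spike times within 1 s
    rate, time, bin_width = calc_hist(spikes, bin_rate=0.001)
    return rate, time, bin_width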
def titles_EIF(eod_fish_r, eod_fish_r_rec, color_p1, color_p3, mult_nr, eod_fr, eod_fe, stimulus, stimulus_rec,
colorful_title):
if eod_fe[mult_nr] == eod_fr + 50:
stimuli = [stimulus, stimulus_rec]
eod_fish_rs = [eod_fish_r, eod_fish_r_rec]
if colorful_title:
titles = [
[r'$(\cos( \omega_{1} t) + \alpha\cos($',
r'$\mathbf{%.1f \omega_{1}}$' % ((eod_fe[mult_nr] - eod_fr) / eod_fr + 1), r'$ t)$'],
[r'$\lfloor$', r'$ \cos(\omega_{1}t) + \alpha\cos($',
r'$\mathbf{%.1f \omega_{1}}$' % ((eod_fe[mult_nr] - eod_fr) / eod_fr + 1), r'$ t)$', r'$ \rfloor_0$'
]]
color_add_pos = [['black', 'black', 'black', 'black'],
['black', 'black', 'black', 'black', 'black', 'black', ], ]
color_add_pos = [['black', 'black', 'black'], [color_p1, 'black', 'black', 'black', color_p1, ], ]
else:
stimuli = [stimulus, stimulus_rec]
eod_fish_rs = [eod_fish_r, eod_fish_r_rec]
titles = ['No nonlinearity:\n' +
r'$(\cos( \omega_{1} t) + \alpha\cos(%.1f \omega_{1} t))$' % (
(eod_fe[mult_nr] - eod_fr) / eod_fr + 1),
'Threshold:\n' + r'$\lfloor \cos(\omega_{1}t) + \alpha\cos(%.1f \omega_{1} t) \rfloor_0$' % (
(eod_fe[mult_nr] - eod_fr) / eod_fr + 1), ]
color_add_pos = ['black']
add_pos = [-0.2, -0.4]
else:
stimuli = [stimulus_rec, stimulus_rec ** 3]
eod_fish_rs = [eod_fish_r_rec, eod_fish_r_rec ** 3]
if colorful_title:
titles = [
[r'$Threshold:\\$', r'$\lfloor$', r'$(\cos( \omega_{1} t) + \alpha\cos($',
r'$\mathbf{%.1f \omega_{1}} $' % ((eod_fe[mult_nr] - eod_fr) / eod_fr + 1), r'$t))$', r'$ \rfloor_0$'],
[r'$Threshold cubed:\\$', r'$\lfloor$', r'$ \cos(\omega_{1}t) + \alpha\cos($',
r'$\mathbf{%.1f \omega_{1}}$' % ((eod_fe[mult_nr] - eod_fr) / eod_fr + 1), r'$ t)$', r'$ \rfloor_0$',
r'$^3$'
]]
color_add_pos = [[color_p1, 'black', 'black', 'black', color_p1, ],
[color_p1, 'black', 'black', 'black', color_p1, color_p3], ]
else:
stimuli = [stimulus_rec, stimulus_rec ** 3]
titles = [
'Threshold:\n' + r'$\lfloor(\cos( \omega_{1} t) + \alpha\cos(%.1f \omega_{1} t)) \rfloor_0$' % (
(eod_fe[mult_nr] - eod_fr) / eod_fr + 1),
'Threshold cubed:\n' + r'$\lfloor \cos(\omega_{1}t) + \alpha\cos(%.1f \omega_{1} t) \rfloor_0^3$' % (
(eod_fe[mult_nr] - eod_fr) / eod_fr + 1), ]
color_add_pos = ['black']
        # individual position of the title
add_pos = [-0.42, -0.32]
return add_pos, color_add_pos, titles, stimuli, eod_fish_rs
def create_stimulus_SAM(SAM, eod_fish_e, eod_fish_r, eod_f1, e, eod_fr, time_array, a_f1, eod_fj=[0], j=0, a_fj=0,
three='', test=False):
# this is a function to implement the SAM or the direct stimulation
# direct
if SAM == '':
stimulus = eod_fish_e + eod_fish_r
eod_fish_sam = []
# SAM
else:
        # NOT YET ADAPTED FOR THREE WAVES!
# todo: adapt here env frequency
# print('SAM DONE')
if three == '':
# if a_fj != 0:
# env_f = np.abs(np.abs(eod_fr - eod_fe[e]) - np.abs(eod_fr - eod_fj[j]))
# else:
env_f = eod_f1[e] - eod_fr
time_fish_sam = time_array * 2 * np.pi * (env_f)
eod_fish_sam = a_f1 * np.sin(time_fish_sam)
stimulus = eod_fish_r * (1 + eod_fish_sam)
else:
time_fish_e = time_array * 2 * np.pi * (np.abs(eod_fr - eod_f1[e]))
eod_fish_1 = a_f1 * np.sin(time_fish_e)
time_fish_j = time_array * 2 * np.pi * (np.abs(eod_fr - eod_fj[j]))
eod_fish_2 = a_fj * np.sin(time_fish_j)
stimulus = eod_fish_r * (1 + eod_fish_1 + eod_fish_2)
eod_fish_sam = eod_fish_1 + eod_fish_2
if test:
test_times()
return stimulus, eod_fish_sam
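# Minimal usage sketch (illustrative parameters only, not taken from any recording):
# build a SAM stimulus for a receiver EOD at 750 Hz and one foreign fish at 800 Hz,
# which yields a 50 Hz envelope. Any non-empty SAM string selects the SAM branch.
def _example_create_stimulus_SAM():
    deltat = 1 / 40000
    time_array = np.arange(0, 0.5, deltat)
    eod_fr = 750.0                                   # receiver EOD frequency
    eod_f1 = [800.0]                                 # foreign fish frequency
    eod_fish_r = np.sin(2 * np.pi * eod_fr * time_array)
    stimulus, eod_fish_sam = create_stimulus_SAM('SAM', [], eod_fish_r, eod_f1, 0,
                                                 eod_fr, time_array, a_f1=0.2)
    return stimulus, eod_fish_sam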
def make_paramters(stimulus_length, deltat, eod_fr, a_fr, a_fe, eod_fe, e, phase=0):
time = np.arange(0, stimulus_length, deltat)
time_fish_r = time * 2 * np.pi * eod_fr
eod_fish_r = a_fr * np.sin(time_fish_r + phase)
time_fish_e = time * 2 * np.pi * eod_fe[e]
eod_fish_e = a_fe * np.sin(time_fish_e)
stimulus_am, _ = create_stimulus_SAM('', eod_fish_e, eod_fish_r, eod_fe, e, eod_fr, time, a_fe)
stimulus = stimulus_am.copy()
stimulus[stimulus < 0.0] = 0.0
return time, stimulus, eod_fish_r, eod_fish_e, stimulus_am
def interp_arrays(fp_array, tp_array, start=-0.01, end=1.01, step=0.01):
fp = np.arange(start, end, step)
interp_a1 = np.unique([fp_array, tp_array], axis=1)
func = interp1d(interp_a1[0], interp_a1[1],
kind='linear',
bounds_error=False, fill_value=(interp_a1[1][0], interp_a1[1][-1]))
tp = func(fp)
return fp, tp
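# Minimal usage sketch (made-up sample points): resample scattered (x, y) pairs
# onto the regular grid from -0.01 to 1.01 in steps of 0.01 that interp_arrays
# uses by default; duplicate points are removed before interpolation.
def _example_interp_arrays():
    x = np.array([0.0, 0.3, 0.3, 0.7, 1.0])
    y = np.array([1.0, 2.0, 2.0, 0.5, 1.5])
    fp, tp = interp_arrays(x, y)
    return fp, tp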
def feature_extract_cut(mt, l, name_here='', times=pd.DataFrame(), position=0, features=[]):
if name_here == '':
name_here = mt.name
if len(features) < 1:
features, delay_name = feature_extract(mt)
# just feature with name cut
for f in range(len(features)):
name = features[f][len(name_here) + 1::]
times.loc[position, name] = \
mt.features[features[f]].data[:][l][0]
return times
def ISI_frequency(time, spikes, fill=0.0): # function of JAN
zrate = 0.0 if fill == 'extend' else fill # firing rate for empty trials
# rates = np.zeros((len(time), len(spikes)))
# try:
if len(spikes) > 2:
isis = np.diff(spikes) # compute interspike intervals
isis_diff = 1.0 / isis
# except:
#
if len(spikes) > 2:
# interpolate inverse ISIs at `time`:
fv = (1.0 / isis[0], 1.0 / isis[-1]) if fill == 'extend' else (fill, fill)
fr = interp1d(spikes[:-1], isis_diff, kind='previous',
bounds_error=False, fill_value=fv)
frate = fr(time)
else:
frate = np.zeros(len(time)) + zrate
else:
frate = []
isis_diff = []
return frate, isis_diff
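# Minimal usage sketch (invented spike train): evaluate the inverse-ISI firing
# rate of a regular 100 Hz spike train on a 1 kHz time grid; with fill='extend'
# the first and last inverse ISI are used outside the range of the spikes.
def _example_ISI_frequency():
    time = np.arange(0.0, 1.0, 0.001)
    spikes = np.arange(0.0, 1.0, 0.01)  # regular 100 Hz spiking
    frate, inverse_isis = ISI_frequency(time, spikes, fill='extend')
    return frate, inverse_isis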
def any_spikes(spikes_mt, minimal=0, maximal=10):
return len(spikes_mt[(spikes_mt > minimal) & (spikes_mt < maximal)]) > 0
def create_beat_corr(hz_range, eod_fr):
beat_corr = hz_range % eod_fr
beat_corr[beat_corr > eod_fr / 2] = eod_fr[beat_corr > eod_fr / 2] - beat_corr[beat_corr > eod_fr / 2]
return beat_corr
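# Minimal usage sketch (hypothetical frequencies): fold stimulus frequencies onto
# the 0 .. EODf/2 range of perceivable beat frequencies; both arguments have to
# be arrays of the same length because eod_fr is indexed with a boolean mask.
def _example_create_beat_corr():
    eod_fr = np.full(4, 750.0)
    stimulus_freqs = np.array([760.0, 1100.0, 740.0, 1500.0])
    return create_beat_corr(stimulus_freqs, eod_fr)  # -> [10., 350., 10., 0.]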
def create_spikes_mat2(spikes, sampling_rate, eod_end=[]):
if eod_end:
if eod_end > spikes[-1]:
spikes_mat = np.zeros(int(eod_end * sampling_rate + 2))
else:
spikes_mat = np.zeros(int(spikes[-1] * sampling_rate + 2))
else:
spikes_mat = np.zeros(int(spikes[-1] * sampling_rate + 2))
spikes_idx = np.round((spikes) * sampling_rate)
for spike in spikes_idx:
spikes_mat[int(spike)] = 1 * sampling_rate
return spikes_mat
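# Minimal usage sketch (invented spike times in seconds): turn a spike train into
# a delta-function-like array sampled at 40 kHz, with each spike bin set to the
# sampling rate so that smoothing with a normalized kernel yields a rate in Hz.
def _example_create_spikes_mat2():
    spikes = np.array([0.0015, 0.0100, 0.0234])
    return create_spikes_mat2(spikes, sampling_rate=40000)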
def cut_eod_sequences(eod_mt, cuts, time_eod=[], cut=0.05, rec=True, fish_number='', fillup=True,
fish_number_base=['012', 'control_01', 'control_02', 'base_0']):
# if you wanna have it in a list
if fish_number == '':
eods = []
else:
eods = {}
if len(time_eod) < 1:
delay = np.abs(cuts[0])
time_eod = np.arange(0, len(eod_mt) / 40000, 1 / 40000) - delay
# try:
eod_both = eod_mt[(time_eod > cuts[0] + cut) & (time_eod < cuts[-1] - cut)]
# except:
# print('EOD_both problem')
# embed()
length = []
for c in range(len(cuts) - 1):
        if fish_number == '':
            cont = True
        elif 'interspace' not in fish_number[c]:  # the 'interspace' entries are needed for the detection analysis
            cont = True
        else:
            cont = False
if cont:
eod_2waves = eod_mt[(time_eod > cuts[c] + cut) & (time_eod < cuts[c + 1] - cut)]
if rec:
eod_2waves[eod_2waves < np.mean(eod_2waves)] = 0
if fish_number == '':
eods.append(eod_2waves)
else:
eods[fish_number[c]] = eod_2waves
length.append(len(eod_2waves))
new_length = np.min(length)
for cc, c in enumerate(range(len(cuts) - 1)):
# make all the others the same length
# if fish_number != '':
        if fish_number == '':
            cont = True
        elif 'interspace' not in fish_number[c]:
            cont = True
        else:
            cont = False
if cont:
if fish_number != '':
eods[fish_number[c]] = eods[fish_number[c]][0:new_length]
else:
eods[cc] = eods[cc][0:new_length]
if (fish_number != ''):
if fillup:
for ll in range(len(fish_number_base)):
if ('interspace' not in fish_number_base[ll]):
if fish_number_base[ll] not in eods.keys():
eods[fish_number_base[ll]] = []
# if 'interspace' in fish_number[c]:
return eods, eod_both
def remove_interspace_fish_nr(fish_number):
if 'interspace' in fish_number:
fish_number_base = fish_number * 1
fish_number_base.remove('interspace')
else:
fish_number_base = fish_number * 1
return fish_number_base
def cut_ends(eod_global, eod_globalEfield):
if len(eod_global) > len(eod_globalEfield):
eod_local_reconstruct = eod_global[0:len(eod_globalEfield)] + eod_globalEfield
elif len(eod_global) < len(eod_globalEfield):
eod_local_reconstruct = eod_global + eod_globalEfield[0:len(eod_global)]
else:
eod_local_reconstruct = eod_global + eod_globalEfield
return eod_local_reconstruct
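# Minimal usage sketch (made-up traces): reconstruct a local EOD from a global EOD
# and the global E-field trace whose lengths differ by a few samples; the longer
# array is clipped before the two are summed.
def _example_cut_ends():
    eod_global = np.ones(1000)
    eod_global_efield = 0.1 * np.ones(998)
    return cut_ends(eod_global, eod_global_efield)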
def final_df_choice_repetitive(dfs, eod, eods, low_beat=60, high_beat=45, wish_DF=[]):
equally_spaced_DF = np.concatenate([dfs[0:25], dfs[25:40:2], dfs[40:53:2], dfs[54:-1]])
if wish_DF == []:
wish_DF = [low_beat - eod, eod / 2 - high_beat - eod, eod - low_beat - eod, low_beat, eod / 2 - high_beat,
low_beat + eod, eod / 2 - high_beat + eod * 2]
dfs_possible = [[]] * len(wish_DF)
eods_possible = [[]] * len(wish_DF)
for i in range(len(wish_DF)):
dfs_possible[i] = [equally_spaced_DF[np.argmin(np.abs(equally_spaced_DF - wish_DF[i]))]]
eods_possible[i] = [eods[np.argmin(np.abs(eods - wish_DF[i]))]]
final_DF = np.unique(dfs_possible)
final_eod = np.concatenate(eods_possible)
return final_DF, final_eod
def cut_end(dataset, dfs):
    # manually cut off the end of recordings whose quality decreased,
    # chosen by visual inspection of the firing rate over time
if dataset == '2019-05-07-an-invivo-1':
x = 168
dfs = dfs[0:x]
elif dataset == '2019-05-07-az-invivo-1':
x = 444
dfs = dfs[0:x]
elif dataset == '2019-05-07-ca-invivo-1':
x = 476
dfs = dfs[0:x]
# elif dataset == '2019-09-23-ak-invivo-1':
# x = 476
# dfs = dfs[83:x]
elif dataset == '2019-09-23-aj-invivo-1':
x = 330
dfs = dfs[0:x]
elif dataset == '2019-09-23-ah-invivo-1':
x = 133
dfs = dfs[0:x]
elif dataset == '2019-10-21-ak-invivo-1':
x = 243
dfs = dfs[0:x]
elif dataset == '2019-10-21-al-invivo-1':
x = 80
dfs = dfs[0:x]
elif dataset == '2019-10-21-am-invivo-1':
x = 53
dfs = dfs[0:x]
elif dataset == '2019-10-21-an-invivo-1':
x = 149
dfs = dfs[0:x]
elif dataset == '2019-10-28-ac-invivo-1':
x = 292
dfs = dfs[0:x]
elif dataset == '2019-10-28-af-invivo-1':
x = 140
dfs = dfs[0:x]
elif dataset == '2019-10-28-ah-invivo-1':
x = 20
dfs = dfs[0:x]
elif dataset == '2019-10-28-ak-invivo-1':
x = 343
dfs = dfs[0:x]
elif dataset == '2019-11-08-ag-invivo-1':
x = 97
dfs = dfs[0:x]
elif dataset == '2019-11-18-ad-invivo-1':
x = 97
dfs = dfs[59:-1]
elif dataset == '2019-11-18-ai-invivo-1':
x = 10
dfs = dfs[0:x]
elif dataset == '2019-09-23-ad-invivo-1':
x = 391
dfs = dfs[0:x]
else:
x = -1
return x, dfs
def load_data(testregime, data, redo=True, big_file='', nrs=np.array([0.1, 0.9, 1.1, 1.4, 2.1, 3.1]), add=''):
# redo = True
if (testregime) or (not os.path.exists('toblerone_beatspikes' + add + '.npy')) or (redo == True):
data_all = pd.read_pickle(load_folder_name('calc_model') + '/' + big_file + '.pkl')
just_cell = data_all[data_all['dataset'] == data[0]]
cell = just_cell[just_cell['contrasts'] == 20]
eod = cell['eodf'] # _orig'].iloc[0]
wish_DF = (nrs - 1) * np.mean(eod) + np.mean(eod)
wish_DF = wish_DF - np.mean(eod)
dfs = np.unique(cell['df_orig'])
final_DF_orig, final_eod = final_df_choice_repetitive(dfs, np.mean(eod), eod, low_beat=60, high_beat=45,
wish_DF=wish_DF)
dfs_new = np.unique(cell['df'])
final_DF, final_eod = final_df_choice_repetitive(dfs_new, np.mean(eod), eod, low_beat=60, high_beat=45,
wish_DF=wish_DF)
# data_beat = pd.DataFrame()
# counter = 0
# for delta in final_DF:
# this_df = cell[cell['df'] == delta]
# for l in range(len(this_df)):
# data_beat.loc[counter, 'df'] = np.array(this_df['df'].iloc[l]) #
# data_beat = save_structure(counter, data_beat, this_df.iloc[l], name='eod')
# counter += 1
data_dir = load_folder_name('data') + 'cells'
dataset = data[0]
base = dataset.split(os.path.sep)[-1] + ".nix"
path = data_dir + '/' + dataset
full_path = path + '/' + base
# try to load the data file
if not os.path.exists(path + '/' + base):
dated_up = update_ssh_file(path + '/' + base)
file = nix.File.open(full_path, nix.FileMode.ReadOnly)
b = file.blocks[0]
# access data only if correct stimulation is available
data_beat = []
counter = 0
for stims in b.data_arrays:
if 'sinewave-1_Contrast' in stims.name:
mt = b.multi_tags['sinewave-1']
dfs = b.data_arrays['sinewave-1_DeltaF'][:]
x, dfs_new = cut_end(dataset, dfs)
for idx, df in enumerate(dfs_new):
if df in final_DF_orig:
data_beat.append({})
data_beat[-1]['efield'] = mt.retrieve_data(idx, 'GlobalEFieldStimulus')[:]
data_beat[-1]['global'] = mt.retrieve_data(idx, 'EOD')[:]
# data_beat[-1]['global'] = mt.retrieve_data(idx, 'EOD1')[:]
# data_beat[-1]['local'] = data_beat[-1]['Spikes-1']# + data_beat[-1]['global']
data_beat[-1]['spikes'] = mt.retrieve_data(idx, 'Spikes-1')[:] - mt.positions[:][idx]
data_beat[-1]['local'] = data_beat[-1]['efield'] + data_beat[-1]['global']
# data_beat[-1]['local'] = mt.retrieve_data(idx, 'LocalEOD-1')[:]
data_beat[-1]['df_orig'] = df
                        # the newly computed DFs are the ones we actually want to use here
df_new = final_DF[np.argmin(np.abs(final_DF - df))]
data_beat[-1]['df'] = df_new
data_beat[-1]['idx'] = idx
counter += 1
data_beat = pd.DataFrame(data_beat)
np.save('toblerone_beatspikes' + add + '.npy', [nrs, cell, final_eod, dfs, final_DF, data_beat])
# plot local
else:
        nrs, cell, final_eod, dfs, final_DF, data_beat = np.load('toblerone_beatspikes' + add + '.npy', allow_pickle=True)
return nrs, cell, final_eod, dfs, final_DF, data_beat
def get_data_pivot_three(frame, score, sumrange=500, matrix_extent='min', matrix_sorted='grid_sorted',
orientation='f1 on x, f2 on y', gridspacing=[], dfs=[]):
eodf = frame.EODf
# df = spikes_dev[spikes_dev['square'] == wave]
indexes = []
resorted = []
pivot = []
# orientation = ''
cut_type = ''
if len(frame) > 0:
if 'all' in matrix_sorted:
if len(dfs) < 1:
if ('fish1.DeltaF' in frame):
# dfs[0] = 'fish2.DeltaF'
# dfs[1] = 'fish1.DeltaF'
freqs2 = 'fish2alone.Frequency'
freqs1 = 'fish1alone.Frequency'
eodf = frame.EODf
else:
freqs2 = 'fish2.Frequency' # 'fish2.DeltaF'
freqs1 = 'Frequency' # 'DeltaF'
eodf = -frame.df2 + frame['fish2.Frequency']
else:
freqs1 = dfs[1]
freqs2 = dfs[0]
pivot = pd.crosstab(index=frame[freqs1], columns=frame[freqs2], values=frame[score],
aggfunc='mean')
if 'interp' in matrix_sorted:
array = np.ma.array(pivot, mask=np.isnan(pivot))
x = np.arange(0, array.shape[1])
y = np.arange(0, array.shape[0])
# mask invalid values
array = np.ma.masked_invalid(array)
xx, yy = np.meshgrid(x, y)
# get only the valid values
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]
# col = pivot.columns
# row = pivot.index
pivot[:] = scipy.interpolate.griddata((x1, y1), newarr.ravel(),
(xx, yy),
method='cubic')
# pivot
# plt.imshow(GD1, interpolation='nearest')
indexes = []
resorted = []
elif matrix_sorted == 'grid_sorted':
if 'EODf' in frame.keys():
eodf, indexes, resorted, orientation, cut_type = justmatrix(frame, 'EODf', grid_spacing=gridspacing,
freqs=dfs,
matrix_extent=matrix_extent,
sumrange=sumrange, orientation=orientation)
elif 'Frequency' in frame.keys():
eodf, indexes, resorted, orientation, cut_type = justmatrix(frame, 'Frequency',
grid_spacing=gridspacing,
freqs=dfs,
matrix_extent=matrix_extent,
sumrange=sumrange, orientation=orientation)
else:
eodf = []
pivot, indexes, resorted, orientation, cut_type = justmatrix(frame, score, grid_spacing=gridspacing,
freqs=dfs,
matrix_extent=matrix_extent, eodf=eodf,
sumrange=sumrange, orientation=orientation)
elif matrix_sorted == 'justmatrix_withcolumns':
pivot = matrix_with_columns(frame, score)
return pivot, eodf, indexes, resorted, orientation, cut_type
def freq_names_autodetection(df):
if ('fish1.DeltaF' in df):
freqs0 = 'fish2alone.Frequency'
freqs1 = 'fish1alone.Frequency'
eodf = df.EODf
else:
freqs0 = 'fish2.Frequency' # 'fish2.DeltaF'
freqs1 = 'Frequency' # 'DeltaF'
eodf = -df.df2 + df['fish2.Frequency']
return (freqs0, freqs1), eodf
def optimize_grid_spacing(freqs, df, test=False):
axis_spacing = []
for d in freqs:
# try:
sorted_DF2 = np.diff(np.sort(df[d]))
axis_spacing.extend(sorted_DF2[sorted_DF2 > 0.03])
grid_spacing = np.mean(axis_spacing) # np.median(axis_spacing)#np.mean(axis_spacing)# + np.std(axis_spacing)*0.5#*2
if 'm1' in freqs:
if grid_spacing < 0.05:
grid_spacing = 0.05
elif grid_spacing <= 0.15: # grid_spacing < 0.1
grid_spacing = 0.1
else:
grid_spacing = 0.2
else:
if grid_spacing < 10:
grid_spacing = 20
elif grid_spacing < 20:
grid_spacing = 38
elif grid_spacing < 25:
grid_spacing = 40
if test:
plt.hist(axis_spacing, bins=30)
plt.axvline(x=np.mean(axis_spacing))
plt.axvline(x=np.median(axis_spacing))
plt.show()
# print(grid_spacing)
return grid_spacing
def create_matrix_extend(df, matrix_extent, freq_name1, freq_name2, grid_spacing):
    # default cut_type so that it is defined for all matrix_extent branches
    cut_type = 'rest'
    if matrix_extent == 'onlymin':
minx = np.min(df[freq_name1])
maxx = np.max(df[freq_name1]) + grid_spacing
miny = np.min(df[freq_name2])
maxy = np.max(df[freq_name2]) + grid_spacing
freq1 = np.arange(minx, maxx, grid_spacing)
freq2 = np.arange(miny, maxy, grid_spacing)
elif matrix_extent == 'min': # create base on the minima and maxima
minx = np.min(df[freq_name1])
maxx = np.max(df[freq_name1]) + grid_spacing
miny = np.min(df[freq_name2])
maxy = np.max(df[freq_name2]) + grid_spacing
step1 = grid_spacing
step2 = grid_spacing
if (miny >= 0.57) & (maxy < 0.97):
cut_type = 'lb'
step1 = 0.1
minx = 0.57 # 0.47
maxx = 0.97 # 1.05
elif (minx >= 0.47) & (maxx <= 1.07):
cut_type = 'lb'
step1 = 0.1
minx = 0.47 # 0.42
maxx = 1.07 # 1.15
elif (minx >= 1.03) & (maxx <= 1.43):
cut_type = 'rt'
step1 = 0.1
minx = 1.03 # 0.95
maxx = 1.43 # 1.55
elif (minx >= 0.93) & (maxx <= 1.53):
cut_type = 'rt'
step1 = 0.1
minx = 0.93
maxx = 1.53
if (miny >= 0.5) & (maxy <= 0.95):
step2 = 0.09
cut_type = 'lb'
miny = 0.5 # 0.47
maxy = 0.95 # 1.05
elif (miny >= 0.41) & (maxy <= 1.04):
step2 = 0.09
cut_type = 'lb'
miny = 0.41 # 0.42
maxy = 1.04
elif (minx >= 1.05) & (maxx <= 1.5):
step2 = 0.09
cut_type = 'rt'
minx = 1.05 # 0.9
maxx = 1.5
elif (miny >= 0.94) & (maxy <= 1.59):
step2 = 0.09
cut_type = 'rt'
miny = 0.94
maxy = 1.59
else:
cut_type = 'rest'
freq1 = np.arange(minx, maxx, step1)
freq2 = np.arange(miny, maxy, step2)
elif matrix_extent == 'padded': # symmetric around origin
min_both = np.min([np.min(df[freq_name1]), np.min(df[freq_name2])])
max_both = np.max([np.max(df[freq_name1]), np.max(df[freq_name2])])
med_both = max_both - (max_both + np.abs(min_both)) / 2
freq1 = np.arange(np.min([np.min(df[freq_name1]), med_both]), max_both + grid_spacing, grid_spacing)
freq2 = np.arange(np.min([np.min(df[freq_name2]), med_both]), max_both + grid_spacing, grid_spacing)
else: # starting always at zero
freq1 = np.arange(0, np.max(df[freq_name1]) + grid_spacing, grid_spacing)
freq2 = np.arange(0, np.max(df[freq_name2]) + grid_spacing, grid_spacing)
return freq1, freq2, cut_type
def find_code_vs_not():
names_extra_modules = names_folders()
add_on = ''
name_output = ''
version = 'code'
for name in names_extra_modules:
try:
inspect.stack()[-1][1]
except:
# try:
inspect.stack()[-1][1]
# except:
# print('something wierd with stack index')
# embed()
        if name in inspect.stack()[-1][1]:  # the starting directory is found here
add_on = '../' # /code/
name_output = name + '/'
version = 'develop'
if ('code' not in inspect.stack()[-1][1]) | (
not (('alex' in os.getlogin()) | ('rudnaya' in os.getlogin()))):
version = 'public' # für alle sollte version public sein!
if ((not 'alex' in os.getlogin()) & ('rudnaya' not in os.getlogin())):
version = 'public'
add_on = '../'
name_output = 'develop'
elif ('rudnaya' in os.getlogin()):
if ('Masterarbeit/work/code' not in str(pathlib.Path().absolute())):
version = 'public'
add_on = '../'
name_output = 'develop'
if (add_on != '') & (name_output != ''):
save_folder = add_on + 'plt_' + name_output
else:
save_folder = ''
return version, add_on, name_output, name_output.replace('/', '-'), save_folder
def names_folders():
names_extra_modules = ['talk_final', 'thesis', 'Thesis', 'threewave2023', 'highbeats', 'suseptibility',
'susceptibility1',
'susceptibility2']
return names_extra_modules
def justmatrix(df, score, grid_spacing=[], orientation=[], matrix_extent='min', freqs=[], eodf=[], sumrange=500):
    if len(freqs) > 1 and '1' in freqs[1]:
part1 = freqs[1]
part2 = freqs[0]
freqs[0] = part1
freqs[1] = part2
# find the axis names if not provided
    if len(freqs) == 0:
freqs, eodf = freq_names_autodetection(df)
# find the optimal grid spacing if not provided
if grid_spacing == []:
grid_spacing = optimize_grid_spacing(freqs, df)
print('gridspacing ' + str(grid_spacing))
# find the axes for the new grid
try:
freq1, freq2, cut_type = create_matrix_extend(df, matrix_extent, freqs[0], freqs[1], grid_spacing)
except:
print('utils create_matrix_extend')
embed()
# resort all of this in a grid
try:
frame, count2, resorted_final, resorted_initial, indexes, count = resort_in_grid(freq2, freq1, sumrange, df,
freqs, score)
except:
print('utils resorted problem')
embed()
_, resorted_final, _ = add_column_index_names_pivot(orientation, eodf, freqs, freq1, freq2, resorted_final)
orientation, dataframe, eodf = add_column_index_names_pivot(orientation, eodf, freqs, freq1, freq2, frame)
return dataframe, indexes, resorted_final, orientation, cut_type
def add_column_index_names_pivot(orientation, eodf, freqs, freq1, freq2, frame):
if len(eodf) > 0:
eodf = np.nanmean(eodf)
# print(orientation)
if ('Frequency' in freqs[0]) or ('Frequency' in freqs[1]):
index = np.round(((freq2 - eodf) / eodf + 1) * 100) / 100
columns = np.round(((freq1 - eodf) / eodf + 1) * 100) / 100
else:
index = np.round((freq2) * 100) / 100
columns = np.round((freq1) * 100) / 100
if len(orientation) > 0:
if orientation == 'f2 on x, f1 on y':
dataframe = pd.DataFrame(data=np.transpose(frame), index=columns,
columns=index)
orientation = 'f2 on x, f1 on y'
dataframe.columns.name = 'Mult 2' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 1'
else:
dataframe = pd.DataFrame(data=frame, index=index,
columns=columns)
orientation = 'f1 on x, f2 on y'
dataframe.columns.name = 'Mult 1' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 2'
# print(2)
else:
if len(index) >= len(columns):
dataframe = pd.DataFrame(data=np.transpose(frame), index=columns,
columns=index)
orientation = 'f2 on x, f1 on y'
dataframe.columns.name = 'Mult 2' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 1'
else:
dataframe = pd.DataFrame(data=frame, index=index,
columns=columns)
orientation = 'f1 on x, f2 on y'
dataframe.columns.name = 'Mult 1' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 2'
# print(1)
else:
dataframe = pd.DataFrame(data=frame, index=freq2, columns=freq1)
# print(orientation)
if len(orientation) > 0:
if orientation == 'f1 on x, f2 on y':
dataframe = pd.DataFrame(data=frame, index=freq2,
columns=freq1)
orientation = 'f1 on x, f2 on y'
dataframe.columns.name = 'Mult 1' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 2'
# print(3)
else:
dataframe = pd.DataFrame(data=np.transpose(frame), index=freq1,
columns=freq2)
orientation = 'f2 on x, f1 on y'
dataframe.columns.name = 'Mult 2' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 1'
else:
if len(freq2) >= len(freq1):
dataframe = pd.DataFrame(data=np.transpose(frame), index=freq1,
columns=freq2)
orientation = 'f2 on x, f1 on y'
dataframe.columns.name = 'Mult 2' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 1'
else:
dataframe = pd.DataFrame(data=frame, index=freq2,
columns=freq1)
orientation = 'f1 on x, f2 on y'
dataframe.columns.name = 'Mult 1' # 'Jammer - Receiver $f_{stim}/f_{EOD}$'
dataframe.index.name = 'Mult 2'
# print(orientation)
return orientation, dataframe, eodf
def resort_in_grid(freq2, freq1, sumrange, df, freqs, score):
resorted_initial = np.empty([len(freq2), len(freq1), sumrange])
indexes = np.empty([len(freq2), len(freq1), sumrange])
count = np.empty([len(freq2), len(freq1)])
count2 = np.empty([len(freq2), len(freq1)])
resorted_final = np.empty([len(freq2), len(freq1)])
indexes[:, :] = [np.NaN]
resorted_initial[:, :] = [np.NaN]
resorted_final[:, :] = [np.NaN]
count[:, :] = [0]
count2[:, :] = [1]
for r in range(len(df[score])):
ind1 = np.argmin(np.abs(freq1 - df[freqs[0]].iloc[r]))
ind2 = np.argmin(np.abs(freq2 - df[freqs[1]].iloc[r]))
resorted_initial[ind2, ind1, int(count[ind2, ind1])] = df[score].iloc[r]
indexes[ind2, ind1, int(count[ind2, ind1])] = r # df[score].index[r]
count[ind2, ind1] += 1
if np.isnan(resorted_final[ind2, ind1]):
resorted_final[ind2, ind1] = df[score].iloc[r]
else:
resorted_final[ind2, ind1] += df[score].iloc[r]
count2[ind2, ind1] += 1
frame = resorted_final / count2
plot = False
if plot:
for i in range(12):
plt.subplot(3, 4, i + 1)
plt.imshow(resorted_initial[:, :, i])
plt.show()
return frame, count2, resorted_final, resorted_initial, indexes, count
def matrix_with_columns(df, score):
dfs = ['Deltaf1', 'Deltaf2']
DF_distance = []
for d in dfs:
sorted_DF2 = np.diff(np.sort(df[d]))
DF_distance.extend(sorted_DF2[sorted_DF2 > 0.3])
grid_spacing = int(np.mean(DF_distance))
# rows, cols = np.shape(pivot)
DF1_grid = np.arange(0, np.max(df['Deltaf1']) + 5, grid_spacing)
DF2_grid = np.arange(0, np.max(df['Deltaf2']) + 5, grid_spacing)
    resorted = pd.DataFrame(columns=['df2', 'df1', 'value'])
# resorted.loc[0,'df2'] = 3
position = 0
for r in range(len(df[score])):
df1 = np.argmin(np.abs(DF1_grid - df['Deltaf1'].iloc[r]))
df2 = np.argmin(np.abs(DF2_grid - df['Deltaf2'].iloc[r]))
wheredf1 = np.where(df1 == [resorted['df1']])[0]
wheredf2 = np.where(df2 == resorted['df2'][wheredf1])[0]
if len(resorted) == 0: # np.isnan(resorted[df1, df2])
# resorted[df1, df2] = df[score].iloc[r]
# resorted.append({})
# resorted = pd.DataFrame(columns=['df2', 'df1', 'value'])
resorted.loc[position, 'value'] = df[score].iloc[r]
resorted.loc[position, 'df1'] = DF1_grid[df1]
resorted.loc[position, 'df2'] = DF2_grid[df2]
# resorted[-1]['value'] = df[score].iloc[r]
# resorted[-1]['df1'] = DF1_grid[df1]
# resorted[-1]['df2'] = DF2_grid[df2]
position += 1
mean_needed = 'no'
elif (type(wheredf1) == int) and (type(wheredf2) == int):
print('whiere something')
embed()
where = np.where((df1 not in [resorted[-1]['df1']]) and (df2 not in [resorted[-1]['df2']]))
resorted[where, 'value'] = [resorted[-1]['value']]
resorted[where, 'value'].append(df[score].iloc[r])
resorted[where, 'df1'] = DF1_grid[df1]
resorted[where, 'df2'] = DF2_grid[df2]
mean_needed = 'yes'
else:
# resorted[df1, df2] = df[score].iloc[r]
# resorted.append({})
resorted.loc[position, 'value'] = df[score].iloc[r]
resorted.loc[position, 'df1'] = DF1_grid[df1]
resorted.loc[position, 'df2'] = DF2_grid[df2]
position += 1
mean_needed = 'no'
# resorted[df1, df2].extent(df[score].iloc[r])
if mean_needed == 'yes':
rows = np.shape(resorted)
for r in rows:
resorted[-1]['value'].iloc[r] = np.mean(resorted[-1]['value'].iloc[r])
df1 = pd.DataFrame(DF1_grid, columns=['DF1'])
df2 = pd.DataFrame(DF2_grid, columns=['DF2'])
# resorted.crosstab(index='df1', columns='df2', values='value')
dataframe = pd.crosstab(index=resorted['df1'], columns=resorted['df2'], values=resorted['value'], aggfunc='mean')
return dataframe
def create_beat_corr2(hz_range, eod_fr):
return np.abs(hz_range - eod_fr * np.round(hz_range / eod_fr))
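# Minimal sketch (illustrative values): create_beat_corr2 is the closed-form
# variant of create_beat_corr above and accepts a scalar EODf; e.g. 1100 Hz on a
# 750 Hz EOD gives a 350 Hz beat, 1500 Hz (the second harmonic) gives 0 Hz.
def _example_create_beat_corr2():
    return create_beat_corr2(np.array([760.0, 1100.0, 1500.0]), 750.0)  # -> [10., 350., 0.]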
def cut_spikes_sequences(delay, spikes_mt, sampling_rate, fish_cuts, cut=0.05, fish_number='',
fish_number_base=['base_0', 'control_01', 'control_02', '012'],
devname_orig=['05'], cut_length=True, mean_type='',
emb=False, cut_compensate=False): # devname = ['10', '5', '2', '05', 'original', 'isi']
if 'Count' in mean_type:
bin_rates = [0.02, 0.01, 0.004, 0.001]
# for bin_rate in bin_rates:
# smoothened10, time10 = calc_hist(spikes_mt, bin_rate = 0.02)
        smoothened5, time5, _ = calc_hist(spikes_mt, bin_rate=0.01)
        smoothened2, time2, _ = calc_hist(spikes_mt, bin_rate=0.004)
        smoothed05, time05, _ = calc_hist(spikes_mt, bin_rate=0.001)
        mat = smoothed05
        spikes_transform = get_spikes_transformed_three(cut, delay, fish_cuts, spikes_mt)
test = False
if test:
test_smooth()
# spikes_mat = create_spikes_mat2(spikes_mt, sampling_rate/bin_rate)
# bin_rate = sampling_rate * 0.0005 * 2 # here is the bin windiw corresponding to 1 ms wide
devname = ['5', '2', '05'] # 'isi', 'original''10',
arrays = [smoothened5, smoothened2, smoothed05] # smoothened10,
times = [time5, time2, time05] # time10,
else:
spikes_transform = get_spikes_transformed_three(cut, delay, fish_cuts, spikes_mt)
mat = create_spikes_mat2(spikes_transform, sampling_rate, eod_end=[])
# i want to have trials that have a certain length
time = np.arange(0, len(mat) / sampling_rate, 1 / sampling_rate) - delay
devname = devname_orig
arrays = []
times = []
smoothened2 = time
smoothed05 = time
for dev in devname:
if dev == 'isi':
try:
isi, isis_diff = ISI_frequency(time, spikes_mt, fill=0.0)
if len(isi) > 0:
arrays.append(isi)
times.append(time)
else:
devname = ['10', '5', '2', '05', 'original']
except:
print('ISI doesnt work')
else:
if dev == '05':
window = 0.0005 * sampling_rate
smoothed05 = gaussian_filter(mat, sigma=window)
smoothened = smoothed05
elif dev == '2':
window02 = 0.002 * sampling_rate
smoothened2 = gaussian_filter(mat, sigma=window02)
smoothened = smoothened2
elif dev == '5':
window = 0.005 * sampling_rate
smoothened5 = gaussian_filter(mat, sigma=window)
smoothened = smoothened5
elif dev == '10':
window = 0.01 * sampling_rate
smoothened10 = gaussian_filter(mat, sigma=window)
smoothened = smoothened10
elif dev == 'original':
# window = 0.01 * sampling_rate
# smoothened10 = mat
smoothened = mat
else:
window = float(dev) / 1000 * sampling_rate
smoothened10 = gaussian_filter(mat, sigma=window)
smoothened = smoothened10
arrays.append(smoothened)
times.append(time)
arrays_calc, new_length, spikes_cut = cut_threefish_arrays(arrays, cut, cut_length, delay, fish_cuts, fish_number,
fish_number_base, spikes_transform, times,
cut_compensate=cut_compensate)
# if len(arrays_calc[i]) ==0:
if emb:
embed()
return devname, smoothened2, smoothed05, mat, times, arrays_calc, new_length / sampling_rate, spikes_cut
def get_spikes_transformed_three(cut, delay, fish_cuts, spikes_mt):
spikes_transform = spikes_mt[(spikes_mt > fish_cuts[0] + cut) & (spikes_mt < fish_cuts[-1] - cut)] + delay
return spikes_transform
def cut_threefish_arrays(arrays, cut, cut_length, delay, fish_cuts, fish_number, fish_number_base, spikes_transform,
times, cut_compensate=False):
arrays_calc = [[]] * int(len(arrays))
if fish_number != '':
spikes_cut = {}
else:
spikes_cut = []
if cut_compensate:
cut_comp = cut
else:
cut_comp = 0
for i in range(len(arrays)):
# in case I want to have it in a dictionary
length = []
if fish_number != '':
cut_arrays = {} # []
else:
cut_arrays = [] # []
for cc, c in enumerate(range(len(fish_cuts) - 1)):
try:
if fish_number != '':
if 'interspace' not in fish_number[cc]:
cut_array = arrays[i][(times[i] > fish_cuts[c] + cut) & (times[i] < fish_cuts[c + 1] - cut)]
if fish_number != '':
cut_arrays[fish_number[cc]] = cut_array
else:
cut_arrays.append(cut_array)
# try:
length.append(len(cut_array))
if i == 0:
spikes_pure = spikes_transform - delay
spikes_cut[fish_number[cc]] = [
spikes_pure[(spikes_pure > fish_cuts[c] + cut) & (spikes_pure < fish_cuts[c + 1] - cut)] -
fish_cuts[c] - cut_comp]
# spikes_pure = spikes_transform-delay
else:
if i == 0:
spikes_pure = spikes_transform - delay
spikes_cut.append([spikes_pure[(spikes_pure > fish_cuts[c] + cut) & (
spikes_pure < fish_cuts[c + 1] - cut)] - fish_cuts[c] - cut_comp])
# spikes_pure = spikes_transform-delay
except:
print('utils func cc wrong')
embed()
# find the shortest one
if length != []:
if cut_length:
new_length = np.min(length)
else:
new_length = np.max(length)
for cc, c in enumerate(range(len(fish_cuts) - 1)):
# make all the others the same length
if cut_length:
if 'interspace' not in fish_number[cc]:
if fish_number != '':
cut_arrays[fish_number[cc]] = cut_arrays[fish_number[cc]][0:new_length]
else:
cut_arrays[cc] = cut_arrays[cc][0:new_length]
# if len(cut_arrays[cc]) != 3000:
# embed()
for ll in range(len(fish_number_base)):
if 'interspace' not in fish_number_base[ll]:
if fish_number_base[ll] not in cut_arrays.keys():
cut_arrays[fish_number_base[ll]] = []
arrays_calc[i] = cut_arrays
else:
new_length = float('nan')
return arrays_calc, new_length, spikes_cut
def load_durations(mt_nr, mt, mt_group, mt_idx, mean_type='', emb=False):
if 'DetectionAnalysis' not in mean_type:
##################
# get the times where to cut the stimulus
zeroth_cut, first_cut, second_cut, third_cut, fish_number, fish_cuts, whole_duration, delay, cont = load_four_durations(
mt, mt_group, mt_idx, mt_nr)
else:
if 'fish2alone.StartTime' in mt_group.keys():
neuronal_delay = 0.005
length = 0.075
minus_cut = mt_group['fish2alone.StartTime'].iloc[mt_idx]
second_cut = mt_group['fish1.StartTime'].iloc[mt_idx] + neuronal_delay
third_cut = second_cut + length
corr_beat = create_beat_corr(np.array([mt_group.DF2.iloc[0]]), np.array([mt_group.EODf.iloc[0]]))
if corr_beat == 0:
period = 1 / 50
else:
period = 1 / corr_beat[0]
distance = np.ceil((length + neuronal_delay) / period) * period
zeroth_cut = second_cut - distance
first_cut = zeroth_cut + length
forth_cut = second_cut + distance
cut5 = forth_cut + length
cut6 = forth_cut + distance
cut7 = cut6 + length
cut0 = second_cut - distance * 2
cut1 = cut0 + length
if (cut0 < minus_cut) or (third_cut < second_cut) or (second_cut < first_cut):
print('something wrong with length')
cont = False
else:
cont = True
features, delay_name = feature_extract(mt)
# between = (mt.positions[:][l + 1] - mt.positions[:][
# l] - forth_cut)[0]
delay = mt.features[delay_name].data[:][mt_nr]
# zeroth_cut = -delay[0]
last_cut = mt_group['fish1.StartTime'].iloc[mt_idx] + mt_group['fish1.Duration'].iloc[
mt_idx] # .loc[indices[ll]]
fish_cuts = [0, cut0, cut1, zeroth_cut, first_cut, second_cut, third_cut, forth_cut, cut5, cut6, cut7,
last_cut]
fish_number = ['interspace', 'beat0', 'interspace', 'beat1', 'interspace', 'event', 'interspace', 'beats0',
'interspace', 'beats1', 'interspace']
whole_duration = mt_group['fish1.Duration'].iloc[mt_idx] + \
mt_group['fish2alone.Duration'].iloc[mt_idx] + \
mt_group['fish1alone.Duration'].iloc[mt_idx] + delay
else:
            # todo: this is still missing
print('not implemented yet')
embed()
if emb:
embed()
return zeroth_cut, first_cut, second_cut, third_cut, fish_number, fish_cuts, whole_duration, cont
def calc_cut_off_diffs(file, cut_off):
if 'to' not in file:
cut_off_lower = 0
else:
cut_off_lower = float(file.split('to')[0].split('_')[1])
cut_off_diff = cut_off - cut_off_lower
return cut_off_lower, cut_off_diff
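# Minimal usage sketch with a made-up file label that follows the
# '<prefix>_<lower>to<upper>...' pattern the parser expects: a band-limited noise
# file from 50 to 300 Hz yields a lower cut-off of 50 Hz and a bandwidth of 250 Hz.
def _example_calc_cut_off_diffs():
    return calc_cut_off_diffs('InputArr_50to300hz', cut_off=300)  # -> (50.0, 250.0)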
def file_name_clean_up(file, cut_off):
if str(cut_off) not in file:
cut_off = calc_cut_offs(file)
if 'gwn' in str(file):
file_name = str(file)[0:-4] # + str(sd)
file_name_save = str(file)[0:-4] # + str(sd) # [0:-3]
elif 'FileStimulus-file-gaussian' in str(file):
file_name = str(file)[0:-4]
file_name_save = str(file)[0:-4] # [0:-3]
elif 'FileStimulus-file-' in str(file):
file_name = str(file) # [0:-1] #+ str(sd)
file_name_save = str(file) # [0:-1]
elif 'InputArr' in str(file):
file_name = file.replace('s.dat', '')
file_name_save = file.replace('s.dat', '')
# file_name = str(file)[0:-4]
# file_name_save = str(file)[0:-4]
elif 'blw' in str(file):
file_name = str(file)[0:-4]
file_name_save = str(file)[0:-4]
elif file == '':
file_name = ''
file_name_save = ''
else:
print('file name problem')
embed()
return file_name, file_name_save, cut_off
def plot_sheme_noise(axsheme):
point = [1, 1, 1]
x_points = [0] # , 1, 1.5, 2
sigmaf = 314
f = np.arange(-2000, 2000, 1)
gauss = np.exp(-(f ** 2 / (2 * sigmaf ** 2))) ** 2
axsheme.show_spines('lbrt')
axsheme.set_spines_outward('lbrt', 0)
axsheme.xaxis.set_ticks_position('bottom')
gaussian_plot = False
if gaussian_plot:
axsheme.plot(f, gauss, color='grey')
axsheme.text(0, 1.1, 'Normal \n distribution', transform=axsheme.transAxes) # Normal \n distribution
axsheme.set_xticks_fixed([0], ['0']) # , , 550'$\omega$'
axsheme.set_xlim(-550, 550)
axsheme.set_ylim(0, 1.2)
else:
axsheme.text(0.5, 0.5, 'Intrinsic\nNoise', ha='center', va='center',
transform=axsheme.transAxes) # Normal \n distribution
# axsheme.text(-0.1, 0.5, 'Gain', transform=axsheme.transAxes, rotation=90, ha='right', va='center')
axsheme.set_yticks_off()
axsheme.set_xticks_off()
# plt.show()
def plot_sheme_lowpass(axsheme):
point = [1, 1, 1]
x_points = [0] # , 1, 1.5, 2
sigmaf = 314
f = np.arange(0, 2000, 1)
gauss = np.exp(-(f ** 2 / (2 * sigmaf ** 2))) ** 2
axsheme.show_spines('lbrt')
axsheme.set_spines_outward('lbrt', 0)
axsheme.xaxis.set_ticks_position('bottom')
axsheme.plot(f, gauss, color='grey') ## va = 'bottom', va='bottom',
axsheme.text(0.96, 0.8, 'Dendritic', ha='right', transform=axsheme.transAxes) # -0.1
axsheme.text(0.96, 0.6, 'Filter', ha='right', transform=axsheme.transAxes) # -0.1
axsheme.text(-0.1, 0.5, 'Gain', transform=axsheme.transAxes, rotation=90, ha='right', va='center')
axsheme.set_xlim(0, 550)
axsheme.set_xticks_fixed([0, 550], ['0', 'f']) # '$\omega$'
axsheme.set_ylim(0, 1.2)
axsheme.set_yticks_off()
def plot_sheme_nonlinearity(axsheme, color_power1):
xaxis = np.arange(-3, 4.1, 0.01)
power1 = np.arange(-3, 4.1, 0.01)
power1[power1 < 0] = 0
power3 = np.arange(-3, 4.1, 0.01) ** 3
power3[power3 < 0] = 0
axsheme.show_spines('lbrt')
axsheme.set_spines_outward('lbrt', 0)
axsheme.xaxis.set_ticks_position('bottom')
axsheme.set_aspect('equal')
# start with plotting diagonal then power 1 and power 2
# axsheme.plot(xaxis, xaxis-0.1, color=color_diagonal)
axsheme.plot(xaxis, power1, color=color_power1) # +0.06
# axsheme.plot(xaxis, power3-0.05, color = color_power3)
# 0.5, 1.1, 'Dendritic Filter',
axsheme.text(0.5, 1.1, 'Threshold', ha='center', transform=axsheme.transAxes)
axsheme.set_xticks_fixed([0, 1.5], ['0', '$x$'])
axsheme.set_yticks_fixed([0])
axsheme.axhline(0, color='black', linestyle='--', lw=0.5, zorder=1)
# axsheme.axvline(0, color='black', linestyle = '--', lw=0.5, zorder=1)
axsheme.set_xlim(-1.5, 1.5)
axsheme.set_ylim(-1.5, 1.5)
def plot_sheme_IF(axsheme, exp_tau, v_exp, exponential=''):
v_mem = np.arange(-1, 4.1, 0.01)
derivative = -v_mem + exp_tau * np.exp((v_mem - v_exp) / exp_tau)
derivative_cubic = np.zeros(len(v_mem))
derivative_cubic[v_mem > v_exp] = ((v_mem[v_mem > v_exp] - v_exp)) ** 3
derivative_cubic = -v_mem * 1 + derivative_cubic
# if exponential != exponentials[-1]:
# axsheme.set_xticks_blank()
# else:
# axsheme.set_xlabel('$V$')
if exponential == '':
axsheme.text(0, 1.1, 'LIF', transform=axsheme.transAxes)
axsheme.text(-0.1, 0.5, '$dV/dt$', transform=axsheme.transAxes, rotation=90, ha='right', va='center')
axsheme.plot(v_mem, -v_mem, zorder=2, color='grey')
axsheme.plot(v_mem[v_mem < v_exp], -v_mem[v_mem < v_exp], color='black')
axsheme.plot([v_exp, v_exp], [-v_mem[v_mem < v_exp][-1], 200],
color='black')
elif exponential == 'EIF':
# axsheme.set_title('EIF T = ' + str(exp_tau) + ' V = ' + str(v_exp))
axsheme.text(0, 1.1, 'EIF', transform=axsheme.transAxes)
axsheme.set_ylabel('$dV/dt$')
axsheme.plot(v_mem, -v_mem, zorder=2, color='grey')
axsheme.plot(v_mem, derivative, zorder=2, color='black')
elif exponential == 'CIF':
axsheme.set_title('CIF V = ' + str(v_exp))
axsheme.plot(v_mem, derivative_cubic, zorder=3, color='blue')
axsheme.axvline(x=v_exp, color='black', linestyle='--', linewidth=0.5, zorder=1)
axsheme.axvline(x=v_exp - 0.5, color='black', linestyle='--', linewidth=0.5, zorder=1)
# axsheme.axvline(x=1.5, color=palette['red'], zorder=1) #
axsheme.set_xlim(0, 1.6)
axsheme.set_ylim(-2, 1)
axsheme.show_spines('lbrt')
axsheme.set_spines_outward('lbrt', 0)
axsheme.xaxis.set_ticks_position('bottom')
axsheme.set_yticks_off()
axsheme.set_xticks_fixed([0.5, 1, 1.6], ['0', '1', '$V$'])
def plot_time(axt, transform_fact, v_mem_output, time, color, exponential, spike_times, shift=0.1, xlim=0.05, lw=0.5,
counter_here=0):
    # this is presumably for the EIF, for the spikes
v_mem_output[np.isinf(v_mem_output)] = 4
v_mem_output[v_mem_output > 4] = 4
time_here = (time[0:len(v_mem_output)] - shift) * transform_fact
try:
axt.plot(time_here[(time_here < xlim * transform_fact) & (time_here > 0)],
v_mem_output[(time_here < xlim * transform_fact) & (time_here > 0)], color=color, linewidth=lw)
except:
print('time problem')
embed()
axt.set_xlim(0.0, xlim * transform_fact)
axt.show_spines('lb')
# if exponential != exponentials[-1]:
# axt.set_xticks_blank()
axt.axhline(y=1, color='grey', linestyle='--', linewidth=0.5)
axt.axhline(y=0, color='grey', linestyle='--', linewidth=0.5)
if counter_here == 0:
axt.text(-7, 0, '1', color='black', ha='center', va='bottom')
axt.text(-7, 0, '0', color='black', ha='center', va='top')
# axt.set_ylim(-4, 5)
try:
spikes = (np.array(spike_times) - shift) * transform_fact
except:
print('spikes thing')
embed()
spikes_clipped = spikes[(spikes > 0) & (spikes < xlim * transform_fact)]
spike_height = 14
axt.scatter(spikes_clipped, np.ones(len(spikes_clipped)) * spike_height, color='black',
s=2, zorder=3, clip_on=False) # color
if (exponential != 'CIF') and (exponential != 'EIF'):
        # plot the spike lines here
        # this is an extra feature, not part of v_mem
axt.vlines((np.array(spike_times) - shift) * transform_fact, ymin=0, ymax=spike_height, color=color,
linewidth=lw)
# for sp in spike_times:
# axt.plot(np.array([sp - shift, sp - shift])*transform_fact, [1, 4], color=color, linewidth = lw)
def plot_lowpass2(g_p_t, time, v_dent_output, deltat, eod_fr, shift=0.25, nfft=4096 * 6, transform_fact=1000,
color1='grey', fft_type='mppsd', extract=True, lw=0.5, xlim=0.05):
ff, ff_am, pp, pp_am, time_here, extracted = time_psd_calculation(deltat, eod_fr, extract, fft_type, nfft, shift,
time, transform_fact, v_dent_output)
axt_p2 = plt_time_arrays(color1, g_p_t, lw, v_dent_output, extracted=extracted, xlim=xlim, time_here=time_here,
transform_fact=transform_fact)
# axp_p2.set_ylabel('[dB]', y=1.20)
return axt_p2, ff, pp, ff_am, pp_am
def plt_time_arrays(color1, g_p_t, lw, v_dent_output, extracted=[], shift=0.25, xlim=0.05, time='no', time_here='no',
nr=0, transform_fact=1000):
if type(time_here) == str:
time_here = get_time_shifted(shift, time, transform_fact, v_dent_output)
axt_p2 = plt.subplot(g_p_t[nr])
axt_p2.plot(time_here, v_dent_output, color=color1, linewidth=lw)
if len(extracted) > 0:
axt_p2.plot(time_here[5::], extracted[5::], color='red', linewidth=1)
axt_p2.show_spines('lb') # am_time*1000
axt_p2.set_xlim(0.0, xlim * transform_fact)
# axt_p2.set_xticks_blank()
axt_p2.axhline(0, color='black', lw=0.5)
return axt_p2
def time_psd_calculation(deltat, eod_fr, extract, fft_type, nfft, shift, time, transform_fact, v_dent_output):
time_here = get_time_shifted(shift, time, transform_fact, v_dent_output)
extracted = []
if extract:
try:
extracted, _ = extract_am(v_dent_output, time_here / 1000,
sampling=1 / deltat, eodf=eod_fr, norm=False)
except:
print('problem stuff')
embed()
ff_am, pp_am = calc_psd(extracted, deltat, nfft)
else:
ff_am = []
pp_am = []
ff, pp = calc_psd(v_dent_output, deltat, nfft)
return ff, ff_am, pp, pp_am, time_here, extracted
def get_time_shifted(shift, time, transform_fact, v_dent_output):
time_here = (time[0:len(v_dent_output)] - shift) * transform_fact
return time_here
def create_same_max(pps, same=False):
pps2 = []
for a in range(len(pps)):
        # here the same maximum is chosen for all arrays
if same:
new_pp = pps
else:
            # for the spikes an individual maximum is used
new_pp = pps[a] # (pps[a] - np.min(pps[a])) / np.max(pps[a])
        # exclude arrays without any spikes here
if np.mean(pps[a]) != 0:
pps2.append(10 * np.log10(pps[a] / np.max(new_pp)))
else:
pps2.append(np.array([float('nan')] * len(pps[a])))
pp3 = pps2
return pp3
def implement_three_core(cell, amp_frame, titles, g):
if '^3' in titles[g]:
load_name = 'models_big_fit_d_right3.csv'
# model_params = load_model(load_name=load_name, cell_nr = cell_nr)
# cell = model_params.pop('cell')
# eod_fr = model_params.pop('EODf')
# deltat = model_params.pop("deltat")
# v_offset = model_params.pop("v_offset")
v_offset = np.mean(amp_frame[cell + '_stimulus_rec_3' + '_offset'])
# print('three corr:')
elif 'floor' in titles[g]:
load_name = "models_big_fit_d_right.csv"
# model_params = load_model(load_name=load_name, cell_nr = cell_nr)
# cell = model_params.pop('cell')
# eod_fr = model_params.pop('EODf')
# deltat = model_params.pop("deltat")
# v_offset = model_params.pop("v_offset")
v_offset = np.mean(amp_frame[cell + '_stimulus_rec_1' + '_offset'])
else:
load_name = "models_big_fit_d_right.csv"
# = load_model(load_name=load_name, cell_nr = cell_nr)
# cell = model_params.pop('cell')
# eod_fr = model_params.pop('EODf')
# deltat = model_params.pop("deltat")
# v_offset = model_params.pop("v_offset")
        # this was chosen by hand for a single cell only!
v_offset = 8.955 # 8.96
v_offset = np.mean(amp_frame[cell + '_stimulus' + '_offset'])
#
if 'Unnamed: 0' in model_params.keys():
Unnamed = model_params.pop('Unnamed: 0')
return v_offset, model_params, load_name
def calc_psd(stimulus, deltat, nfft):
# ff, pp = conv_power_spec(stimulus, deltat, fft_type=fft_type, nfft=nfft)
# -np.mean(stimulus)
pp, ff = ml.psd(stimulus - np.mean(stimulus), Fs=1 / deltat, NFFT=nfft, noverlap=nfft // 2)
# normalize_factor = eod_fr
return ff, pp
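# Minimal usage sketch (synthetic test signal): power spectrum of a 0.5 s, 100 Hz
# sine sampled at 20 kHz, computed with matplotlib.mlab.psd and 50 % overlap as
# done in calc_psd; the nfft here is chosen arbitrarily for illustration.
def _example_calc_psd():
    deltat = 1 / 20000
    t = np.arange(0, 0.5, deltat)
    signal = np.sin(2 * np.pi * 100 * t)
    ff, pp = calc_psd(signal, deltat, nfft=2 ** 12)
    return ff, pp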
def simulate2(load_name, offset, stimulus,
exponential='EIF', deltat=0.00005, v_zero=0.0, a_zero=2.0, threshold=1.0,
v_base=0.0,
delta_a=0.08, tau_a=0.1, mem_tau=0.015, noise_strength=0.05,
input_scaling=60.0, dend_tau=0.001, ref_period=0.001, v_exp=0, exp_tau=0.014):
# nr = 1
# stimulus = stimulus.copy()
# stimulus[stimulus < 0.0] = 0.0
# stimulus = (stimulus) ** nr
v_dent_output = np.zeros(len(stimulus))
v_mem_output = np.zeros(len(stimulus))
v_dent_output[0] = stimulus[0]
# prepare noise:
noise = np.random.randn(len(stimulus))
# noise *= noise_strength / np.sqrt(deltat)
if 'd_right' in load_name:
noise_strength_new = np.sqrt(noise_strength * 2)
noise *= noise_strength_new / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
else:
noise *= noise_strength / np.sqrt(deltat)
# initial conditions for the model:
v_mem = v_zero
adapt = a_zero
v_dend = stimulus[0]
adapt_output, spike_times, v_dent_output, v_mem_output = stimulate_body(v_dent_output, stimulus, v_dend, dend_tau,
deltat,
v_base, v_mem,
offset, input_scaling, adapt, noise,
mem_tau,
tau_a, ref_period, threshold, delta_a,
exponential=exponential,
v_exp=v_exp, exp_tau=exp_tau)
return spike_times, v_dent_output, v_mem_output
def create_same_range(pps2, min_range='no'):
###########################################
    # same range: the idea is that all arrays should span the same range
if min_range == 'no':
min_range = np.nanmin(np.array(pps2))
pp3 = []
    # min_range = -266.1849849088417  # the value from the EIF, kept so results stay comparable with EIF_appendix
for a in range(len(pps2)):
pp3.append(pps2[a] * (min_range / np.nanmin(pps2[a])))
return pp3
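# Minimal usage sketch (invented dB values): rescale two spectra so that both
# span down to the same minimum, i.e. the most negative value across all spectra.
def _example_create_same_range():
    pps_db = [np.array([-40.0, -10.0, -5.0]), np.array([-60.0, -20.0, -5.0])]
    return create_same_range(pps_db)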
def set_clim_shared(clims, ims, maxs, mins, nr_clim, perc05, perc95):
if clims == 'all':
for im in ims:
im.set_clim(np.min(np.min(mins)) * nr_clim, np.max(np.max(maxs) / nr_clim))
else:
for i, im in enumerate(ims):
if nr_clim == 'perc':
im.set_clim(perc05[i], perc95[i])
else:
im.set_clim(mins[i] * nr_clim, maxs[i] / nr_clim)
def do_withenoise_stimulus(deltat, eod_fr, stimulus_length, a_fe=0.2):
sampling = 1 / deltat
time_eod = np.arange(0, stimulus_length, deltat)
eod_interp, time_wn_cut, _ = load_noise('gwn300Hz50s0.3')
# try:
eod_interp = interpolate(time_wn_cut, eod_interp,
time_eod,
kind='cubic')
# nrs = 6 # 10
fake_fish = fakefish.wavefish_eods('Alepto', frequency=eod_fr,
samplerate=sampling,
duration=len(time_eod) / sampling,
phase0=0.0, noise_std=0.00)
stimulus_here = fake_fish * (1 + eod_interp * a_fe)
stimulus_here[stimulus_here < 0] = 0
return stimulus_here
def model_sheme_split(grid_sheme_orig, time_transform=1000, ws=0.1, stimulus_length=5, fft_type='mppsd',
a_fr=1, v_exp=1, exp_tau=0.1, shift=0.25):
load_name = 'models_big_fit_d_right.csv'
cell_nr = 8 # 5#5#6#3
# model_params = load_model(load_name=load_name, cell_nr = cell_nr)
models = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core')
flowchart_cell = '2012-07-03-ak-invivo-1'
model_params = models[models['cell'] == flowchart_cell].iloc[0]
print('cell=' + str(flowchart_cell))
# amp_frame = pd.read_csv('peak_amplitudes_power.csv')
    cell = model_params.pop('cell')  # .iloc[0]  # look up the values used in the paper
eod_fr = model_params['EODf'] # .iloc[0]
deltat = model_params.pop("deltat") # .iloc[0]
v_offset = model_params.pop("v_offset") # .iloc[0]
sampling_rate = 1 / deltat
nfft = 2 ** 20 # int(sampling_rate / 4)
cell_nr = 8 # 5#5#6#3
eod_fe = [eod_fr + 50] # eod_fr*1+50,, eod_fr * 2 + 50
mult_nr = 0
# REMAINING rows
color_p3 = 'grey' # 'red'#palette['red']
color_p1 = 'grey' # 'blue'#palette['blue']
color_diagonal = 'grey' # 'cyan'#palette['cyan']
colors = [color_diagonal, color_p1, color_p1, color_p3]
ax_rec = [[]] * 4
ax_n = [[]] * 4
axt_IF1 = []
axt_IF2 = []
adds = [[0, 0, 0, 0], [0, 0, 2, 10]]
nrs_grid = [0, 1, 3, 4]
# delta_f = (eod_fe[mult_nr] - eod_fr) - eod_fr
delta_f = [50] # create_beat_corr(np.array([eod_fe[mult_nr] - eod_fr]), np.array([eod_fr]))[0]
# time, stimulus_here, eod_fish_r, eod_fish_e, stimulus = make_paramters(
# stimulus_length, deltat, eod_fr, a_fr, a_fe, eod_fe, mult_nr)
counter_here = 0
# ult_settings(column=2, length=5)
# # fdefaig = plt.figure()
# for mult_nr in range(len(eod_fe)):
c_sigs = [1, 1, 1, 0.9]
c_sigs = [0, 0, 0, 0.9]
var_types = ['', '', '', 'additiv_cv_adapt_factor_scaled'] # ''#'additiv_cv_adapt_factor_scaled'
# 'additiv_visual_d_4_scaled', '']
# a_fes =
nrs = [1, 2, 3, 4]
a_fes = [0, 0.02, 0.1, 0] # alpha\alpha
titles = ['Baseline', r'Contrast$\,=2\,\%$', r'Contrast$\,=20\,\%$', noise_name()]
#########################################################################################
# first row for the stimulus, and then three cols for the sheme, and the power 1 and power 3
grid0 = gridspec.GridSpecFromSubplotSpec(1, len(a_fes) + 1, subplot_spec=grid_sheme_orig,
width_ratios=[1.5, 2, 2, 2, 2], wspace=0.45)
#####################################################
# Grid for the sheme
try:
grid_sheme = gridspec.GridSpecFromSubplotSpec(6, 1,
subplot_spec=grid0[0], wspace=0.2, hspace=0.9,
height_ratios=[1, 1, 1, 1, 0, 1]) # 0.95
except:
print('grid thing1')
embed()
axshemes = []
axsheme = plt.subplot(grid_sheme[0])
axshemes.append(axsheme)
plot_sheme_nonlinearity(axsheme, color_p1)
axsheme = plt.subplot(grid_sheme[1])
axshemes.append(axsheme)
# axsheme.set_aspect('equal')
plot_sheme_lowpass(axsheme)
axsheme = plt.subplot(grid_sheme[2])
axshemes.append(axsheme)
plot_sheme_noise(axsheme)
axsheme = plt.subplot(grid_sheme[3])
axshemes.append(axsheme)
# axsheme.set_aspect('equal')
plot_sheme_IF(axsheme, exp_tau, v_exp)
###################################################################################
lw = 0.5
xlim = 0.065
axps = []
axps_lowpass = []
axps_stimulus = []
pps = []
pps_lowpass = []
pps_stimulus = []
ax_am_sp_s = []
ax_ams = []
ax_noise = []
colors_chosen = []
counter_g = 0
mult_nr = 0
# A grid for a single POWER column
axt_stims = []
for c, c_sig in enumerate(c_sigs):
a_fe = a_fes[c]
######################################
# colors = ['grey', 'grey', 'grey', 'grey']
grid_power_col = gridspec.GridSpecFromSubplotSpec(6, 1,
subplot_spec=grid0[nrs[counter_here]], wspace=0.45,
hspace=0.5, height_ratios=[1, 1, 1, 1, 0, 1])
noise_final_c, spike_times, stimulus, stimulus_here, time, v_dent_output, v_mem_output, frame = get_flowchart_params(
a_fes, a_fr, c, c_sig, cell, deltat, eod_fr, model_params, stimulus_length, v_offset, var_types, eod_fe,
color_p1, color_p3, mult_nr=mult_nr, load_name=load_name, exp_tau=exp_tau, v_exp=v_exp)
print(len(stimulus_here))
##############################################
# titles = [titles[1]]
# for g, stimulus_here in enumerate([stimuli[1]]):
add = 0
color = colors[counter_here]
# FIRST Row: Rectified stimulus
power_extra = False
wr2 = [1, 1.2]
if power_extra:
wr = [1, 1.2]
col = 2
else:
col = 1
wr = [1]
####################################################################
# stimulus
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col,
subplot_spec=grid_power_col[0], wspace=ws, hspace=1.3,
width_ratios=wr)
ax_rec[counter_here], ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, stimulus_here, deltat, eod_fr,
shift, nfft, time_transform, color, fft_type, lw=lw,
xlim=xlim)
ax_rec[counter_here].show_spines('b')
remove_xticks(ax_rec[counter_here])
axt_stims.append(ax_rec[counter_here])
pps_stimulus.append(pp_am)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_stimulus.append(axp_p2)
colors_chosen.append(color)
ax_rec[counter_here].text(0, 0.935, titles[c] + '\n' + r'$\rm{CV}=%s$' % (
np.round(np.std(np.diff(spike_times)) / np.mean(np.diff(spike_times)), 2)) + '\n$f_{Base}=%s$' % (
int(np.round(1 / np.mean(np.diff(spike_times))))) + '\,Hz',
transform=ax_rec[counter_here].transAxes,
va='bottom') # verticalalignment='right',
#embed()
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col,
subplot_spec=grid_power_col[1], wspace=ws, hspace=1.3,
width_ratios=wr)
axt2, ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, v_dent_output, deltat, eod_fr, shift, nfft,
time_transform, color, fft_type, lw=lw, xlim=xlim)
axt2.show_spines('b')
remove_xticks(axt2)
axt_stims.append(axt2)
pps_stimulus.append(pp_am)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_stimulus.append(axp_p2)
colors_chosen.append(color)
# if counter_here == 0:
# ax_low[counter_here].text(-7, 0, '0', color='black', ha='center', va='center')
####################################################################
# spikes
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=grid_power_col[-3], wspace=ws, hspace=1.3,
) # width_ratios=wr2
# add = adds[g][2+ee]
plot_point = ['yes', 'yes', [], 'yes']
axt_spikes, axp_IF, ff, pp, axp_s, pp_s = plot_spikes(grid_lowpass, time_transform, v_mem_output, time, color,
spike_times, shift, deltat, fft_type, nfft, eod_fr,
xlim=xlim,
counter_here=counter_here, psd=False) # , add = add
grid_lowpass_p = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=grid_power_col[-1], wspace=ws, hspace=1.3,
) # width_ratios=wr2
axp_s = plt.subplot(grid_lowpass_p[0])
in_omega = False
if in_omega:
axp_s.set_xlabel('$\omega/\omega_1$')
else:
arrow = True
name = r'$f/f_{EOD}$' # r'$\frac{f}{f_{EOD}}$'
if arrow:
set_xlabel_arrow_core(axp_s, name)
# axp_s.text(1.05, -0.25, name, ha='center', va='center',
# transform=axp_s.transAxes)
axp_s.arrow_spines('b')
else:
axp_s.set_xlabel(name)
# pps.append(pp)
# axps.append(axp_IF)
axps.append(axp_s)
pps.append(pp_s)
# colors_chosen.append(color)
colors_chosen.append('black')
# if ee == 0:
axt_spikes.show_spines('b')
axt_IF1.append(axt_spikes)
# axt_stims,axt_IF1
# else:
# axt_IF2.append(axt_IF)
# if g == 0:
################################
# plot noise split
noise = np.random.randn(len(stimulus))
# noise *= noise_strength / np.sqrt(deltat)
noise_strength = model_params["noise_strength"] # .iloc[0]
# if 'd_right' in load_name:
noise_length = len(stimulus)
# noise_final = np.random.randn(noise_length) # len(stimulus)
# noise_strength_new = np.sqrt(noise_strength * 2)
# noise_final *= noise_strength_new / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
no_noise = False
if no_noise:
c_noise = c_sig
noise_final_c = np.random.randn(noise_length) # len(stimulus)
variance = (noise_strength * c_noise) * 2 / deltat
noise_strength_new = np.sqrt(variance)
noise_final_c *= noise_strength_new # 0.0015532069917408744
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col, width_ratios=wr,
subplot_spec=grid_power_col[2], wspace=ws,
hspace=1.45)
ax_n, ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, noise_final_c, deltat, eod_fr, shift, nfft,
time_transform, color, fft_type, extract=False, lw=lw, xlim=xlim)
ax_n.show_spines('b')
ax_noise.append(ax_n)
remove_xticks(ax_n)
pps_lowpass.append(pp)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_lowpass.append(axp_p2)
# pps_stimulus.append(pp)
# axps_stimulus.append(axp_p2)
# axps.append(axp_p2)
# pps.append(pp)
counter_g += 1
# plt.show()
counter_here += 1
#embed()
####################################
# cut the first part of the spectra,
# because otherwise there is a dip at the beginning and that is a problem for setting a common range
ff, pps_stimulus, pps_lowpass, pps = cut_first_parts(ff, pps_stimulus, pps_lowpass, pps, ll=0)
# here I convert to log and use the same range for all power spectra (see the dB-conversion sketch after this function)
# this is somewhat involved, but some cells spike even without thresholding and we want to keep their noise floor down
# so that the peaks in the noise are not visible
# pp3_stimulus = create_same_max(np.concatenate([pps_stimulus, pps_lowpass]), same=True)
if power_extra:
pp3_stimulus = create_same_max(pps_stimulus, same=True)
pp3_noise = create_same_max(pps_lowpass, same=True)
pps3 = create_same_max(pps, same=True)
# pp3_stimulus = np.concatenate(pp3_stimulus)
# axps_stimulus = np.concatenate([axps_stimulus,axps_lowpass])
# pp3_stimulus = create_same_range_ps(axps, pps_stimulus)
pp3 = create_same_range(np.concatenate([pp3_stimulus, pp3_noise, pps3]))
axps_stimulus = np.concatenate([axps_stimulus, axps_lowpass, axps])
else:
axps_stimulus = axps
pp3 = pps
# pp3[4] = np.array([float('nan')]*len(pp3[4]))
# I do this because most cells don't spike, and they are not nice to show because of the very high offset between the presynaptic potential and the spikes
# there are only a few cells where this distance is not so high; these cells spike occasionally but very randomly, and we still don't want to see their power spectrum,
# therefore we don't show it
colors = [color_diagonal, color_p1, color_p1, color_p3,
color_diagonal, color_p1, color_p1, color_p3,
color_diagonal, color_p1, color_p1, color_p3, ]
plot_points = ['yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes', ]
plot_points = [[], 'yes', [], 'yes',
[], 'yes', [], 'yes',
[], 'yes', [], 'yes',
[], 'yes', [], 'yes', ]
axt_stims[0].get_shared_y_axes().join(*axt_stims)
axt_IF1[0].get_shared_y_axes().join(*axt_IF1)
ax_noise[0].get_shared_y_axes().join(*ax_noise)
set_same_ylim(ax_noise)
ax = np.transpose([axt_stims[0::2], axt_stims[1::2], ax_noise, axt_IF1, axps_stimulus])
fig = plt.gcf()
nr = 1.7 # 2#.24
tag2(fig, axshemes, xoffs=-3.5, yoffs=5.5)
tag2(fig = fig, axes = np.transpose(ax), xoffs=-3.5) # yoffs = [5.5,nr,nr,nr,nr-0.2]
#########################################################
# plot psds
for a, axp in enumerate(axps_stimulus):
lw_p = 0.8
# set a common limit
pp_here = 2 * 10 * np.log10(pp3[a] / np.max(pp3))  # the factor 2 is wrong
plot_power_common_lim(axp, pp_here, ff / eod_fr, colors[a], lw_p, plot_points[a], delta_f / eod_fr)
axp.show_spines('b')
axp.set_ylim(-22, 1) # -8.5
if a == 3: # % 4 == 3:
axp.yscalebar(1.1, 0.6, 20, 'dB', va='center', ha='right')
# axp.yscalebar(1.1, 0.8, 20, 'dB', va='center', ha='right')
axps_stimulus[0].get_shared_y_axes().join(*axps_stimulus)
# if power_extra:
# axps_stimulus[0].get_shared_y_axes().join(*axps_stimulus)
def model_sheme_split2(grid_sheme_orig, time_transform=1000, ws=0.1, stimulus_length=5, fft_type='mppsd',
a_fr=1, v_exp=1, exp_tau=0.1, shift=0.25):
load_name = 'models_big_fit_d_right.csv'
cell_nr = 8 # 5#5#6#3
# model_params = load_model(load_name=load_name, cell_nr = cell_nr)
models = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core')
flowchart_cell = '2012-07-03-ak-invivo-1'
model_params = models[models['cell'] == flowchart_cell].iloc[0]
print('cell=' + str(flowchart_cell))
# amp_frame = pd.read_csv('peak_amplitudes_power.csv')
cell = model_params.pop('cell') # .iloc[0]# Werte für das Paper nachschauen
eod_fr = model_params['EODf'] # .iloc[0]
deltat = model_params.pop("deltat") # .iloc[0]
v_offset = model_params.pop("v_offset") # .iloc[0]
sampling_rate = 1 / deltat
nfft = 2 ** 20 # int(sampling_rate / 4)
cell_nr = 8 # 5#5#6#3
eod_fe = [eod_fr + 50] # eod_fr*1+50,, eod_fr * 2 + 50
mult_nr = 0
# REMAINING rows
color_p3 = 'grey' # 'red'#palette['red']
color_p1 = 'grey' # 'blue'#palette['blue']
color_diagonal = 'grey' # 'cyan'#palette['cyan']
colors = [color_diagonal, color_p1, color_p1, color_p3]
ax_rec = [[]] * 4
ax_n = [[]] * 4
axt_IF1 = []
axt_IF2 = []
adds = [[0, 0, 0, 0], [0, 0, 2, 10]]
nrs_grid = [0, 1, 3, 4]
# delta_f = (eod_fe[mult_nr] - eod_fr) - eod_fr
delta_f = [50] # create_beat_corr(np.array([eod_fe[mult_nr] - eod_fr]), np.array([eod_fr]))[0]
# time, stimulus_here, eod_fish_r, eod_fish_e, stimulus = make_paramters(
# stimulus_length, deltat, eod_fr, a_fr, a_fe, eod_fe, mult_nr)
counter_here = 0
# ult_settings(column=2, length=5)
# # fdefaig = plt.figure()
# for mult_nr in range(len(eod_fe)):
c_sigs = [1, 1, 0.9] # 1,
c_sigs = [0, 0, 0.9] # '',0,
var_types = ['', '', 'additiv_cv_adapt_factor_scaled'] # ''#'additiv_cv_adapt_factor_scaled'
# 'additiv_visual_d_4_scaled', '']
# a_fes =
nrs = [1, 2, 3, 4]
a_fes = [0, 0.02, 0]  # alpha\alpha0.1,
titles = ['Baseline', r'Contrast$\,=2\,\%$', noise_name()] # r'Contrast$\,=20\,\%$',
#########################################################################################
# grid layout: a narrow first column for the scheme, then one column per stimulus condition (each column holds the stimulus, dendritic output, noise, spikes and power rows)
grid0 = gridspec.GridSpecFromSubplotSpec(1, len(a_fes) + 1, subplot_spec=grid_sheme_orig,
width_ratios=[0.84, 2, 2, 2], wspace=0.45)
#####################################################
# Grid for the scheme
hs = 0.6 # 0.45
try:
grid_sheme = gridspec.GridSpecFromSubplotSpec(6, 1,
subplot_spec=grid0[0], wspace=0.2, hspace=hs,
height_ratios=[1, 1, 1, 1, 0, 1]) # 0.95
except:
print('grid thing1')
embed()
axshemes = []
axsheme = plt.subplot(grid_sheme[0])
axshemes.append(axsheme)
plot_sheme_nonlinearity(axsheme, color_p1)
axsheme = plt.subplot(grid_sheme[1])
axshemes.append(axsheme)
# axsheme.set_aspect('equal')
plot_sheme_lowpass(axsheme)
axsheme = plt.subplot(grid_sheme[2])
axshemes.append(axsheme)
plot_sheme_noise(axsheme)
axsheme = plt.subplot(grid_sheme[3])
axshemes.append(axsheme)
# axsheme.set_aspect('equal')
plot_sheme_IF(axsheme, exp_tau, v_exp)
###################################################################################
lw = 0.5
xlim = 0.065
axps = []
axps_lowpass = []
axps_stimulus = []
pps = []
pps_lowpass = []
pps_stimulus = []
ax_am_sp_s = []
ax_ams = []
ax_noise = []
colors_chosen = []
counter_g = 0
mult_nr = 0
# A grid for a single POWER column
axt_stims = []
for c, c_sig in enumerate(c_sigs):
a_fe = a_fes[c]
######################################
# colors = ['grey', 'grey', 'grey', 'grey']
grid_power_col = gridspec.GridSpecFromSubplotSpec(6, 1,
subplot_spec=grid0[nrs[counter_here]], wspace=0.45,
hspace=hs, height_ratios=[1, 1, 1, 1, 0, 1])
noise_final_c, spike_times, stimulus, stimulus_here, time, v_dent_output, v_mem_output, frame = get_flowchart_params(
a_fes, a_fr, c, c_sig, cell, deltat, eod_fr, model_params, stimulus_length, v_offset, var_types, eod_fe,
color_p1, color_p3, mult_nr=mult_nr, load_name=load_name, exp_tau=exp_tau, v_exp=v_exp, redo = False)
print(len(stimulus_here))
extracted, _ = extract_am(v_dent_output, stimulus_here / 1000, sampling=1 / deltat, eodf=eod_fr, norm=False)
print('noise var'+str(np.var(noise_final_c))+' stimulus var '+str(np.var(extracted))+' CV '+str(np.round(np.std(np.diff(spike_times)) / np.mean(np.diff(spike_times)), 2)))
##############################################
color = colors[counter_here]
# FIRST Row: Rectified stimulus
power_extra = False
if power_extra:
wr = [1, 1.2]
col = 2
else:
col = 1
wr = [1]
####################################################################
# stimulus
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col,
subplot_spec=grid_power_col[0], wspace=ws, hspace=1.3,
width_ratios=wr)
ax_rec[counter_here], ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, stimulus_here, deltat, eod_fr,
shift, nfft, time_transform, color, fft_type, lw=lw,
xlim=xlim)
# ax_rec[counter.]
ax_rec[counter_here].show_spines('b')
remove_xticks(ax_rec[counter_here])
axt_stims.append(ax_rec[counter_here])
pps_stimulus.append(pp_am)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_stimulus.append(axp_p2)
colors_chosen.append(color)
# if counter_here == 0:
# ax_rec[counter_here].text(-7, 0, '0', color='black', ha='center', va='center')
# if colorful_title:
# rainbow_title(fig, ax_rec[counter_here], titles[g], add_pos[g], color_add_pos[g])
# else:#add_pos[g]
ax_rec[counter_here].text(0, 1, titles[c] + '\n' + r'$\rm{CV}=%s$' % (
np.round(np.std(np.diff(spike_times)) / np.mean(np.diff(spike_times)), 2)) + '\n$f_{base}=%s$' % (
int(np.round(1 / np.mean(np.diff(spike_times))))) + '\,Hz',
transform=ax_rec[counter_here].transAxes,
va='bottom') # verticalalignment='right',#0.935
# and plot the corresponding scheme
# if g == 0:
# REMAINING rows: dendritic filter / LIF / EIF stimulus
# for ee, exponential in enumerate(exponentials):
# model
# v_offset, model_params, load_name= implement_three_core(cell,amp_frame, titles, g, cell_nr = cell_nr)
# for hard coding the offset here i check the change of the baseline
# if (ee == 0):
# SECOND row: dendritic low-pass filter
plot_point = [[], [], [], 'yes']
# ax_low[counter_here]
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col,
subplot_spec=grid_power_col[1], wspace=ws, hspace=1.3,
width_ratios=wr)
axt2, ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, v_dent_output, deltat, eod_fr, shift, nfft,
time_transform, color, fft_type, lw=lw, xlim=xlim)
axt2.show_spines('b')
remove_xticks(axt2)
axt_stims.append(axt2)
pps_stimulus.append(pp_am)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_stimulus.append(axp_p2)
colors_chosen.append(color)
# if counter_here == 0:
# ax_low[counter_here].text(-7, 0, '0', color='black', ha='center', va='center')
####################################################################
# spikes
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=grid_power_col[-3], wspace=ws, hspace=1.3,
) # width_ratios=wr2
# add = adds[g][2+ee]
plot_point = ['yes', 'yes', [], 'yes']
axt_spikes, axp_IF, ff, pp, axp_s, pp_s = plot_spikes(grid_lowpass, time_transform, v_mem_output, time, color,
spike_times, shift, deltat, fft_type, nfft, eod_fr,
xlim=xlim, labelpad=1,
counter_here=counter_here, psd=False) # , add = add
grid_lowpass_p = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=grid_power_col[-1], wspace=ws, hspace=1.3,
) # width_ratios=wr2
axp_s = plt.subplot(grid_lowpass_p[0])
in_omega = False
if in_omega:
axp_s.set_xlabel('$\omega/\omega_1$')
else:
arrow = True
name = r'$f/f_{EOD}$' # r'$\frac{f}{f_{EOD}}$'
if arrow:
set_xlabel_arrow_core(axp_s, name)
# axp_s.text(1.05, -0.25, name, ha='center', va='center',
# transform=axp_s.transAxes)
axp_s.arrow_spines('b')
else:
axp_s.set_xlabel(name)
# pps.append(pp)
# axps.append(axp_IF)
axps.append(axp_s)
pps.append(pp_s)
# colors_chosen.append(color)
colors_chosen.append('black')
# if ee == 0:
axt_spikes.show_spines('b')
axt_IF1.append(axt_spikes)
# axt_stims,axt_IF1
# else:
# axt_IF2.append(axt_IF)
# if g == 0:
################################
# plot noise split
noise = np.random.randn(len(stimulus))
# noise *= noise_strength / np.sqrt(deltat)
noise_strength = model_params["noise_strength"] # .iloc[0]
# if 'd_right' in load_name:
noise_length = len(stimulus)
# noise_final = np.random.randn(noise_length) # len(stimulus)
# noise_strength_new = np.sqrt(noise_strength * 2)
# noise_final *= noise_strength_new / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
no_noise = False
if no_noise:
c_noise = c_sig
noise_final_c = np.random.randn(noise_length) # len(stimulus)
variance = (noise_strength * c_noise) * 2 / deltat
noise_strength_new = np.sqrt(variance)
noise_final_c *= noise_strength_new # 0.0015532069917408744
grid_lowpass = gridspec.GridSpecFromSubplotSpec(1, col, width_ratios=wr,
subplot_spec=grid_power_col[2], wspace=ws,
hspace=1.45)
ax_n, ff, pp, ff_am, pp_am = plot_lowpass2(grid_lowpass, time, noise_final_c, deltat, eod_fr, shift, nfft,
time_transform, color, fft_type, extract=False, lw=lw, xlim=xlim)
ax_n.show_spines('b')
ax_noise.append(ax_n)
remove_xticks(ax_n)
pps_lowpass.append(pp)
if power_extra:
axp_p2 = plt.subplot(grid_lowpass[1])
axp_p2.set_xticks_blank()
axps_lowpass.append(axp_p2)
# pps_stimulus.append(pp)
# axps_stimulus.append(axp_p2)
# axps.append(axp_p2)
# pps.append(pp)
counter_g += 1
# plt.show()
counter_here += 1
devide = np.max(np.max(pps))
# plot_points = np.array([[], [], [], 'yes',
# [], [], [], 'yes'
# ,'yes','yes',[],'yes'
# ,'yes','yes',[],'yes'])
# from top to bottom, from left to right
# plot psd with shared log lim
####################################
# cut the first part of the spectra,
# because otherwise there is a dip at the beginning and that is a problem for setting a common range
ff, pps_stimulus, pps_lowpass, pps = cut_first_parts(ff, pps_stimulus, pps_lowpass, pps, ll=0)
# here I convert to log and use the same range for all power spectra (see the dB-conversion sketch above)
# this is somewhat involved, but some cells spike even without thresholding and we want to keep their noise floor down
# so that the peaks in the noise are not visible
# pp3_stimulus = create_same_max(np.concatenate([pps_stimulus, pps_lowpass]), same=True)
if power_extra:
pp3_stimulus = create_same_max(pps_stimulus, same=True)
pp3_noise = create_same_max(pps_lowpass, same=True)
pps3 = create_same_max(pps, same=True)
# pp3_stimulus = np.concatenate(pp3_stimulus)
# axps_stimulus = np.concatenate([axps_stimulus,axps_lowpass])
# pp3_stimulus = create_same_range_ps(axps, pps_stimulus)
pp3 = create_same_range(np.concatenate([pp3_stimulus, pp3_noise, pps3]))
axps_stimulus = np.concatenate([axps_stimulus, axps_lowpass, axps])
else:
axps_stimulus = axps
pp3 = pps
# pp3[4] = np.array([float('nan')]*len(pp3[4]))
# I do this because most cells don't spike, and they are not nice to show because of the very high offset between the presynaptic potential and the spikes
# there are only a few cells where this distance is not so high; these cells spike occasionally but very randomly, and we still don't want to see their power spectrum,
# therefore we don't show it
colors = [color_diagonal, color_p1, color_p1, color_p3,
color_diagonal, color_p1, color_p1, color_p3,
color_diagonal, color_p1, color_p1, color_p3, ]
plot_points = ['yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes',
'yes', 'yes', 'yes', 'yes', ]
plot_points = [[], 'yes', [], 'yes',
[], 'yes', [], 'yes',
[], 'yes', [], 'yes',
[], 'yes', [], 'yes', ]
axt_stims[0].get_shared_y_axes().join(*axt_stims)
axt_IF1[0].get_shared_y_axes().join(*axt_IF1)
ax_noise[0].get_shared_y_axes().join(*ax_noise)
set_same_ylim(ax_noise)
ax = np.transpose([axt_stims[0::2], axt_stims[1::2], ax_noise, axt_IF1, axps_stimulus])
fig = plt.gcf()
# fig.tag(ax, xoffs=-3.5, yoffs = 1.5)
nr = 1.4 # 7#2#.24
yoffs = 1 # 1.1
xoffs = -3
fig.tag(np.transpose(ax), xoffs=xoffs, yoffs=yoffs)
# tag2(fig, np.transpose(ax), xoffs=-3.5, yoffs=1-5, major_index=0)
tag2(fig, axshemes, xoffs=xoffs, yoffs=yoffs, major_index=0) # [5.5,nr,nr,nr]#[5.5,nr,nr,nr,nr-0.2]
# tag2(fig, np.transpose(ax), xoffs=-3.5)#yoffs = [5.5,nr,nr,nr,nr-0.2]
# get_ylim_same
#########################################################
# plot psds
for a, axp in enumerate(axps_stimulus):
lw_p = 0.8
# set a common limit
pp_here = 2 * 10 * np.log10(pp3[a] / np.max(pp3))  # the factor 2 is wrong
plot_power_common_lim(axp, pp_here, ff / eod_fr, colors[a], lw_p, plot_points[a], delta_f / eod_fr)
axp.show_spines('b')
axp.set_ylim(-22, 1) # -8.5
if a == 3: # % 4 == 3:
axp.yscalebar(1.1, 0.6, 20, 'dB', va='center', ha='right')
# axp.yscalebar(1.1, 0.8, 20, 'dB', va='center', ha='right')
axps_stimulus[0].get_shared_y_axes().join(*axps_stimulus)
# if power_extra:
# axps_stimulus[0].get_shared_y_axes().join(*axps_stimulus)
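#########################################################
# The flowchart panels above annotate each condition with the CV of the interspike
# intervals and the baseline firing rate, both computed from np.diff(spike_times).
# A minimal stand-alone version of those two numbers (a sketch with a hypothetical
# name, not called by the plotting code):
def sketch_cv_and_baseline_rate(spike_times):
    """Return (CV, baseline rate in Hz) from spike times given in seconds."""
    isi = np.diff(np.asarray(spike_times))
    cv = np.std(isi) / np.mean(isi)
    f_base = 1.0 / np.mean(isi)
    return cv, f_base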
def get_flowchart_params(a_fes, a_fr, c, c_sig, cell, deltat, eod_fr, model_params, stimulus_length, v_offset,
var_types, eod_fe=[750], color_p1='black', color_p3='black', colorful_title=False, mult_nr=1,
load_name='models_big_fit_d_right.csv', exp_tau=0.1, v_exp=1, redo = False): # v_exp=1, exp_tau=0.1
# this may look cumbersome, but there is one loop over eod_fe and one loop over the stimuli;
# reducing it to a single stimulus loop would require several changes
try:
time, stimulus_rec, eod_fish_r, eod_fish_e, stimulus = make_paramters(
stimulus_length, deltat, eod_fr, a_fr, a_fes[c], eod_fe, mult_nr)
except:
print('parameter thing4')
embed()
eod_fish_r_rec = eod_fish_r * 1
eod_fish_r_rec[eod_fish_r_rec < 0] = 0
add_pos, color_add_pos, _, stimuli, eod_fish_rs = titles_EIF(eod_fish_r, eod_fish_r_rec, color_p1,
color_p3, mult_nr, eod_fr, eod_fe, stimulus,
stimulus_rec, colorful_title)
######################################################################
# load
# else:
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
load_function = find_load_function()
g = 0
noise_final_c, spike_times, stimulus_here, v_dent_output, v_mem_output, frame = save_flowchart_susept(var_types[c],
cell, a_fes,
c,
c_sig, deltat,
eod_fish_rs,
eod_fr,
exp_tau,
g,
load_function,
load_name,
model_params,
stimulus_length,
v_exp,
v_offset,
version_comp, redo = redo)
time = np.arange(0, len(stimulus_here) * deltat, deltat)
return noise_final_c, spike_times, stimulus, stimulus_here, time, v_dent_output, v_mem_output, frame
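#########################################################
# get_flowchart_params() also returns the intrinsic-noise trace of the model.  In the
# (disabled) no_noise branches of the flowchart functions above, that trace is drawn as
# Gaussian white noise with variance 2 * noise_strength * c_noise / deltat, i.e. its
# standard deviation grows as 1/sqrt(deltat).  A hedged sketch of that scaling
# (parameter and function names are illustrative):
def sketch_intrinsic_noise(noise_strength, c_noise, deltat, n_samples):
    """Gaussian white noise with variance 2*D*c/deltat, as used in the model code above."""
    sigma = np.sqrt(2.0 * noise_strength * c_noise / deltat)
    return np.random.randn(n_samples) * sigma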
def save_flowchart_susept(var_type, cell, a_fes, c, c_sig, deltat, eod_fish_rs, eod_fr, exp_tau, g, load_function,
load_name, model_params, stimulus_length, v_exp, v_offset, version_comp, exponential='', redo = False):
save_arrays = load_function + '_' + var_type + '_' + '_c_sig_' + str(c_sig) + '_a_fe_' + str(a_fes[c]) + '.csv'
save_spikes = load_function + '_' + var_type + '_' + 'spikes_c_sig_' + str(c_sig) + '_a_fe_' + str(
a_fes[c]) + '.csv'
# TODO: there is still a problem here!
if (version_comp == 'code') | (version_comp == 'develop'):
if (not os.path.exists(save_spikes)) | (redo == True):
print('redo flowchart')
stimulus_here = do_withenoise_stimulus(deltat, eod_fr, stimulus_length, a_fe=a_fes[c])
simple = False
if simple:
manual_offset = False
if manual_offset:
spike_times, v_dent_output, v_mem_output = simulate2(load_name,
v_offset,
eod_fish_rs[g], deltat=deltat,
exponential=exponential, v_exp=v_exp,
exp_tau=exp_tau,
**model_params)
print('Firing rate baseline ' + str(len(spike_times) / stimulus_length))
spike_times, v_dent_output, v_mem_output = simulate2(load_name,
v_offset,
stimulus_here, deltat=deltat,
exponential=exponential,
v_exp=v_exp, exp_tau=exp_tau,
**model_params)
else:
# frequently used value
# 'harmonic' is the right choice here; 'wave_pchange' and 'wave' are relics, so just take 'harmonic'
fish_morph_harmonics_vars = 'harmonic' # 'harmonic['harmonicwave_pchange']#####'harmonic'wave_pchange['']#'harmon','harmonic'
variant = 'sinz'  # other activation variants ('RecCore', 'SigmoidAdjusted', 'ELU', 'GELU', 'Sigmoid', 'LeakyReLU', 'Tanh', 'SiLU', 'softplus', 'square') were tried but are not used here
fishe_emitter = 'Alepto' # ['Sternarchella', 'Sternopygus']
n = 1 # 3, 1.5,0.5
alpha = 0 # [0.4, 0.7, 0.9, 1.5] # [0.1,0.5,0.9]#[1.5,2,2.5,3,3.5,4,5]#0.6,0.7,0.8,0.2,0.3,0.4,
adapt_offset = 'adaptoffset_bisecting' # 'adaptoffsetallall2' # ''#'adaptoffsetallall2' #'', 'adaptoffsetallall2''adaptoffsetstableall''adaptoffsetallall2'3: noise is not random for the adaptation,'']#, 'adapt_offset_allall2''']
lower_tol = 0.995
upper_tol = 1.005
mimick = 'no'  # 'no' = no mimicking; many mimick variants (e.g. '_MimickReceiverWavemorph', '_MimickEmitterReceiverWavemorph', '_MimickJammerEmitterReceiverWavemorph', '_ThunderMimickReceiverZentered', '_thunder_wavemorph_mimick') were tried but are not used here
a_fj = 0.1 # [0.1,0.05,0.025,0.02,0.015,0.0125,0.01,0.005]##[0.1,0.05,0.025,0.02,0.015,0.0125,0.01,0.005]#[0.1,0,][0.2,0.1,0.05,0.01,0] # 0, 0.01, 0.05, 0.1, #[0.2] # , 0.5, 0.8,0, 0.01, 0.05, 0.1,
freq_equal = '' # 'equal'
damping_factor = 0.45 # 0.65,0.2,0.5,0.2,0.6,0.45,0.6,0.35
damping_type = ''  # '' = no damping; variants 'damping', 'damping_hundspeth', 'damping_kashimori', 'damping_nieman', 'damping_huxley' were tried but are not used here
corrs_nr = 35
SAM = '' # ,
start = 0.3
end = 2.5
# stimulus_length = 1 # .5
phaseshift_frs = [0] # [0,0.62,1.25, 1.88,2.51,3.14,3.76,4.39,5.03,5.65]#
cell_recording = '' # '2019-09-23-aj-invivo-1' # ['2019-10-21-am-invivo-1']#['2019-09-23-aj-invivo-1']
fishe_receiver = 'Alepto' # ['Sternopygus']#['Alepto']'Sternarchella',
save = True
overhalf = '_overhalf_'
beat = '' # 'chirp''beat' beat
subpopulation = 'test'  # 'test'
exp_tau = np.array([0.0001, 0.01, 0.1, 1, 10, 1000, ])
v_exp = np.array([0, 0.5, 1, 1.5])
exponential = 'EIF' # 'QIF'#'EIF'#''
exponential = 'CIF'
exponential = ''
if exponential == '':
v_exp = 1
exp_tau = 0.001
elif exponential == 'EIF':
v_exp = np.array([0, 0.5, 1, 1.5])
exp_tau = np.array(
[0.0001, 0.00005, 0.001, 0.0002, 0.0003, 0.0005, 0.0007, 0.01, 0.1, 1, 10, 1000, ])
v_exp = np.array([0])
exp_tau = np.array([0.001, 0.01, 0.1]) # 10
elif exponential == 'CIF':
v_exp = np.array([0, 0.5, 1, 1.5, 2, 0.2, -0.5, -1]) #
exp_tau = np.array([0]) # 10
dent_taus_o = [1]
constant_reductions = ['']
three_waves = True
redo_level = 'celllevel' # celllevel
counter = 0
model_new = 'modelbigfit'
stimulus_type = '_StimulusOrig_' # ,#
variant = 'sinz'
n = 1
variance = 1
mean = 0
std = 1
num_samples = 10000
phaseshift_fr = 0
fish_morph_harmonics_vars = 'harmonic'
T = stimulus_length  # T is the time window; the units still need to be checked
a_fr = 1  # receiver amplitude; smaller values (0.75, 0.5, 0.3, 0.2, 0.1, 0.05, 0.01, 0) were tried but are not used here
model_cells = pd.read_csv(load_folder_name('calc_model_core') + "/models_big_fit_d_right.csv")
zeros = 'ones'
nfft = int(4096)
nfft_for_morph = 4096 * 4
nrs = [1] # 1,,3 3]#, 1, 1.5, 0.5] # 1.5,0.5]3, 1
versions = ['sinz'] # ,'']
counter = 0
var = 1 # 0.2# 0.07
formula = 'code' # 'formula'#'code'#'formula'#'code'#'formula' # 'code'
old_vertical_stack = []
symmetry = 'symetric'
duration_noise = '_short' # '_long'#'_short'# embed()
formula = 'formula' # 'code'#
formula = 'code' ##'formula'
step = 500 # int(2**15),int(2**14),int(2**13),int(2**16),int(2**12),
nffts = ['whole'] # stimulus_length*20000int(2**16)4096[int(4096),int(4096*2),int(4096*3)]16384
cut_off = 300 # 600 # 10000#600#10000#200#10000#600#10000#300#20000#20000#300##300#20000#300#20000#300 #
max_type = '_MaxCutOff2_'
noteverystep = ''
stim_type_noise = ''
cut = 300 # 300#450
cut_off2 = cut # 300#300
trans = 1 # 50#500#0
trials_pop = 1 # 10000#, 1000000#[100, 500, 1000, 3000, 10000,100000, 1000000]#1000000#1000#0#1 # 500
cut_off_type = ''
stim_type_noise = 'eRAM' # 'filtered_eRAM'##'filtered_eRAM'########'filtered_RAM'##'filtered_eRAM'#'filtered_eRAM'##'filtered_RAM_extracted' # #'filtered_eRAM_extracted'#'filtered_RAM'#######'_StimPreSavedRAM_'#################################,####### # #'eRAM''filtered_RAM'
stim_type_noise = 'RAM' # 'eRAM' # 'RAM'#
stim_type_afe = 'RAM' # 'eRAM' # 'RAM'##'RAM'#'eRAM'#'RAM'#'eRAM'#'RAM'#'eRAM'#'RAM'#'eRAM'#'eRAM'#'RAM'#'eRAM'#'RAM'######'RAM'###########'eRAM'####, # 'eRAM_extractVdent'#######'filtered_RAM_extracted'##'filtered_eRAM_extractedCarrier'#'filtered_eRAM'##'filtered_RAM_extracted' # #'filtered_eRAM_extracted'#'filtered_RAM'#######'_StimPreSavedRAM_'#################################,####### # #'eRAM''filtered_RAM'
extracted = ['']
extract = '_RAMdadjusted' # ['_RAMdadjusted','_RAMadjusted','_RAMscaled','']#]#'_RAMadjusted'
trials_stim = 9 # 30#1000000#250000#50000##1000000#250000#1000000#10000#1000000#1000000#250000#500000 #10000010,10000001000,10000, 100000010000001000,10000, 100000, 1000,1000,10000,#000 00,3000, 10, 500, 1000,500, 250,1000,1500,2000]#500#10,150,
noise_added = '' # '_noiseadded_'['_noiseadded_']#'''','''',]#'_noiseadded_',''##''#
# a_fes = [
# 0] # 0.05,0.01,0.05,0.1,0.2,0.01,0.05,0.10.010.01, 0.05, 0.100, .10.010.010.05#0.005,0,0.01, 0.05,0.1,0.2, ''[0.1,0]0.1,,0
if stim_type_noise == '_StimPreSavedRAM_':
noise_name = 'gwn300Hz50s0.3' # ,'gwn300Hz100s0.3','InputArr_400hz_30s'
else:
noise_name = ''
c_sigs = [0.9] # 0.45,0.9, 1.8]#0.9,0.1,1.5]#1.5#1.5#0.9#0.9#0.9#0.9
c_noises = [0.1] # ,0.9]#0.9#np.round(1 - c_sig,1)
adapt_type = '' # ,'_noAdapt_']#,,,]#'_noAdapt_','']
ref_type = '' # ,'_NoRef_']#,'_NoRef_''''_NoRef_''',]#'_NoRef_''',]#''##''##''##''##''##''#
dendrid = '' # ,'Stimulus_wo_dendrid',]#'Stimulus_wo_dendrid''''','Stimulus_wo_dendrid''''',]#'Stimulus_wo_dendrid',#,, ] # ,''''''''','noDendrid',]#''#'noDendrid'
a_fr = 1 # ,1,]#0,,0,]#,0 ,0 ] # ,0,]#0#1
f_max = cut_off2
N = int(T / deltat)
RAM = np.random.normal(0, var, size=N)
carrier_RAM = np.random.normal(0, 1, size=N) # *0.2
input_scaling = model_params.input_scaling
trial_nrs = 1
tt = 0
fft_i = 'forward' # 'ortho'#'backward'###
fft_o = 'forward' # 'ortho'##'backward'###'ortho'#######
mV = 'mV'
Hz = 'Hz'
nfft = 'whole'
burst_corr = ''
c_noise = np.round((1 - c_sig), 2)
cell_recording_save_name = find_cell_recording_save_name(cell_recording)
save_name_here = save_ram_model(stimulus_length, cut_off, nfft, a_fes[c], stim_type_noise, mimick,
variant, trials_stim, n,
cell_recording_save_name, burst_corr=burst_corr, Hz=Hz,
mV=mV, fft_i=fft_i, fft_o=fft_o, stim_type_afe=stim_type_afe,
extract=extract, noise_added=noise_added, adapt_type=adapt_type,
ref_type=ref_type, c_noise=c_noise, c_sig=c_sig, cut_off2=cut_off2,
a_fr=a_fr, var_type=var_type, zeros=zeros, trials_nr=trials_pop,
dendrid=dendrid, trans=trans)
print(save_name_here)
# if c_sig != 0:
if 'additiv_cv_adapt_factor_scaled' in var_type:
save_name_here_cv = save_name_here * 1
# save_name_here_cv = save_name_here_cv.replace('TrialsStim_'+str(trial_nr),'')
# save_name_here_cv = save_name_here_cv.replace(burst_corr,'')
# save_name_here_cv = save_name_here_cv+'_cvtable'
# todo: this still has to be the same factor as in calc_model
individual = False
if individual:
save_name_here_cv = name_for_cv_presaved(save_name_here_cv, burst_corr, trials_stim)
else: # DEFAULT
save_name_here_cv = save_ram_model_cv(a_fes[c], stim_type_noise, stim_type_afe=stim_type_afe,
noise_added=noise_added,
c_noise=c_noise, c_sig=c_sig,
var_type=var_type)
if not os.path.exists(save_name_here_cv + '.csv'):
calc_cv_adapt(save_name_here_cv, v_exp, upper_tol, lower_tol, trials_pop, extract,
stim_type_afe, cut_off2, (1 - c_sig), c_sig, cut_off_type, nfft_for_morph,
noise_name, var, T, nfft, stim_type_noise, cut_off2, model_cells, cell_recording,
fishe_receiver, n, variant, fish_morph_harmonics_vars, trans, noise_added,
ref_type, dendrid, damping_type, exp_tau, variance, stimulus_length, adapt_offset,
a_fes[c], mimick, zeros, adapt_type, phaseshift_fr, exponential=exponential,
a_fr=a_fr, var_type_scale=var_type, var_type='additiv_cv_adapt_scaled')
else:
save_name_here_cv = ''
RAM_afe, RAM_noise, stim_type, RAM, carrier_RAM, deltat, eod_fr, time_array, eod_fish_r, am_extracted, stimulus_length_here = get_stim(
carrier_RAM, a_fr, zeros, eod_fr, mimick, fishe_receiver, cell_recording, trans, nfft,
extract, noise_added, cut_off, stim_type_afe, model_params, variance, cell, save_name_here_cv,
c_sig, var_type,
cut_off_type, input_scaling, noise_name, stimulus_length, deltat, a_fes[c], stim_type_noise,
nfft_for_morph,
fish_morph_harmonics_vars)
sampling_rate = 1 / deltat
nfft = sampling_rate / 4
noise_final_c, offset, v_mem_output, spikes_bef, rate_adapted, rate_baseline_after, spike_times, isi, v_dent_output = get_spikes(
adapt_type, v_offset, adapt_offset, a_fr, nfft, lower_tol, 0, carrier_RAM, n, cell, trial_nrs,
variant,
stimulus_length, deltat, exponential, upper_tol, v_exp, exp_tau, dendrid, noise_added, cut_off,
c_sig,
var_type, ref_type, (1 - c_sig), model_params, fishe_receiver, phaseshift_fr, nfft_for_morph,
eod_fr, )
carrier_RAM[carrier_RAM < 0] = 0
stimulus_here = carrier_RAM
#embed()
frame = pd.DataFrame()
frame['v_dent_output'] = v_dent_output
frame['v_mem_output'] = v_mem_output
try:
frame['RAM_afe'] = np.concatenate([RAM_afe[0:int(trans * sampling_rate)], RAM_afe])
except:
print('assign thing')
embed()
frame['RAM_noise'] = np.concatenate([RAM_noise[0:int(trans * sampling_rate)], RAM_noise])
try:
frame['stimulus_here'] = stimulus_here
except:
print('stim here thing')
embed()
try:
frame['noise'] = noise_final_c # =
except:
print('noise something')
embed()
frame_sp = pd.DataFrame()
spike_times = spike_times[0]
frame_sp['spikes'] = spike_times
# save_here = save_csv_to_pandas(arrays[n])
frame.to_csv(save_arrays)
frame_sp.to_csv(save_spikes)
else:
noise_final_c, spike_times, stimulus_here, v_dent_output, v_mem_output, frame = load_arrays_flowchart(
save_arrays,
save_spikes)
else:
noise_final_c, spike_times, stimulus_here, v_dent_output, v_mem_output, frame = load_arrays_flowchart(
save_arrays,
save_spikes)
return noise_final_c, spike_times, stimulus_here, v_dent_output, v_mem_output, frame
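#########################################################
# save_flowchart_susept() follows a compute-once/cache pattern: the model is simulated
# only when the CSV files are missing or redo=True, otherwise the saved arrays are
# reloaded via load_arrays_flowchart().  A minimal sketch of that pattern with a
# hypothetical compute callable (illustrative, not used above):
def sketch_cached_frame(save_path, compute, redo=False):
    """Return a DataFrame from disk, recomputing and saving it only when needed."""
    if (not os.path.exists(save_path)) or redo:
        frame = compute()  # hypothetical callable returning a pandas DataFrame
        frame.to_csv(save_path)
        return frame
    return pd.read_csv(save_path, index_col=0)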
def load_arrays_flowchart(save_arrays, save_spikes):
frame = pd.read_csv(save_arrays)
frame_sp = pd.read_csv(save_spikes)
v_dent_output = frame['v_dent_output']
v_mem_output = frame['v_mem_output']
stimulus_here = frame['stimulus_here']
# frame_sp = pd.DataFrame()
spike_times = frame_sp['spikes'] # .iloc[0][0]
noise_final_c = frame['noise']
return noise_final_c, spike_times, stimulus_here, v_dent_output, v_mem_output, frame
def plot_power_common_lim(axp, pp, ff, color, lw, plot_point, mult):
# if log:
# if devide == []:
# pp = 10 * np.log10(pp / np.max(pp))
# else:
# pp = 10 * np.log10(pp / devide)
# counterp += 1
# normalize_factor = eod_fr
axp.plot(ff, pp, zorder=3, color=color, linewidth=lw)
ylim_min = -45
# axp.set_ylim(ylim_min, 0)
# axp.show_spines('lb')
# plot_point = False
axp.set_xlim(-0.05, 1.5) # 2.2
# axp.show_spines('b')
axp.set_spines_bounds('b', ('ticks', 'full'))
axp.set_xticks_delta(1)
# axp.get_shared_y_axes().join(*axes)
add = 1
if plot_point:
pos = np.argmin(np.abs(ff - mult))
x_pos = ff # / normalize_factor
x_pos = x_pos[pos]
pp_point = pp
# if pp <-ylim_min:
pp_point[pp < ylim_min] = ylim_min
y_pos = pp_point[pos]
if y_pos < ylim_min:
y_pos = -38
# axp.point_to('', (x_pos+0.4, y_pos+7+add), (x_pos, y_pos+0.5+add), **asPoint)
def cut_first_parts(ff, pps_stimulus, pps_lowpass, pps, ll=40):
# [35::]
# pps_array = [pps_stimulus, pps_lowpass, pps]
for p, pp in enumerate(pps_stimulus):
pps_stimulus[p] = pps_stimulus[p][(ff > ll) & (ff < 1500)]
for p, pp in enumerate(pps_lowpass):
pps_lowpass[p] = pps_lowpass[p][(ff > ll) & (ff < 1500)]
for p, pp in enumerate(pps):
pps[p] = pps[p][(ff > ll) & (ff < 1500)]
ff = ff[ff > ll]
ff = ff[ff < 1500]
return ff, pps_stimulus, pps_lowpass, pps
def plot_spikes(g_p_t, transform_fact, v_mem_output, time, color1, spike_times, shift, deltat, fft_type, nfft, eod_fr,
xlim=0.05, exponential='', labelpad=0, counter_here=0, psd=True):
axt = plt.subplot(g_p_t[0])
plot_time(axt, transform_fact, v_mem_output, time, color1, exponential, spike_times,
lw=0.5, shift=shift, xlim=xlim, counter_here=counter_here)
# if (exponential == exponentials[-1]):
axt.set_xlabel('Time [ms]', labelpad=labelpad)
# else:
# axt.set_xticks_blank()
sampling_rate = int(1 / deltat)
spikes_mat = cr_spikes_mat(np.array(spike_times), sampling_rate, len(v_mem_output))
spikes_mat = spikes_mat[int(0.05 * sampling_rate)::]
ff, pp_s = calc_psd(spikes_mat, deltat, nfft)
axp = []
# calc power of presynaptic potential
ff, pp = calc_psd(v_mem_output, deltat, nfft)
# if exponential != exponentials[-1]:
# axp_s.set_xticks_blank()
# if (exponential == exponentials[-1]):
if psd:
gridp = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=g_p_t[1],
hspace=1.45)
axp_s = plt.subplot(gridp[0])
axp_s.set_xlabel('$\omega/\omega_1$')
else:
axp_s = []
# else:
# axp_s.set_xticks_blank()
return axt, axp, ff, pp, axp_s, pp_s
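#########################################################
# plot_spikes() turns the spike times into a trace sampled at 1/deltat (cr_spikes_mat)
# and estimates its power spectrum.  A self-contained sketch of the same idea, using a
# plain delta-like spike train and matplotlib's mlab.psd instead of the project's
# calc_psd; the function name is hypothetical:
def sketch_spike_train_psd(spike_times, deltat, duration, nfft=2 ** 14):
    """Bin spikes into a train of unit-area pulses and return (freqs, power)."""
    n = int(np.round(duration / deltat))
    train = np.zeros(n)
    idx = (np.asarray(spike_times) / deltat).astype(int)
    train[idx[idx < n]] = 1.0 / deltat  # one delta-like pulse of unit area per spike
    pp, ff = ml.psd(train - np.mean(train), Fs=1.0 / deltat, NFFT=nfft)
    return ff, pp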
def find_save_name(save_name, add=''):
# if full:
load_function = find_load_function()
if 'model' in save_name:
name0 = load_function + add + save_name.split('/')[-1].split('model/')[-1].replace(
'fft_o_forward_fft_i_forward_Hz_mV', '') + '.csv'
elif 'data' in save_name:
name0 = load_function + add + save_name.split('/')[-1].split('data/Noise/')[-1] + '_.csv'
else:
name0 = load_function + add + save_name.split('/')[-1].split('data/Noise/')[-1] + '_.csv'
name1 = name0.replace('calc_RAM_model-2_', '')
# else:
return name1, name0, load_function
def find_save_name2(save_name, add='', load_function=None):
if not load_function:
load_function = find_load_function()
if 'model' in save_name:
name0 = load_function + '_model_' + add + '_.csv' # save_name.split('/')[-1].split('model/')[-1].replace('fft_o_forward_fft_i_forward_Hz_mV', '') + '.csv'
elif 'data' in save_name:
name0 = load_function + '_data_' + add + '_.csv' # save_name.split('/')[-1].split('data/Noise/')[-1] + '_.csv'
else:
name0 = load_function + add + '_.csv' # save_name.split('/')[-1].split('data/Noise/')[-1] + '_.csv'
name1 = name0 # .replace('calc_RAM_model-2_', '')
# else:
return name1, name0, load_function
def find_load_function():
st = inspect.stack() # [-3][3]
if 'susceptibility1' in st[-1][1]:
load_function = st[-1][1].split('.py')[0].split('susceptibility1')[-1] # [1::]
elif 'susceptibility2' in st[-1][1]:
load_function = st[-1][1].split('.py')[0].split('susceptibility2')[-1] # [1::]
elif 'suseptibility' in st[-1][1]:
name = 'suseptibility'
load_function = st[-1][1].split('.py')[0].split(name)[-1].split('thesis')[-1] # [1::]
else:
names_extra_modules = names_folders()
found = False
for name in names_extra_modules:
if not found:
if name in st[-1][1]:
load_function = st[-1][1].split('.py')[0].split(name)[-1].split('thesis')[-1] # [1::]
found = True
if not found:
load_function = st[-1][1].split('.py')[0].split('suseptibility')[-1].split('thesis')[-1] # [1::]
# if '\\' in load_function:
try:
load_function = load_function.replace('\\', '')
except:
print('load_function_missing')
embed()
#embed()
return load_function
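#########################################################
# find_load_function() walks inspect.stack() to derive, from the name of the executed
# top-level script, a prefix for locally cached files.  A stripped-down sketch of the
# core idea (illustrative; it ignores the project's folder-name conventions handled above):
def sketch_script_prefix():
    """Return the name of the executed top-level script without path or extension."""
    top_level = inspect.stack()[-1]  # the outermost frame belongs to the executed script
    return os.path.splitext(os.path.basename(top_level.filename))[0]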
def load_data_susept(path_sascha, save_name, redo=False, cells=[], add='', load_function=None, load_version='csv',
load_type='mat', trial_nr=[], stimulus_length=[], amp=[], file_name=[]):
names = ['spikes', 'isf', 'osf']
name1_orig, name0_orig, load_function_orig = find_save_name(save_name,
add=add) # name1, name0, load_function = find_save_name(save_name, add = add)
name1, name0, load_function = find_save_name2(save_name, add=add,
load_function=load_function) # name1, name0, load_function = find_save_name(save_name, add = add)
# name1 = name1 + add
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
if version_comp == 'develop':
if len(cells) > 0:
dirs = os.listdir()
# load_function = find_load_function()
for dir in dirs:
if (dir.startswith(name1.replace(add, '').replace('.csv', ''))) & ('.py' not in dir):
# print(dir)
keep = False
for cell in cells:
if cell in dir:
keep = True
if not keep:
print('needs removement ' + dir)
os.remove(dir)
# print(name1)
if load_type == 'mat':
remove_old = False
# when running on Sascha's computer
# if version_comp == 'code':
# embed()
# data = load_mat(load_version, name1, names, save_name)
if version_comp != 'public':
if (not os.path.exists(name1)) | (
not os.path.exists(name1.replace('.csv', '') + '_' + names[-1] + '.npy')) | (
redo == True): # if (redo == True) | (not os.path.exists(name1)): os.path.exists(path_sascha):
print('redo')
# & (not os.path.exists(name0))
dated_up = update_ssh_file(path_sascha)
# if not os.path.exists(path_sascha):
# print('\n ssh not implemented: copy this file\n' + str(path_sascha)) # +'\n'+
# dated_up = update_ssh_file(path_sascha)
# ssh.load_system_host_keys()
# /home/rudnaya/Desktop/Masterarbeit/work/Putty_keys
# ssh -i ~/path/to/private_key_file user@server
# try:
# if 'isi' not in add:
if 'pkl' in path_sascha:
data = pd.read_pickle(path_sascha) # pd.read_pickle(path),low_memory=False
else:
data = pd.read_csv(path_sascha) # ,low_memory=Falsepd.read_pickle(path)
if len(cells) > 0:
data = data[data['cell'].isin(cells)]
# except:
# embed()
# if len(cells) >0:
# model = model[model['cell'].isin(cells)] # & (model_cell.file_name == file)& (model_cell.power == power)]
dirs = os.listdir()
# if remove_old:
# for d in dirs:
# if (load_function in d) & ('pdf' not in d) & ('py' not in d):
# os.remove(d)
if 'osf' in data.keys():
data = data.drop(['d_isf_all', 'd_osf_all',
'd_osf1', 'var_stim', 'var_spikes', 'brainregion', 'structure',
'species', 'dev', 'cell',
'mt_nr_last', 'way', 'loop',
'trials', 'freqs',
'freqs_idx', 'stimulus'], axis=1) # 'osf', 'isf','nfft','sampling','file_name2'
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
# spikes = data.spikes
if version_comp == 'develop':
if 'model' in save_name:
data.to_csv(
load_function + save_name.split('/')[-1].replace('calc_RAM_model-2_', '').split('model/')[
-1].replace(
'fft_o_forward_fft_i_forward_Hz_mV', '') + '.csv')
elif 'data' in save_name:
# save_name = load_function + save_name.split('/')[-1].replace('calc_RAM_model-2_', '').split('data/Noise/')[-1]
if 'pkl' in load_version:
data.to_pickle(name1.replace('csv', 'pkl', ))
else:
data.to_csv(name1.replace('pkl', 'csv')) # '_.csv'
# save_name = load_function + 'base_frame_all_arrays'
# calc_RAM_data_spikes__2022-01-06-aa-invivo-1
try:
save_object_from_frame(data, names, name1)
except:
print('parse thing')
embed()
# todo: load the spikes separately here again
if 'pkl' in load_version:
if os.path.exists(name1.replace('.csv', '.pkl')):
data = pd.read_pickle(name1.replace('.csv', '.pkl'))
else:
data = []
else:
if os.path.exists(name1):
data = pd.read_csv(name1, index_col=0)
if 'data' in save_name:
try:
data = load_object_to_pandas(names, name1, index_col=0)
except:
print('loading thing')
embed()
# save_name = load_function + 'base_frame_all_arrays'
else:
data = []
# at this point the indices are still strings, but we convert them later!
# todo: possibly build this in here already, similar to this function,
# namely with new_keys, stack_plot = convert_csv_str_to_float(stack_final)
# new_keys = stack_plot.index
# new_keys = list(map(str, new_keys))
# stack_plot = stack_plot.astype(complex)
# stack_plot[new_keys] = stack_plot[new_keys].apply(pd.to_numeric)
# stack_plot.columns = list(map(float, stack_plot.columns))
#
# dtype = {'col1': str, 'col2': float, 'col3': int}
# new_keys = data.index
# stack_plot = stack_final[np.array(list(map(float, new_keys)))]
# keys = stack_final.keys() # .astype(float)
# stack_final[keys[0:len(new_keys)]].name = np.array(list(map(float, new_keys)))
# stack_final.rename(columns=dict(zip(keys[0:len(new_keys)], np.array(list(map(float, new_keys))))))
# stack_final[keys[0:len(new_keys)]].astype("float")
# print('key thing')
else:
data = load_mat(load_version, name1, names, save_name)
else:  # when not running on Sascha's computer
data = load_mat(load_version, name1, names, save_name)
else:  # here we load the arrays (spikes, isf, osf) separately
remove_old = False
# when running on Sascha's computer
name1 = name1.replace('.csv', load_type + '_.csv')
if version_comp != 'public':
# redo = False
if (not os.path.exists(name1)) | (redo == True): # if (redo == True) | (not os.path.exists(name1)):
# & (not os.path.exists(name0))
# try:
# if 'isi' not in add:
if 'pkl' in path_sascha:
data = pd.read_pickle(path_sascha) # pd.read_pickle(path),low_memory=False
else:
data = pd.read_csv(path_sascha, low_memory=False) # pd.read_pickle(path)
# frame_spikes = load_object_to_pandas(names, name1)
if len(cells) > 0:
data = data[data['cell'].isin(cells)]
# data
# except:
# embed()
# if len(cells) >0:
# model = model[model['cell'].isin(cells)] # & (model_cell.file_name == file)& (model_cell.power == power)]
dirs = os.listdir()
# if remove_old:
# for d in dirs:
# if (load_function in d) & ('pdf' not in d) & ('py' not in d):
# os.remove(d)
# todo: something here is still wrong
data_short = data[(data.trial_nr == trial_nr) & (data.amp == amp) & (
np.round(data.stimulus_length, 3) == np.round(stimulus_length, 3)) & (
data.file_name == file_name)]
# if len(data_short)> 0:
if (load_type == 'isf') | (load_type == 'osf'):
desired_val = get_array_from_pandas(data_short[load_type]) # 'isf'
else:
desired_val = get_array_from_pandas(data_short[load_type], abs=False) # 'isf'
if (load_type == 'spikes'): # (len(desired_val)<1) &
desired_val = load_spikes_RAM(amp, file_name, load_type, path_sascha)
# here we handle the burst-corrected spikes differently
# todo: the burst correction happens here
split_val = add.split('cell')[1]
cell = split_val.split('_')[0]
if 'burst' in add:
lim_here = find_lim_here(cell, burst_corr=add)
eod_fr = data.eod_fr.iloc[0]
spikes_all, isi, frs_calc, spikes_cont = load_spikes(desired_val, eod_fr, ms_factor=1)
if np.min(np.concatenate(isi)) < lim_here:
isi, desired_val, frs_calc2 = correct_burstiness(isi, desired_val,
[eod_fr] * len(desired_val),
[eod_fr] * len(desired_val), lim=lim_here,
burst_corr=add, ms_factor=1)
try:
data = save_csv_to_pandas(desired_val)
except:
print('saving thing')
embed()
# else:
# data = []
# data_short[load_type]
# data_short['isf']
# if 'osf' in data.keys():
# data = data.drop(['d_isf_all', 'd_osf_all',
# 'd_osf1', 'var_stim', 'var_spikes', 'brainregion', 'structure',
# 'species', 'dev', 'cell',
# 'mt_nr_last', 'way', 'loop',
# 'trials', 'freqs',
# 'freqs_idx'], axis=1) # 'osf', 'isf','nfft','sampling','file_name2'
# spikes = data.spikes
if len(data) > 0:
if version_comp == 'develop':
if 'model' in save_name:
data.to_csv(
load_function +
save_name.split('/')[-1].replace('calc_RAM_model-2_', '').split('model/')[
-1].replace(
'fft_o_forward_fft_i_forward_Hz_mV', '') + '.csv')
elif 'data' in save_name:
print('saved data')
if 'pkl' in load_version:
data.to_pickle(name1.replace('.csv', '.pkl')
) # load_function + save_name.split('/')[-1].replace('calc_RAM_model-2_', '').split('data/Noise/')[
# -1] + '_.pkl'
else:
data.to_csv(name1
) # load_function + save_name.split('/')[-1].replace('calc_RAM_model-2_', '').split('data/Noise/')[
# -1] + '_.csv'
# todo: load the spikes separately here again
if 'pkl' in load_version:
if os.path.exists(name1.replace('.csv', '.pkl')):
data = pd.read_pickle(name1.replace('.csv', '.pkl'))
else:
data = []
else:
if os.path.exists(name1):
data = pd.read_csv(name1, index_col=0)
data.columns = list(map(float, data.columns))
else:
data = []
# at this point the indices are still strings, but we convert them later!
# todo: possibly build this in here already, similar to this function,
# namely with new_keys, stack_plot = convert_csv_str_to_float(stack_final)
# new_keys = stack_plot.index
# new_keys = list(map(str, new_keys))
# stack_plot = stack_plot.astype(complex)
# stack_plot[new_keys] = stack_plot[new_keys].apply(pd.to_numeric)
# stack_plot.columns = list(map(float, stack_plot.columns))
#
# dtype = {'col1': str, 'col2': float, 'col3': int}
# new_keys = data.index
# stack_plot = stack_final[np.array(list(map(float, new_keys)))]
# keys = stack_final.keys() # .astype(float)
# stack_final[keys[0:len(new_keys)]].name = np.array(list(map(float, new_keys)))
# stack_final.rename(columns=dict(zip(keys[0:len(new_keys)], np.array(list(map(float, new_keys))))))
# stack_final[keys[0:len(new_keys)]].astype("float")
# print('key thing')
else:
try:
data = redo_load_spikes(load_version, name1)
except:
print('load something0')
embed() # data = data[data.index]
else:  # when not running on Sascha's computer
try:
data = redo_load_spikes(load_version, name1)
except:
print('load something0')
embed()
# embed()
# if len(data) < 1:
# print('data not there')
# embed()
# embed()
return data
def redo_load_spikes(load_version, name1):
if os.path.exists(name1):
if 'pkl' in load_version:
data = pd.read_pickle(name1.replace('.csv', '.pkl'))
else:
data = pd.read_csv(name1, index_col=0)
# try:
data.columns = list(map(float, data.columns))
# except:
# print('float something')
# embed()
test = False
if test:
data = pd.read_csv('../../work/code/suseptibility/' + name1, index_col=0)
else:
data = []
return data
def load_mat(load_version, name1, names, save_name):
if os.path.exists(name1):
if 'pkl' in load_version:
data = pd.read_pickle(name1.replace('.csv', '.pkl'))
else:
data = pd.read_csv(name1, index_col=0)
if 'data' in save_name:
data = load_object_to_pandas(names, name1, index_col=0)
else:
data = []
return data
def load_spikes_RAM(amp, file_name, load_type, path_sascha, data_name=None):
spikes_path = load_only_spikes_RAM(data_name=data_name, path_sascha=path_sascha)
spikes_data = pd.read_pickle(spikes_path)
spikes_data_short = spikes_data[(spikes_data.amp == amp) & (
spikes_data.file_name == file_name)]
desired_val = get_array_from_pandas(spikes_data_short[load_type], abs=False)
# embed()
return desired_val
def load_only_spikes_RAM(data_name='', emb=True, path_sascha='', core_name='calc_RAM_data_spikes__'):
if not data_name:
data_name = path_sascha.split('.pkl')[0].split('_')[-1]
spikes_path = load_folder_name('calc_RAM') + '/' + core_name + data_name + '.pkl'
if emb:
if not os.path.exists(spikes_path):
dated_up = update_ssh_file(spikes_path)
# embed()
return spikes_path
def save_csv_to_pandas(desired_val):
if len(np.shape(desired_val)) == 1:
# this is mainly relevant for the spikes
data = pd.DataFrame()
length = []
for d in range(len(desired_val)):
length.append(len(desired_val[d]))
# try:
data[np.argmax(length)] = desired_val[np.argmax(length)]
# except:
# print('sp thing')
# embed()
for d in range(len(desired_val)):
data.loc[0:len(desired_val[d]) - 1, d] = desired_val[d]
# todo: fill this so that spike trains of different lengths fit in
else:
# embed()
try:
np.shape(desired_val)[1]
except:
print('shape thing')
embed()
if np.shape(desired_val)[1] > np.shape(desired_val)[0]:
data = pd.DataFrame(np.transpose(desired_val))
else:
data = pd.DataFrame(desired_val)
return data
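#########################################################
# save_csv_to_pandas() stores trials of unequal length (typically spike-time lists) as
# DataFrame columns, padding shorter trials with NaN.  The same result can be obtained
# more directly with one pandas Series per trial; a minimal sketch (hypothetical name,
# not used by the code above):
def sketch_ragged_to_frame(trials):
    """One column per trial; shorter trials are padded with NaN by pandas."""
    return pd.DataFrame({i: pd.Series(np.asarray(t)) for i, t in enumerate(trials)})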
def RAM_norm_data(D_c_sig, stack_plot, trials_stim, abs=True, power=2, stack_here=[]):
# if type(D_c_sig) != list:  # this is the case where we divide it by A
# embed()
if len(stack_here) == 0:
# 2*D*c is the intensity and (2*D*c)**2 is the power spectrum
power_sp = (2 * D_c_sig) ** 2
power_spektrum = ((2 * power_sp))
# * stimulus_length input_scaling* *300/2000*stimulus_length
norm = 1 / power_spektrum
# stack_plot = ((np.abs((stack_plot) * norm)) ** power / trials_stim) #
# stack_plot = ((np.abs((stack_plot) * norm)) ** 1 / trials_stim) #
try:
stack_plot = ((np.abs((stack_plot.astype('complex')) * norm)) ** 1 / trials_stim) #
except:
print('stack something')
embed()
else:
# here we divide it by something else
# todo: this still has to be adapted for variants other than StimPreSaved
# print('right norm')
try:
isf = get_isfs(stack_here)
except:
print('isf thing')
embed()
stack_plot = norm_suscept_whole(abs, isf, stack_here, stack_plot, trials_stim, power)
test = False
if test:
test_complex2()
# embed()
# ((np.abs((stack_plot) * norm)) ** 2 / trials_stim)
# stack_plot = ((np.abs((stack_plot) * norm)) ** power / trials_stim) #
return stack_plot
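#########################################################
# RAM_norm_data() normalizes the susceptibility by the power spectrum of the RAM stimulus.
# For white noise that spectrum is flat, (2*D*c)**2, and an additional factor of 2 enters
# the denominator (norm = 1 / (2 * (2*D*c)**2) above) before dividing by the number of
# stimulus trials.  A sketch of that branch, assuming chi2_raw already holds the
# trial-summed cross spectra (names are illustrative):
def sketch_ram_norm(chi2_raw, D_c_sig, trials_stim):
    """Normalize |chi2| by the flat RAM stimulus power spectrum and the trial count."""
    power_spectrum = 2 * (2 * D_c_sig) ** 2
    return np.abs(np.asarray(chi2_raw, dtype=complex)) / power_spectrum / trials_stim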
def norm_suscept_whole(abs, isf, stack_here, stack_plot, trials_stim, power=2):
mat_cut, mats_all_here = norm_suscept(abs, isf, stack_here, stack_plot, trials_stim, power)
test = False
if test:
test_complex()
# todo: something here is still not right
# todo: still build in the 0.5 seconds here
# embed()
try:
stack_plot = mats_all_here / (mat_cut) # norm_char2
except:
print('stack something')
embed()
return stack_plot
def norm_suscept(abs, isf, stack_here, stack_plot, trials_stim, power=2):
##############################################
# this is a power spectrum: take the snippets, compute |fft|**2 for each, and then average
# from this we generate the values in the next step
# so it is correct when power is 2
# todo: still verify the factors of two
# analogous to the above, a factor of two probably has to be applied here as well
# the factor of two in norm_power is correct
# this is already right as it is
# so here we average over the power spectra
# the two ways seem to do the same thing, but we can also do it this way round
isf_mean = np.mean((np.abs(isf)) ** power, axis=0)
# isf = get_isfs(stack_here)
isf_mean = np.mean(isf * np.conj(isf), axis=0)
# embed()
# isf_mean = np.mean((2 * np.abs(isf)) ** power, axis=0)
norm_char2 = norm_power(isf_mean, stack_here, power=power)
# norm_char2 = norm_power(isf_mean, stack_here)
# norm_char22_dived = find_norm_susept(stack_here, isf_mean[f_range])
# norm_char2 = 1 / norm_char22_dived
# mats_all_here / norm_char2
# stack_here.isf
# stack_here.freqs
# stack_here.snippets #
# f, restrict = restrict_freqs(f_orig, max_f)
# norm_factor = 2*np.dot(D_c_sig[0])
# stack_plot = ((np.abs((stack_plot) * norm)) ** power / trials_stim) #
# stack_plot = ((np.abs((stack_plot) * norm)) ** 1 / trials_stim) #
# mat_to_div = stack_here[keys[keys < ends_nr[e]]]
# mat_to_div = mat_to_div.loc[mat_to_div.index < ends_nr[e]]
if abs:  # DEFAULT! always true
mat_complex = np.abs((stack_plot.astype('complex')))
else:
mat_complex = (stack_plot.astype('complex'))
reload = False
# embed()
stim_len = float(stack_here.nfft.iloc[0].split('sec')[0])
if reload:  # with the reload we can test if the presaved matrices are reproducible from the formula alone (they are)
deltat = 1 / stack_here.sampling.iloc[0]
osf = get_isfs(stack_here, isf_name='osf')
scales, cross_norm, f_mat2, mats_all_here_orig, mats_all_here_norm, norm_char3 = find_norm_compars(isf,
isf_mean,
osf,
deltat,
stack_plot)
# mat_cut = norm_char3[0:len(stack_plot), 0:len(stack_plot)]
mats_all_here = pd.DataFrame(np.abs(mats_all_here_orig) * stim_len, index=mat_complex.index,
columns=mat_complex.columns)
else:
mats_all_here = (mat_complex * stim_len) / (trials_stim)
# embed()
mat_cut = norm_char2[0:len(stack_plot), 0:len(stack_plot)]
return mat_cut, mats_all_here
def norm_power(isf_mean, stack_here, power=2):
f_range = np.arange(len(stack_here))
power_isf_1, power_isf_2 = find_isf_matrices(stack_here, isf_mean[f_range])
# and the factor of two in front is there because that is how it appears in the formula!
norm_char2 = (2 * np.array(np.abs(power_isf_1) ** (2 / power) * np.abs(power_isf_2) ** (2 / power)))
return norm_char2
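#########################################################
# norm_suscept() and norm_power() build the denominator of the second-order susceptibility
# from the snippet-averaged stimulus power spectrum: mean(|X(f)|**2) over snippets, which
# equals mean(X * conj(X)).  A minimal sketch of that average (isf: one stimulus Fourier
# transform per snippet; function name is hypothetical):
def sketch_snippet_power(isf):
    """Snippet-averaged power spectrum; identical to np.mean(isf * np.conj(isf), axis=0).real."""
    isf = np.asarray(isf)
    return np.mean(np.abs(isf) ** 2, axis=0)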
def get_isfs(stack_here, isf_name='isf'):
isf = stack_here[isf_name]
isf = isf.dropna()
isf = np.concatenate(np.array(isf))
return isf
def colorbar_outside(ax, im, fig=None, add=0, round_digit=2, shrink=0.6, delta=None, width=0.02, pos_axis='bottom',
orientation='top',
top=False, ticks=True, ls=None): # ls = 8
if not fig:
fig = plt.gcf()
if top:
# embed()
cbar, left, bottom, width, height = colorbar_body(ax, plt.gcf(), im, add=add, orientation=orientation,
pos_axis=pos_axis, ls=ls, ticks=ticks)
cbar.outline.set_visible(False)
else:
pos = ax.get_position() # [[xmin, ymin], [xmax, ymax]].
pos = np.array(pos)
xmin = pos[0][0]
ymin = pos[0][1]
xmax = pos[1][0]
ymax = pos[1][1]
# embed()
left = xmax + 0.005
bottom = ymax + 0.06 + add # 85
bottom = ymin # - 0.076+add#85
height = (ymax - ymin)
cbar_ax = fig.add_axes([left, bottom, width, height]) # [left, bottom, width, height
cbar_ax.xaxis.set_label_position(pos_axis)
cbar = fig.colorbar(im, orientation="vertical", cax=cbar_ax, shrink=shrink) # v
cbar.outline.set_visible(False)
ticks_direction = 'in'
# embed()
if ticks:
if ls:
cbar_ax.tick_params(labelsize=ls, direction=ticks_direction) # , length=5
else:
cbar_ax.tick_params(direction=ticks_direction)
im.get_clim()
#
if delta:
# embed()
# round_digit = 1
range_here = np.arange(np.round(im.get_clim()[0], round_digit),
np.round(im.get_clim()[-1], round_digit) + delta / 2, delta)
range_here = np.round(range_here, round_digit)
zero = int(0)
try:
range_here[np.where(range_here == 0)[0][0]] = np.round(zero)
except:
print('range something')
embed()
# range_here[np.where(range_here == 0)[0][0]].as_type(int) = #int(range_here[np.where(range_here == 0)[0][0]])
# embed()
cbar.ax.set_yticks(range_here)
# cbar.ax.set_yticklabels(range_here)
return cbar, left, bottom, width, height
def colorbar_body(ax, fig, im, orientation='top', ticks=True, ls=8, add=0, pos_axis='bottom'):
# embed()
pos = ax.get_position() # [[xmin, ymin], [xmax, ymax]].
pos = np.array(pos)
xmin = pos[0][0]
ymin = pos[0][1]
xmax = pos[1][0]
ymax = pos[1][1]
left = xmin
if orientation == 'top':
bottom = ymax + 0.06 + add # 85
elif orientation == 'bottom':
bottom = ymin - 0.076 + add # 85
width = (xmax - xmin)
height = 0.01
cbar_ax = fig.add_axes([left, bottom, width, height]) # [left, bottom, width, height
cbar_ax.xaxis.set_label_position(pos_axis)
cbar_ax.set_xticklabels(cbar_ax.get_xticklabels(), rotation='vertical')
cbar_ax.tick_params(labelsize=6)
cbar = fig.colorbar(im, orientation="horizontal", cax=cbar_ax)
ticks_direction = 'in'
# embed()
if ticks:
if ls:
cbar_ax.tick_params(labelsize=ls, direction=ticks_direction) # , length=5
else:
cbar_ax.tick_params(direction=ticks_direction) # , length=5
return cbar, left, bottom, width, height
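# Minimal usage sketch for colorbar_outside() above; the data are random and only for illustration,
# and the call assumes the default (side) placement rather than the top=True branch.
def example_colorbar_outside_usage():
    fig, ax = plt.subplots()
    im = ax.imshow(np.random.rand(10, 10), aspect='auto')
    # the colorbar gets its own axes placed just right of the image axes
    cbar, left, bottom, width, height = colorbar_outside(ax, im, fig=fig, width=0.02)
    cbar.set_label('power')
    return fig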
def started_from_makefile():
# Get the current process
current_process = psutil.Process()
# Get the parent process
parent_process = current_process.parent()
# Check if the parent process is 'make'
return "make" in parent_process.name().lower()
def save_sheme_representation(show=False, show_pdf2=True, emb=False, add='', pdf=False, png=True, svg=False, jpg=False):
if 'miniconda3' in inspect.stack()[1][1]:
module_name = inspect.stack()[-16][1].split('/')[-1].split('.')[0]
last_function = inspect.stack()[-16][4][0].split('(')[0].strip()
if show == []:
show = True
#
else:
module_name = inspect.stack()[-1][1].split('\\')[-1].split('.')[0]
last_function = inspect.stack()[-1][4][0].split('(')[0].strip()
list = []
next = False
last_func_true = False
function_name = []
for i in range(len(inspect.stack())):
save_name = inspect.stack()[i][3]
# print(save_name)
list.append(save_name)
if emb:
embed()
if last_func_true:
if save_name != '<module>':
function_name.extend(save_name)
# last_func_true
if next == True:
last_function = save_name
last_func_true = True
# else:
# last_func_true = False
if save_name == 'save_sheme_representation': # 'save_visualization':
next = True
else:
next = False
if len(function_name) < 1:
function_name = ''
else:
function_name = "".join(function_name)
if show == []:
show = False
# if not os.path.isdir(module_name):
# try:
# os.mkdir(module_name)
# except:
# print('mkdir thing')
# embed()
version_comp, subfolder, mod_name_slash, mod_name, subfolder_structure = find_code_vs_not()
# embed()
# add_on_vis# = add_on_vis + '-' + mod_name
seperate_py_files = find_separate_files(mod_name_slash, module_name)
# function_name2 = function_name
if seperate_py_files:
        # so if we have separate .py files we want to save this once in the parent
        # directory, there under the function name and not under the .py file name,
        # because at this point we do not really care where it lives
        # first we save it under the MODULE name,
        # NOT under the function name
        # embed() subfolder != ''
if (version_comp == 'public') | (
                version_comp == 'develop'):  # we only need this when we are in the subfolders
# embed()
try:
plt.savefig(module_name + '.pdf')
except:
# p = subprocess.Popen([module_name+'.pdf'], shell=True)
# p.kill()
# embed()
                # ok, closing the whole application like this is still suboptimal
                # but then I will have to come up with something better
if (version_comp != 'public'):
os.system('taskkill /f /im Acrobat.exe')
plt.savefig(module_name + '.pdf')
if version_comp != 'public': #
plt.savefig(module_name + '.png')
##########
            # I do not feel like opening the PDFs by hand every time, so they are opened
            # automatically when show was set to true
if version_comp != 'public': #
# embed()
# if show_pdf:
########
                # check if this process has been started from a makefile
makestarted = started_from_makefile()
if makestarted:
show_pdf2 = False
# embed()
# here it might be that I update this as a global variable
dict_new = dict_default()
if 'show_pdf2' in dict_new.keys():
show_pdf2 = dict_new['show_pdf2']
# locals().update(dict_new)
# locals().update(dict_new)
print(show_pdf2)
# embed()
if show_pdf2:
subprocess.Popen([module_name + '.pdf'], shell=True)
if (version_comp == 'public') | (version_comp == 'develop'): #
            # here we want the function name to stay the same
            # function_name = module_name
            # but we want the module name to adapt,
            # and not to the file we have but to the module
            # what we save as png here gets its name changed to the module name
function_name2 = module_name
module_name = 'plt_' + mod_name_slash.replace('/', '')
# embed()
else:
        # otherwise, when there is only one module, save it under the function name
plt.savefig(function_name + '.pdf')
if version_comp != 'public': #
plt.savefig(function_name + '.png')
        # here we want the function name to stay the same
        # function_name = module_name
module_name = 'plt_' + mod_name_slash.replace('/', '')
###########################
    # this is only done on my own computer
    # in the shared version these folders do not exist!
if version_comp != 'public': #
# embed()
last_function = last_function.replace('-save_all', '').replace('save_all', '')
# save_visualization
last_function = last_function.replace('-save_visualization', '').replace('save_visualization', '')
if not os.path.exists(subfolder + 'visualize_all'):
os.mkdir(subfolder + 'visualize_all')
if not os.path.exists(subfolder + module_name):
os.mkdir(subfolder + module_name)
if not os.path.exists(subfolder + module_name + '/pdf_folder'):
os.mkdir(subfolder + module_name + '/pdf_folder')
# gca = plt.gca()
# fig_there = plt.get_fignums()
fig = plt.gcf()
fig_there = fig.get_axes()
# ax.get_data()
# embed()
if png:
# embed()
if add != '':
path = subfolder + module_name + '/' + function_name + last_function + '_' + add + '.png'
path_pdf = subfolder + module_name + '/pdf_folder/' + function_name + last_function + '_' + add + '.pdf'
path_pdf_png = subfolder + module_name + '/pdf_folder/' + function_name + last_function + '_' + add + '.png'
if (len(fig_there) > 0) | (not os.path.exists(path)):
if not os.path.exists(path_pdf):
plt.savefig(path_pdf)
plt.savefig(path_pdf_png)
plt.savefig(path)
plt.savefig(
subfolder + 'visualize_all/' + mod_name + module_name + '-' + function_name + last_function + '_' + add + '.png')
if subfolder_structure != '':
# if not os.path.exists(path):
# os.mkdir(path)
plt.savefig(path_pdf)
else:
path = subfolder + module_name + '/' + function_name + last_function + '.png'
path_pdf = subfolder + module_name + '/pdf_folder/' + function_name + last_function + '.pdf'
path_pdf_png = subfolder + module_name + '/pdf_folder/' + function_name + last_function + '.png'
# embed()
if (len(fig_there) > 0) | (not os.path.exists(path)):
if not os.path.exists(path_pdf):
# embed()
plt.savefig(path_pdf)
                    plt.savefig(path_pdf_png)  # this keeps a representation in the original folder
plt.savefig(path)
# embed()
plt.savefig(
subfolder + 'visualize_all/' + module_name + '-' + function_name + last_function + '.png')
                    if subfolder_structure != '':  # here it is also saved again into the new folder
                        # if not os.path.exists(path_pdf):
                        #     os.mkdir(path_pdf)  # mkdir on a .pdf file path would break the savefig below
                        plt.savefig(path_pdf)
# except:
# print('pdf thing')
# embed()
##############################
    # we only want to plot over it if a figure actually exists!
path1 = module_name + '/' + function_name + last_function + add
path2 = 'visualize_all/' + mod_name + module_name + '-' + function_name + last_function + add
if pdf:
if (len(fig_there) > 0) | (not os.path.exists(subfolder + path1 + '.pdf')):
plt.savefig(subfolder + path1 + '.pdf')
plt.savefig(subfolder + path2 + '.pdf')
if svg:
if (len(fig_there) > 0) | (not os.path.exists(subfolder + path1 + '.svg')):
plt.savefig(subfolder + path1 + '.svg')
plt.savefig(subfolder + path2 + '.svg')
if jpg:
if (len(fig_there) > 0) | (not os.path.exists(subfolder + path1 + '.jpg')):
plt.savefig(subfolder_structure + path1 + '.jpg')
plt.savefig(subfolder + path2 + '.jpg')
# embed()
# if show:
# plt.show()
test = False
if test:
test_parser()
def find_separate_files(mod_name_slash, module_name):
    # if the file has the same name as the folder it is a single file
    # otherwise we assume separate .py files
if module_name == mod_name_slash.replace('/', ''):
seperate_py_files = False
else:
seperate_py_files = True
return seperate_py_files
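# Small sketch of the inspect.stack() pattern used by save_sheme_representation() and
# save_visualization(): the outermost frame names the script that was run, the frame right above
# the save helper names the plotting function. Names below are hypothetical.
def example_caller_names():
    stack = inspect.stack()
    module_file = stack[-1][1]                              # file of the outermost frame
    module_name = os.path.basename(module_file).split('.')[0]
    caller_function = stack[1][3] if len(stack) > 1 else '<module>'
    return module_name, caller_function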
def global_params():
# embed()
print('')
# if show_global:
# global show_global
def save_visualization(individual_tag='', show=True, save_sheme=True, show_anything=True, svg=False, pdf=False, add='',
jpg=False, png=True, emb=False, counter_contrast=0,
savename=''):
# embed()
dict_new = dict_default()
locals().update(dict_new)
# if 'show_global' in globals():
# show = show_global
# embed()
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
# plt.close()
# plt.figure()
if save_sheme:
save_sheme_representation(add=add) # show=False
# embed()
if savename == '':
if 'miniconda3' in inspect.stack()[1][1]:
module_name = inspect.stack()[-16][1].split('/')[-1].split('.')[0]
last_function = inspect.stack()[-16][4][0].split('(')[0].strip()
# show = True
else:
module_name = inspect.stack()[-1][1].split('\\')[-1].split('.')[0]
last_function = inspect.stack()[-1][4][0].split('(')[0].strip()
list = []
next = True
for i in range(len(inspect.stack())):
save_name = inspect.stack()[i][3]
# print(save_name)
list.append(save_name)
if next == True:
last_function = save_name
if save_name == 'save_visualization': # 'save_all'
next = True
else:
next = False
if emb:
embed()
# show = True
seperate_py_files = find_separate_files(mod_name_slash, module_name)
if version_comp != 'public':
# embed()
if subfolder_path != '': #
module_name = '../plt_' + mod_name_slash.replace('/', '')
        #         # this is how we have done it so far, i.e. always different .py files
        #         # but maybe merging it all into one would also be fine
# #if not seperate_py_files:
# #embed()
# save_folder_structure(add, subfolder_path, counter_contrast, individual_tag, jpg, last_function, module_name,
# pdf, png, svg)
# last_function = module_name
        #         #else:  # in case I do merge everything into one big file after all, which might be simpler
# else:
save_folder_structure(add, '', individual_tag, jpg, last_function,
module_name, pdf, png, svg)
        # elif subfolder_path == '':  # this is for the case that we are in the parent folder
# save_folder_structure(add, subfolder_path, counter_contrast, individual_tag, jpg, last_function, module_name, pdf, png, svg)
else:
if counter_contrast == 0:
savename_new = savename
else:
savename_new = savename[0:-4] + str(counter_contrast) + savename[
-4::]
plt.savefig(savename_new)
# if version_comp != 'public':
# t1 = time.time()
# t2 = time.time() - t1
# print(f'save time {t2}')
counter_contrast += 1
t1 = time.time()
# embed()
    # when we are inside a function and plot several times, we want it neither to show nor to close in between!
if show_anything:
        # only show plots when we are on Windows!
if os.name == 'nt':
# embed()
if show:
                # only show interactively in the parent folder; otherwise we look at it directly as a PDF
if subfolder_path == '': #
# print('somehow show')
# embed()
plt.show()
else:
plt.close()
else:
plt.close()
    # I find this helpful; everyone else does not necessarily need it
if version_comp != 'public':
t2 = time.time() - t1
print(f'plot time {t2}')
# embed()
return last_function
def save_folder_structure(add, add_on, individual_tag, jpg, last_function, module_name, pdf, png,
svg):
print(add_on + module_name) # initial_function+ initial_function+
# if initial_function != 'plt_ROC':
# embed()
# embed()
t1 = time.time()
if not os.path.isdir(add_on + module_name):
os.mkdir(add_on + module_name)
if not os.path.isdir(add_on + module_name + '/' + last_function):
# embed()
os.mkdir(add_on + module_name + '/' + last_function)
    # we no longer do these detailed versions!
# if not os.path.isdir(add_on + module_name + '/' + '_detailed'):
# os.mkdir(add_on + module_name + '/' + '_detailed')
#######################################################
# save all figures in the function folder
# embed()
if individual_tag == '':
individual_tag_here = last_function
else:
individual_tag_here = individual_tag
if png:
try:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.png')
except:
try:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.png')
except:
print('saving thing')
print(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.png')
try:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.png')
except:
print('still plotting thing')
embed()
if pdf:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.pdf')
if jpg:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.jpg')
if svg:
plt.savefig(add_on + module_name + '/' + last_function + '/' + individual_tag_here + add + '.svg')
#######################################################
    # save detailed versions of all figures in one folder
    # embed()
    # we no longer do these detailed versions!
# if png:
# plt.savefig(add_on + module_name + '/' + '_detailed/' + last_function + '_' + individual_tag + add + '.png')
# if pdf:
# plt.savefig(add_on + module_name + '/' + '_detailed/' + last_function + '_' + individual_tag + add + '.pdf')
# if jpg:
# plt.savefig(add_on + module_name + '/' + '_detailed/' + last_function + '_' + individual_tag + add + '.jpg')
# if png:+initial_function+
# plt.savefig(initial_function + '_detailed/' + last_function + individual_tag + add + '.png')
# if pdf:
# plt.savefig(initial_function + '_detailed/' + last_function + individual_tag + add + '.pdf')
# if jpg:
# plt.savefig(initial_function + '_detailed/' + last_function + individual_tag + add + '.jpg')
# as save visualization
# if pdf:
# plt.savefig(initial_function + '/' + last_function + add + '.pdf')
# if jpg:
# plt.savefig(initial_function + '/' + last_function + add + '.jpg')
t2 = time.time() - t1
print(f'save time {t2}')
# counter_contrast += 1
# embed()
# plt.savefig(
def calc_base_reclassification(names: object = ['calc_base/calc_base_data-base_frame_firingrate_.pkl',
'calc_base/calc_base_data-base_frame.pkl'],
cell_type='cell_type_info'): # -> object:
    # classification based on the baseline
for name in names:
if 'pkl' in name:
frame = pd.read_pickle(name)
else:
            frame = pd.read_csv(name)  # , index_col=0  # the index_col could be the reason (for earlier problems)!
# frame = pd.read_csv(load_folder_name('calc_base')+'/calc_base_data-base_frame_overview.csv')
# frame1 = pd.read_csv(name)
# frames = [frame, frames1]
# for frame in frames:
# cell_types = frame['cell_type_info'].unique()
frame = unify_cell_names(frame, cell_type=cell_type)
try:
frame['cell_type_reclassified'] = frame[cell_type] # 'cell_type_info'
except:
print('cell type thing')
embed()
# cells = frame[((frame.gwn == True) | (frame.fs == True))].cell.unique()
# '2021-08-02-aa-invivo-1'
# stay
# frame[frame['cell'] == '2013-04-16-ac-invivo-1']['cell_type_reclassified'] = ' Ampullary'
# change
# frame.dropna#'2010-07-29-ag-invivo-1': ' Ampullary', '2010-06-21-ai-invivo-1': ' P-unit',
# '2010-07-13-au-invivo-1': ' P-unit_problem','2010-07-29-ae-invivo-1': 'unkown',
# '2010-07-13-ae-invivo-1': ' P-unit_problem',#2012-03-20-ad
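    # note: a few cells appear more than once in the dict below; for duplicate keys Python keeps
    # only the last entry, so the later classification is the one that takes effect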
cells_changed = {'2010-04-16-af-invivo-1': ' unknown',
'2010-05-21-ae': ' unknown',
"2010-05-07-aq": ' P-unit_problem',
'2010-06-15-ap-invivo-1': ' Ampullary',
'2010-06-15-aq-invivo-1': ' Ampullary',
'2010-06-15-am-invivo-1': ' Ampullary',
'2010-07-08-ah-invivo-1': 'unkown',
'2010-07-08-ah-invivo-1': 'unkown_problem',
'2010-07-08-af-invivo-1': ' P-unit',
'2010-07-08-ag-invivo-1': ' P-unit_problem',
'2010-07-13-aa-invivo-1': ' P-unit',
'2010-07-13-an-invivo-1': ' P-unit',
'2010-07-13-ao-invivo-1': ' P-unit',
'2010-07-13-au-invivo-1': 'unkown',
'2010-07-13-bb-invivo-1': ' P-unit_problem',
'2010-07-13-am-invivo-1': ' P-unit',
'2010-07-29-ac-invivo-1': ' P-unit',
'2010-07-29-af-invivo-1': ' P-unit',
'2010-07-29-ae-invivo-1': ' P-unit_problem',
'2010-07-29-ae': ' P-unit_problem',
'2010-10-07-ae-invivo-1': ' P-unit_problem',
'2010-08-11-ae-invivo-1': ' P-unit',
'2010-08-11-ao-invivo-1': ' P-unit',
'2010-08-25-ac-invivo-1': ' P-unit',
'2010-08-27-ad-invivo-1': ' P-unit',
'2010-08-31-ae-invivo-1': ' P-unit',
'2010-08-31-af-invivo-1': ' P-unit',
'2010-11-08-ad-invivo-1': ' P-unit',
'2010-11-11-ab-invivo-1': ' P-unit',
'2010-11-26-ai-invivo-1': ' Ampullary_problem',
'2010-11-26-ai-invivo-1': ' P-unit',
'2011-02-18-ab-invivo-1': ' Ampullary',
'2011-02-18-ac-invivo-1': ' P-unit_problem',
'2011-11-04-ab-invivo-1': 'unkown',
'2011-11-06-aa-invivo-1': 'unkown',
'2011-11-06-ab-invivo-1': 'unkown',
'2011-11-06-ag-invivo-1': 'unkown',
'2011-11-10-ad-invivo-1': 'unkown',
'2011-11-21-ai': ' P-unit_problem',
'2012-01-11-ab': ' T-unit',
'2012-01-17-ad': ' Ampullary',
'2012-01-17-ag': ' Ampullary',
'2012-01-31-aa': 'unkown',
'2012-02-21-af': ' Ampullary',
'2012-02-21-ah': ' unknown',
'2012-02-23-ae': ' unknown',
'2012-02-23-am': ' unknown',
'2012-02-23-aj': ' unknown',
'2012-02-23-ar': ' unknown',
'2012-02-23-ag-invivo-1': ' Ampullary',
'2012-02-23-aq': 'unkown',
'2012-02-28-ar': ' unknown',
'2012-03-09-ad': ' unknown',
'2012-03-09-af': ' Ampullary',
'2012-03-09-ag': ' Ampullary',
'2012-02-13-aw': 'unkown',
'2012-02-23-ak': 'unkown',
'2012-02-23-al': 'unkown',
'2012-02-28-aq': 'unkown',
'2012-03-13-aw': ' unknown',
'2012-03-20-ad': ' Ampullary',
'2012-03-20-ae': ' Ampullary',
'2012-03-20-ah': 'unkown',
'2012-03-20-al': 'unkown',
'2012-04-20-ae': 'Ampullary_problem',
'2012-04-20-ae-invivo-1': 'Ampullary_problem',
'2012-05-15-aa-invivo-1': ' Ampullary',
'2012-06-08-ak-invivo-1': ' P-unit',
'2012-06-08-ah-invivo-1': 'unknown',
'2012-06-27-ad-invivo-1': ' P-unit_problem',
'2012-06-27-ai-invivo-1': ' P-unit_problem',
'2012-06-27-am-invivo-1': ' P-unit_problem',
'2012-06-28-ad-invivo-1': 'unknown',
'2012-07-03-ah-invivo-1': 'unknown',
'2012-07-11-ak-invivo-1': ' Ampullary',
'2012-07-11-ag-invivo-1': ' P-unit_problem',
'2012-07-11-aj-invivo-1': ' P-unit_problem',
'2012-07-03-ac-invivo-1': ' P-unit',
'2012-07-03-ad-invivo-1': ' P-unit',
'2012-07-12-ae-invivo-1': 'unkown',
'2012-10-23-ae': ' P-unit_problem',
'2012-10-25-ae': ' P-unit_problem',
'2012-10-26-ab': ' P-unit_problem',
'2012-10-30-al': ' P-unit_problem',
'2012-10-25-aj': ' unknown',
'2012-10-25-af': 'unkown',
'2012-10-25-aw': 'unkown',
'2012-10-26-ad': 'unkown',
'2012-10-26-ag': 'unkown',
'2012-10-26-al': ' P-unit',
'2012-10-26-am': ' T-unit',
'2012-10-26-ac': ' unknown',
'2012-10-26-af': ' Ampullary',
'2012-10-30-ab': ' unknown',
'2012-10-30-ac': ' unknown',
'2012-10-30-am': ' T-unit',
'2012-11-04-ab': 'unkown',
'2012-11-04-ab': ' T-unit',
'2012-11-06-ae': 'unkown',
'2012-11-06-ag': 'unkown',
'2012-11-07-ai': 'unkown',
'2012-11-20-ab-invivo-1': ' P-unit',
'2012-11-20-ab-invivo-1': ' P-unit',
'2012-12-12-ad-invivo-1': ' Ampullary_problem',
'2012-12-12-ae-invivo-1': ' P-unit',
'2012-12-12-af-invivo-1': ' P-unit',
'2012-12-12-ag-invivo-1': ' P-unit',
'2012-12-13-ab-invivo-1': ' P-unit',
'2013-04-10-ab-invivo-1': 'unkown',
'2013-04-16-ad-invivo-1': 'unkown',
'2014-01-16-ae-invivo-1': ' P-unit',
'2013-04-16-ag-invivo-1': ' P-unit_problem',
'2014-01-16-ai-invivo-1': ' Ampullary',
'2014-01-23-ab-invivo-1': ' P-unit_problem',
'2014-05-21-ab-invivo-1': ' P-unit',
'2014-05-21-ae-invivo-1': ' P-unit_problem',
'2014-05-21-af-invivo-1': ' P-unit',
'2014-06-06-ah-invivo-1': 'unkown',
'2014-07-17-am-invivo-1': ' P-unit_problem',
'2014-07-23-af-invivo-1': ' P-unit_problem',
'2014-01-23-af-invivo-1': 'unkown',
'2014-03-19-aa-invivo-1': ' P-unit',
'2014-03-19-ab-invivo-1': ' P-unit',
'2014-03-19-ac-invivo-1': ' P-unit',
'2014-12-11-an-invivo-1': ' P-unit',
'2017-10-25-af-invivo-1': ' P-unit',
'2017-07-18-aa-invivo-1': ' P-unit_problem',
'2017-08-15-af-invivo-1': ' P-unit_problem',
'2018-01-10-af-invivo-1': ' P-unit_problem',
'2018-01-10-ag-invivo-1': ' P-unit_problem',
'2018-01-12-ah-invivo-1': ' P-unit_problem',
'2018-01-12-aj-invivo-1': ' P-unit_problem',
'2018-01-16-aa-invivo-1': 'unkown',
'2018-01-17-ac-invivo-1': ' P-unit_problem',
'2018-01-19-ao-invivo-1': ' P-unit_problem',
'2018-03-21-aa-invivo-1': ' P-unit_problem',
'2018-03-28-ab-invivo-1': 'unkown',
'2018-05-08-ag-invivo-1': ' P-unit_problem',
'2018-05-17-ab-invivo-1': ' P-unit_problem',
'2018-06-26-aa-invivo-1': 'unkown',
'2018-07-26-ab-invivo-1': ' P-unit_problem',
'2018-07-26-ak-invivo-1': ' P-unit_problem',
'2018-08-24-ab-invivo-1': ' P-unit_problem',
'2018-09-06-aq-invivo-1': ' P-unit',
'2018-09-25-aa-invivo-1': ' P-unit_problem',
'2018-09-25-ab-invivo-1': ' P-unit_problem',
'2018-09-25-ac-invivo-1': ' P-unit_problem',
'2018-09-25-ad-invivo-1': ' P-unit_problem',
'2018-09-28-aa-invivo-1': ' P-unit_problem',
'2018-11-09-aa-invivo-1': 'unkown',
'2018-11-09-ac-invivo-1': ' P-unit_problem',
'2018-11-14-ae-invivo-1': 'unkown',
'2018-12-17-af-invivo-1': ' P-unit_problem',
'2018-12-21-ac-invivo-1': ' Ampullary',
'2019-05-15-ai-invivo-1': 'unkown',
'2019-05-22-aa-invivo-1': 'unkown',
'2019-05-22-ab-invivo-1': 'unkown',
'2019-06-28-ag-invivo-1': ' Ampullary',
'2019-06-28-ah-invivo-1': 'unkown',
'2019-02-26-aa-invivo-1': 'unkown',
'2019-02-26-ab-invivo-1': 'unkown',
'2019-02-26-ac-invivo-1': 'unkown',
'2019-02-26-ad-invivo-1': 'unkown',
'2019-02-26-ae-invivo-1': 'unkown',
'2019-02-26-af-invivo-1': 'unkown',
'2019-02-26-ag-invivo-1': 'unkown',
'2019-02-26-ah-invivo-1': 'unkown',
'2019-02-26-ai-invivo-1': 'unkown',
'2019-05-07-bp-invivo-1': 'unkown',
'2019-05-15-af-invivo-1': 'unkown',
'2019-09-10-af-invivo-1': 'unkown',
'2019-09-11-ae-invivo-1': ' P-unit',
'2019-09-28-ag-invivo-1': 'unkown',
'2019-06-28-ab-invivo-1': 'unkown',
'2019-06-28-ag-invivo-1': 'unkown',
'2019-10-21-ak-invivo-1': 'unkown',
'2019-10-21-an-invivo-1': 'unkown',
'2020-07-24-ad-invivo-1': 'unkown',
'2020-10-29-aa-invivo-1': ' P-unit_problem',
'2020-10-20-ac-invivo-1': ' P-unit_problem',
'2021-06-18-aa-invivo-1': 'unkown',
'2021-06-18-ac-invivo-1': 'unkown',
"2021-06-18-ae-invivo-1": ' P-unit',
'2021-08-02-ad-invivo-1': ' P-unit_problem',
'2021-08-03-aa-invivo-1': ' P-unit',
'2021-08-03-ab-invivo-1': ' P-unit',
'2021-08-03-ae-invivo-1': ' P-unit',
'2021-11-05-ai-invivo-1': 'unkown',
'2021-12-17-aa-invivo-1': 'unkown',
'2022-01-06-ad-invivo-1': 'unkown',
'2022-01-08-aa-invivo-1': ' P-unit',
'2022-01-26-aa-invivo-1': ' Pyramidal',
'2022-01-26-ab-invivo-1': ' Pyramidal',
'2022-01-26-ac-invivo-1': ' Pyramidal',
'2022-02-07-am-invivo-1': ' Pyramidal',
'2022-02-07-ag-invivo-1': ' Pyramidal',
'2022-02-07-aj-invivo-1': ' Pyramidal',
'2022-02-07-ak-invivo-1': ' Pyramidal',
'2022-02-07-al-invivo-1': ' Pyramidal',
'2022-02-08-ao-invivo-1': ' P-unit',
'2022-02-08-aj-invivo-1': ' Pyramidal',
'2022-02-10-ad-invivo-1': ' P-unit_problem',
} # '2018-08-14-af-invivo-1': ' P-unit_problem',
# '2018-09-05-aj-invivo-1': ' P-unit_problem',
    # '2022-01-08-ah-invivo-1': ' P-unit_problem'  # todo: the last three were excluded because something with cv_stim is not right!
    # '2018-09-06-aq-invivo-1': ' P-unit_problem', '2022-01-27-aa-invivo-1': take this one out after all, simply a much too high CV!!!! '2013-04-16-ag-invivo-1': ' P-unit_problem', '2013-04-16-ag-invivo-1': ' P-unit_problem',
for cell_change in cells_changed:
indeces = frame[frame['cell'] == cell_change].index # ['cell_type_reclassified'] = [' Ampullary']*2
frame.loc[indeces, 'cell_type_reclassified'] = cells_changed[cell_change]
# cell_type_reclassified
# frame.loc[indeces, 'cell_type_info'] = cells_changed[cell_change]
if 'pkl' in name:
frame.to_pickle(name)
else:
frame.to_csv(name)
# embed()
# frame.to_csv(load_folder_name('calc_base')+'/calc_base_data-base_frame_overview.csv')
# frame[frame['cell'] == '2022-02-10-af-invivo-1']['cell_type_reclassified'] = ' Pyramidal'
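# Minimal sketch of the reclassification pattern used in calc_base_reclassification() above:
# override the type of selected cells from a {cell: new_type} mapping. The frame and column
# names are only assumptions for illustration.
def example_reclassify(frame, changes, column='cell_type_reclassified'):
    frame = frame.copy()
    for cell, new_type in changes.items():
        frame.loc[frame['cell'] == cell, column] = new_type
    return frame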
def load_cv_table(path_sascha='', name='calc_base_data-base_frame_overview.csv',
redo=False): # ../calc_base/calc_base_data-base_frame.pkl
if path_sascha == '':
path_sascha = load_folder_name('calc_base') + '/' + name
path_here = name
# embed()
if os.path.exists(path_sascha):
        # here we only re-save the frame
if '.pkl' in path_sascha:
try:
# embed()
frame = pd.read_pickle(path_sascha)
except:
frame = pd.read_pickle(path_sascha.replace('.pkl', '_long.pkl'))
# print('pickling error')
# embed()
else:
frame = pd.read_csv(path_sascha, index_col=0)
if ('cell_type_reclassified' not in frame.keys()) | (redo == True):
print('redid reclassfying')
calc_base_reclassification(names=[path_sascha])
if '.pkl' in path_sascha:
try:
frame = pd.read_pickle(path_sascha)
except:
frame = pd.read_pickle(path_sascha.replace('.pkl', '_long.pkl'))
else:
frame = pd.read_csv(path_sascha, index_col=0)
# embed()
# frame.to_pickle(path_here)
# quit
# embed()
# else:
# #embed()
# if os.path.exists(path_here):
# frame = pd.read_pickle(path_here)
# else:
try:
frame
except:
        print('frame not defined')
embed()
# embed()
return frame
def unify_cell_names(frame, cell_type='cell_type_info'):
frame = frame.reset_index(drop=True)
for r in range(len(frame)):
try:
frame.loc[r, cell_type]
except:
print('somehow not working1')
embed()
if frame.loc[r, cell_type] in [' Eigenmannia virescens', 'unknown', 'unkown', ' unkown', float('nan'),
' unknow', ' Unknown', ' unknown',
' No idea', ' another funny cell', ' funny double spiker']:
frame.loc[r, cell_type] = 'unkown'
elif frame.loc[r, cell_type] in [' Ampullary', ' A-unit']:
frame.loc[r, cell_type] = ' Ampullary'
elif frame.loc[r, cell_type] in ['Ampullary_problem', ' Ampullary_problem']:
frame.loc[r, cell_type] = ' Ampullary_problem'
elif frame.loc[r, cell_type] in ['pyramidal', ' Ovoid', ' Ovoid?', ' I-cell', ' I-Cell', ' E-cell', ' E-Cell',
' E-cell deep', ' E-cell superficial', ' P unit or Pyramidal', ' Pyramidal']:
frame.loc[r, cell_type] = ' Pyramidal'
elif type(frame[cell_type].iloc[r]) == float:
# embed()
# if np.isnan(frame[cell_type].iloc[r]) == float('nan'):#
frame.loc[r, cell_type] = 'unkown'
# print('float thing')
elif frame.loc[r, cell_type] in ['unkown_problem', ' unknown_problem']: # frame.loc[r,cell_type]
frame.loc[r, cell_type] = ' unknown_problem'
elif frame.loc[r, cell_type] in [' P-unit', 'p-unit', ' P-unit ?']:
frame.loc[r, cell_type] = ' P-unit'
a = 0
elif frame.loc[r, cell_type] in [' T-unit']:
a = 0
elif frame.loc[r, cell_type] in [' P-unit_problem']:
frame.loc[r, cell_type] = ' P-unit_problem'
else:
print('cell type names not in list yet')
embed()
return frame
def colors_overview(): # 'tab:orange''tab:blue'
colors = {'unkown': 'grey',
' unkown': 'grey',
' unknown': 'grey',
'unkown_problem': 'grey',
' P-unit': 'tab:purple',
' P-unit_problem': 'cyan',
' unknown_problem': 'darkgrey',
' Ampullary_talk': 'tab:green',
' P-unit_talk': 'tab:purple',
' eigen_P-unit_talk': 'tab:blue',
' Ampullary': 'tab:green',
' Ampullary_problem': 'lightgreen',
'nan': 'grey',
' T-unit': 'purple',
' E-cell': 'red',
' Pyramidal': 'darkred',
' I-cell': 'pink',
' E-cell superficial': 'orange',
' Ovoid': 'cyan',
' Eigenmannia virescens': 'tab:green',
' Apteronotus leptorhynchus': 'tab:purple'}
return colors
def remove_yticks(ax):
ax.yaxis.set_major_formatter(ticker.NullFormatter())
return ax
def title_color(cell):
good_data, remaining = overlap_cells()
if cell in good_data:
color = 'red'
elif cell in remaining:
color = 'blue'
else:
color = 'black'
return color
def overlap_cells():
# good_data = ['2011-10-25-ad-invivo-1', '2018-05-08-aa-invivo-1',
# '2018-05-08-ac-invivo-1', '2018-05-08-ae-invivo-1', '2012-07-03-ak-invivo-1', ]
# pay attention that these are not cv sorted
good_data = ['2011-10-25-ad-invivo-1', '2018-05-08-aa-invivo-1', '2018-05-08-ac-invivo-1', '2018-05-08-ae-invivo-1',
'2012-07-03-ak-invivo-1', ]
# and these are
good_data = ['2012-07-03-ak-invivo-1', '2018-05-08-ae-invivo-1', '2018-05-08-ac-invivo-1', '2011-10-25-ad-invivo-1',
'2018-05-08-aa-invivo-1', ]
good_data = ['2012-07-03-ak-invivo-1', '2018-05-08-ae-invivo-1']
remaining = ['2012-07-12-ap-invivo-1',
'2012-12-13-af-invivo-1', '2012-12-13-ag-invivo-1',
'2012-12-13-ah-invivo-1', '2012-12-13-ao-invivo-1', '2012-12-20-aa-invivo-1',
'2012-12-20-ab-invivo-1', '2012-12-20-ac-invivo-1',
'2012-12-21-ak-invivo-1']
return good_data, remaining
def define_names(var_type, stim_type_noise, dendrid, ref_type, adapt_type):
if 'additiv' in var_type: # ' ser1 ' + str(np.round(model_show.ser_first_stim.iloc[0], 2))+ ' ser mean ' + str(np.round(model_show.ser_stim.iloc[0], 5))
stim_type_noise_name = stim_type_noise
else:
stim_type_noise_name = ''
if dendrid == '':
dendrid_name = 'standard'
else:
dendrid_name = dendrid
if ref_type == '':
ref_type_name = 'standard'
else:
        ref_type_name = ref_type
if adapt_type == '':
adapt_type_name = 'standard'
else:
adapt_type_name = adapt_type
return adapt_type_name, ref_type_name, dendrid_name, stim_type_noise_name
def make_cell_unique(cvs, cells):
# embed()
cvs_unique = cvs[np.unique(cells, return_index=True)[1]]
cells = np.unique(cells)
cells = cells[np.argsort(cvs_unique)]
return cells, cvs_unique
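# Usage sketch for make_cell_unique(): repeated cell names are reduced to unique cells ordered by
# ascending CV; note that the returned cvs_unique keep their original (unsorted) order.
def example_make_cell_unique_usage():
    cells = np.array(['a', 'b', 'a', 'c'])
    cvs = np.array([0.5, 0.2, 0.5, 0.9])
    cells_sorted, cvs_unique = make_cell_unique(cvs, cells)
    return cells_sorted, cvs_unique   # cells_sorted is ['b', 'a', 'c']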
def remove_xticks(ax):
ax.xaxis.set_major_formatter(ticker.NullFormatter())
return ax
def find_f(stack_final):
rest_nr = 0
# restrict = (f_orig > rest_nr)
try:
if 'sec' in stack_final.nfft.iloc[0]:
f_orig = np.fft.fftfreq(int(stack_final.sampling.iloc[0] * float(stack_final.nfft.iloc[0].split('sec')[0])),
1 / stack_final.sampling.iloc[0])
else:
f_orig = np.fft.fftfreq(stack_final.nfft.iloc[0], 1 / stack_final.sampling.iloc[0])
except:
print('nfft problem')
embed()
f = f_orig[(f_orig > rest_nr)]
return f
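# Sketch of the frequency-axis construction in find_f(): when nfft is given in seconds, the number
# of samples is sampling * nfft_seconds and np.fft.fftfreq spaces the bins by 1 / nfft_seconds.
# The parameter values here are made up.
def example_frequency_axis(sampling=40000.0, nfft_seconds=1.0):
    n = int(sampling * nfft_seconds)
    f = np.fft.fftfreq(n, 1.0 / sampling)
    return f[f > 0]   # keep only positive frequencies, as find_f() does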
def get_array_from_pandas(isf, abs=True):
means = []
for i in range(len(isf)):
if type(isf.iloc[i]) == list:
# embed()
if len(isf.iloc[i]) > 1:
for j in range(len(isf.iloc[i])):
if abs:
means.append(np.abs(isf.iloc[i][j]))
else:
means.append(isf.iloc[i][j])
else:
if abs:
means.append(np.abs(isf.iloc[i][0]))
else:
means.append(isf.iloc[i][0])
# if len(np.shape(isf.iloc[i])) == 2:
# print('the spike thing')
# embed()
# elif type(isf.iloc[i]) == str:
# embed()
# means.append(np.abs(isf.iloc[i][0]))
return means
def rate_calculation(spikes, eod, deltat):
if len(spikes) > 0:
rate_baseline_before = len(spikes) / (len(eod) * deltat)
else:
rate_baseline_before = 0
return rate_baseline_before
def load_model_susept(path, cells, save_name, save=True, redo=False, creation_time_update=True, size_update=False):
load_function, name1, save_name = find_model_names_susept(save_name)
# if full_name:
remove_old = False
versions_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
# embed()
print(name1)
# embed()
if (versions_comp == 'develop'):
# embed()
if (os.path.exists(name1)):
cont = check_creation_time(load_function, name1)
else:
cont = True
# embed()
if (redo == True) | cont: # (not os.path.exists(name1))
print('redo model')
model = resave_model_susept(cells, load_function, name1, path, remove_old, save, versions_comp,
creation_time_update=creation_time_update, size_update=size_update)
else:
if os.path.exists(name1):
model = pd.read_csv(name1, index_col=0)
if len(np.unique(model.cell)) != len(cells):
model = resave_model_susept(cells, load_function, name1, path, remove_old, save, versions_comp,
creation_time_update=creation_time_update, size_update=size_update)
else:
model = []
if len(model) > 0:
if 'io_cross' not in model.keys():
model = resave_model_susept(cells, load_function, name1, path, remove_old, save, versions_comp,
creation_time_update=creation_time_update, size_update=size_update)
test = False
if test:
frame = pd.read_pickle(path)
# embed()
# if len(model)<0:
# embed()
elif (versions_comp == 'code'):
print('loaded for code')
if os.path.exists(path):
model = pd.read_pickle(path) # pd.read_pickle(path)
else:
model = []
# embed()
else:
        # if Sascha's local computer is not found, check whether Sascha has already presaved this
if os.path.exists(name1):
model = pd.read_csv(name1, index_col=0)
else:
            # if not, nothing can be loaded
model = []
# embed()
test = False
if test:
load_function, name1, save_name = find_model_names_susept(save_name)
# if '/' in name1:
# name1 = name1.split('/')[1]
model1 = pd.read_csv(name1, index_col=0)
return model
def resave_model_susept(cells, load_function, name1, path, remove_old, save, versions_comp, creation_time_update=False,
size_update=True):
# if not os.path.exists(path):
    # pay attention, this has to be switched on!
dated_up = update_ssh_file(path, creation_time_update=creation_time_update, size_update=size_update)
# embed()
if os.path.exists(path):
################################
        # if Sascha's local computer is found, the versions should be updated again
# if (redo == True) : # | (not os.path.exists(name1))& (not os.path.exists(name0))
# try:
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
model = pd.read_pickle(path) # pd.read_pickle(path)
# embed()
# except:
# embed()
if len(cells) > 0:
model = model[
model['cell'].isin(cells)] # & (model_cell.file_name == file)& (model_cell.power == power)]
dirs = os.listdir()
if remove_old:
for d in dirs:
if (load_function in d) & ('pdf' not in d) & ('py' not in d):
os.remove(d)
# embed()
new_keys = np.unique(model.index)
model[new_keys] = np.abs(model[new_keys])
# stack_plot = stack_plot.iloc[np.arange(0, len(new_keys), 1)]
# stack_plot.columns = list(map(float, stack_plot.columns))
# embed()
if save == True:
model = model.drop(['d_isf1', 'd_osf1',
'var_spikes', 'norm',
'cross', 'trials_stim', 'dev', 'a_fe',
'power', 'nfft', 'stimulus_length', 'stimulus_spikes',
'counter', 'version', 'rate_baseline_after',
'rate_baseline_before', 'rate_adapted', 'adapt',
'offset'], axis=1) # 'trial_nr','var_RAM',#'d_isf_all',
# embed()
if 'contrast' in load_function:
load_function = 'con'
if (versions_comp == 'develop') & (save == True):
# model.to_csv(load_function + save_name.replace('calc_RAM_model-2_', '').split('model/')[-1].replace(
# 'fft_o_forward_fft_i_forward_Hz_mV', '').replace('_trans1s__TrialsNr_1_', '') + '.csv')
model.to_csv(name1)
# except:
if os.path.exists(name1):
model = pd.read_csv(name1, index_col=0)
else:
model = []
test = False
if test:
model_imshow()
# embed()
else:
        # if not, nothing can be loaded
model = []
return model
def find_model_names_susept(save_name):
st = inspect.stack() # [-3][3]
load_function = find_load_function() # st[-1][1].split('.py')[0].split('suseptibility')[-1][1::]
if 'contrast' in load_function:
load_function = 'con'
if '/' in save_name:
save_name = save_name.split('/')[-1]
name0 = load_function + save_name.split('model/')[-1].replace('fft_o_forward_fft_i_forward_Hz_mV', '').replace(
'_trans1s_', '') + '.csv' # _TrialsNr_1_
name1 = name0.replace('calc_RAM_model-2_', '')
name1 = name1.replace('eRAM', 'eg')
name1 = name1.replace('power', 'p')
name1 = name1.replace('TrialsStim', 'TS')
if len(name1) > 260:
print('name problems')
embed()
return load_function, name1, save_name
def restrict_punits(cell, amps):
if cell == '2018-09-06-au-invivo-1':
amps = [10]
if cell == '2020-10-20-ab-invivo-1':
amps = [1]
return amps
def find_stop_cell(cells, stop_cell='2010-06-15-af-invivo-1', ):
if stop_cell != '':
try:
cells = cells[np.where(np.array(cells) == stop_cell)[0][0]::]
except:
print('stop cell problem')
embed()
return cells
def load_cv_base_frame(cells_given, redo=False, cell_type_type='cell_type_reclassified'):
name1, name0, load_function = find_save_name('calc_base_data-base_frame_here', add='_isi_')
remove_old = False
    # when running on Sascha's computer
    # simply load the calc_base_data-base_frame
path_sascha = load_folder_name('calc_base') + '/calc_base_data-base_frame_overview.pkl' # ../calc_base/
# frame = load_cv_table(path_sascha=path_sascha)
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
# embed()
if (not os.path.exists(name1.replace('csv', 'pkl'))) | (
redo == True): # if (redo == True) | (not os.path.exists(name1)):
frame = redo_frame_cells(cell_type_type, cells_given, name1, path_sascha, version_comp)
    else:  # when not running on Sascha's computer
if os.path.exists(name1.replace('csv', 'pkl')):
frame = pd.read_pickle(name1.replace('csv', 'pkl')) # , index_col=0)
if version_comp != 'public':
for cell in cells_given:
# embed()
if type(frame[frame['cell'] == cell].spikes.iloc[0]) != list:
frame = redo_frame_cells(cell_type_type, cells_given, name1, path_sascha, version_comp)
# frame = pd.read_csv(name1, index_col=0)
else:
frame = []
return frame
def redo_frame_cells(cell_type_type, cells_given, name1, path_sascha, version_comp):
frame = load_cv_table(path_sascha=path_sascha)
frame = unify_cell_names(frame, cell_type=cell_type_type)
    # todo: if spikes are only needed for certain cells, delete the rest
frame.pop('mts')
frame.pop('tags')
if len(cells_given) > 0:
# embed()#frame.memory_usage()
frame_examples = frame[frame['cell'].isin(cells_given)] # .spikes = float('nan')
frame_remaining = frame[~frame['cell'].isin(cells_given)]
frame_remaining.spikes = float('nan')
frame_remaining.eod = float('nan')
frame_to_save = pd.concat([frame_remaining, frame_examples])
else:
frame_to_save = frame
if version_comp == 'develop':
# frame_to_save.to_csv(name1)
frame_to_save.to_pickle(name1.replace('csv', 'pkl'))
frame = frame_to_save
return frame
def save_ram_model(stimulus_length, cut_off1, nfft, a_fe, stim_type_noise, mimick, variant,
trials_stim, n, cell_recording_save_name, extract='', stim_type_afe='', Hz='Hz',
mV='mV', wierd_add0='', fft_o='', fft_i='', wierd_add='', c_noise=0.1, c_sig=0.9, adapt_type='',
noise_added='', a_fr=1, zeros='', burst_corr='', ref_type='', cut_off2=300, trans=0,
var_type='', dendrid='', quadrant='', trials_nr=1, nr=2, name_save='calc_RAM_model-'):
# if trials_nr != 1:
# trials_nr_name = '_TrialsNr_'+str(trials_nr)
# else:
# trials_nr_name = ''
trials_nr_name = '_TrialsNr_' + str(trials_nr)
if trans == 0:
trans_name = ''
else:
trans_name = '_trans' + str(trans) + 's_'
    trans_name = '_trans' + str(trans) + 's_'  # overrides the branch above: trans is always part of the name
if dendrid == '':
dendrid_name = ''
else:
dendrid_name = '_' + dendrid + ''
    dendrid_name = '_' + dendrid + ''  # overrides the branch above: dendrid is always part of the name
if a_fr == 1:
a_fr_name = ''
else:
a_fr_name = '_a_fr_' + str(a_fr) + '_' + zeros
    if a_fr == 1:  # overrides the block above: a_fr is now always part of the name
a_fr_name = '_a_fr_' + str(a_fr)
else:
a_fr_name = '_a_fr_' + str(a_fr) + '_' + zeros
if 'additiv' in var_type:
var_type = '_' + var_type + '_cNoise_' + str(c_noise) + '_cSig_' + str(c_sig)
a_fe_name = ''
noise_added_name = noise_added
elif var_type != '':
var_type = '_' + var_type + '_'
noise_added_name = ''
else:
a_fe_name = '_afe_' + str(a_fe)
var_type = ''
noise_added_name = ''
if a_fe == 0:
a_fe_name = ''
else:
if 'additiv' in var_type:
a_fe_name = '_afe_' + str(a_fe) + '_' + str(stim_type_afe)
else:
a_fe_name = '_afe_' + str(a_fe) + '_' + str(stim_type_afe)
# embed()
if cut_off1 == cut_off2:
cut_off_name = '_cutoff_' + str(cut_off1)
else:
cut_off_name = '_cutoff1_' + str(cut_off1) + '_cutoff2_' + str(cut_off2)
    cut_off_name = '_cutoff1_' + str(cut_off1) + '_cutoff2_' + str(cut_off2)  # overrides the branch above: both cutoffs always appear in the name
# cut_off_name = '_cutoff1_' + str(cut_off1) + '_cutoff2_' + str(cut_off2)
# load_folder_name('calc_model')+'/noise4_' + duration_noise + '_nfft_' + str(nfft) + '_power_' + str(n) + a_fe_name + str(
# formula) + '_' + stim_type_name + var_type + cut_off_name + cell_recording_save_name + mimick + '_' + variant + '_' + stimulus_type + 'length' + str(
# stimulus_length) + '_TrialsStim_' + str(
# trials_stim) + a_fr_name + dendrid_name + trans_name + trials_nr_name + ref_type + adapt_type #str(formula)
# embed()# stimulus_type
if 'additiv' in var_type:
if 'RAM' in stim_type_noise:
stim_type_name1 = '_' + stim_type_noise # + str(1)
else:
stim_type_name1 = '_' + stim_type_noise
else:
stim_type_name1 = ''
# if a_fe !=
return load_folder_name('calc_model') + '/' + name_save + str(nr) + '_' + '_nfft_' + str(nfft) + '_power_' + str(
n) + a_fe_name + stim_type_name1 + wierd_add0 + extract + wierd_add + var_type + cut_off_name + cell_recording_save_name + mimick + '_' + variant + '_' + 'length' + str(
stimulus_length) + '_TrialsStim_' + str(
trials_stim) + a_fr_name + dendrid_name + trans_name + trials_nr_name + ref_type + adapt_type + noise_added_name + '_fft_o_' + str(
fft_o) + '_fft_i_' + str(fft_i) + '_' + Hz + '_' + mV + burst_corr + quadrant
def stim_type_names(a_fe, c_sig, stim_type_afe, stim_type_noise_name):
    if 'eRAM' in stim_type_afe:
        stim_type_afe_name = r'RAM$\mathcal{P}(0,\,1)$'
    elif 'RAM' in stim_type_afe:
        stim_type_afe_name = r'RAM$\mathcal{P}(0,\,1)$ $\mathcal{A}(0,\,1)$ '
    if 'eRAM' in stim_type_noise_name:
        stim_type_noise_name2 = r'RAM$\mathcal{P}(0,\,1)$(c=' + str(c_sig) + ') '
    elif 'RAM' in stim_type_noise_name:
        stim_type_noise_name2 = r'RAM$\mathcal{P}(0,\,1)$ $\mathcal{A}(0,\,1)$ (c=' + str(c_sig) + ') '
    else:
        stim_type_noise_name2 = '(c=0)'
if a_fe == 0:
stim_type_afe_name = ''
# embed()
return stim_type_noise_name2, stim_type_afe_name
def calc_cut_offs(file):
if 'gwn' in file:
cut_off = int(file.split('gwn')[1].split('Hz')[0])
elif 'blwn' in file:
cut_off = int(file.split('blwn')[1].split('Hz')[0])
elif ('Arr' in file) & (('InputArr_50hz' in file) | ('InputArr_400hz' in file)):
cut_off = int(file.split('InputArr_')[1].split('hz')[0])
elif ('Arr' in file):
cut_off = int(file.split('to')[1].split('hz')[0])
elif (file == ''):
cut_off = float('nan')
elif 'stimtinsmissed' in file.lower():
cut_off = float(file.split('C')[0].split(' ')[1])
else:
print('cannot find cut_off3')
embed()
return cut_off
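# Usage sketch for calc_cut_offs(): the cutoff frequency is parsed out of the stimulus file name.
# The file names below are invented but follow the 'gwn...Hz' and 'InputArr_...hz' patterns handled above.
def example_calc_cut_offs_usage():
    assert calc_cut_offs('gwn300Hz50s0.3') == 300
    assert calc_cut_offs('InputArr_50hz_30s') == 50
    return True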
def sort_cells_base(small_cvs_first=True, name='calc_base_data-base_frame_overview.pkl', base_sorted='base_sorted',
redo_base=False, cell_sorted='cell_sorted', redo=False,
save_names=['noise_data8_nfft1sec_original__LocalEOD_CutatBeginning_0.05_s_NeurDelay_0.005_s'],
cell_type_type='cell_type_reclassified', cell_type_sort=[], sorted_cv='cv', gwn_filtered=False):
    # this function sorts the cells so that they can be processed one after another
# load the initial frame
frame = load_RAM_frame(cell_type_type, redo=redo_base, name=name)
# embed()
cell_types = frame[cell_type_type].unique()
load_name = load_folder_name('calc_RAM') + '/' + save_names[0] + '.csv'
frame_csv_overview = pd.read_csv(load_name, low_memory=False)
frame_csv_overview[['cell', 'amp']].sort_values('cell')
if ('cell_type_reclassified' not in frame_csv_overview) | (redo == True):
frame_csv_overview = update_RAM_with_CVs_baseline(frame_csv_overview, load_name)
lists = np.array(frame_csv_overview.amp.unique())
    # this is done so that plotting happens group-wise by cell_type
    # here we really take only those cells that also have a GWN stimulus, that is the difference
cells = frame[((frame.gwn == True) | (frame.fs == True))].cell.unique()
cells = np.sort(cells) # [::-1]
stop_cell = '2013-04-18-aa-invivo-1'
stop_cell = ''
cells = find_stop_cell(cells, stop_cell=stop_cell) # '2010-06-15-af-invivo-1'
    # Attention: this sorts all cells, also those without gwn
if len(cell_type_sort) < 1:
cell_type_sort = np.sort(cell_types)
# embed()
if gwn_filtered:
frame_filtered = frame[((frame.gwn == True) | (frame.fs == True))]
else:
frame_filtered = frame
if 'cell_sorted' in cell_sorted:
if base_sorted == 'base_sorted':
# embed()
            # cells.extend(cells_only)  # cells_only is not defined in this branch; cells is rebuilt just below
cells = cluster_cells_by_group(cell_type_sort, frame_filtered, cell_type_type, cell_sorted=cell_sorted,
sorted='cv', small_cvs_first=small_cvs_first)
elif base_sorted == 'base_ram_sorted':
# embed()
frame_selected = pd.read_csv('calc_base/cv_fr_punits_gwn_selected.csv')
frame_selected_amp = pd.read_csv('calc_base/cv_fr_ampullary_gwn_only.csv')
cells = []
# embed()
cells_only = cluster_cells_by_group(cell_type_sort, frame_selected, cell_type_type=None,
cell_sorted=cell_sorted,
sorted='cv', small_cvs_first=small_cvs_first)
cells.extend(cells_only)
cells_only_amp = cluster_cells_by_group(cell_type_sort, frame_selected_amp, cell_type_type=None,
cell_sorted=cell_sorted,
sorted='cv', small_cvs_first=small_cvs_first)
cells.extend(cells_only_amp)
# embed()
cells_genreall = cluster_cells_by_group(cell_type_sort, frame_filtered, cell_type_type,
cell_sorted=cell_sorted,
sorted='cv', small_cvs_first=small_cvs_first)
cells.extend(cells_genreall)
cells_bef = np.unique(frame_csv_overview.cell)
missing_cells = list(set(cells) - set(cells_bef))
#
cells.extend(missing_cells)
# embed()
elif base_sorted == 'base_ram_sorted_only':
# embed()
frame_selected = pd.read_csv('calc_base/cv_fr_punits_gwn_selected.csv')
frame_selected_amp = pd.read_csv('calc_base/cv_fr_ampullary_gwn_only.csv')
cells = []
# embed()
if sorted_cv not in frame_selected:
print(
'load calc_base/cv_fr_punits_gwn_selected.csv and calc_base/cv_fr_ampullary_gwn_only.csv from the other computer')
# frame_load = update_overview_class(frame_load, save_name_here, True)
#
cells_only = cluster_cells_by_group(cell_type_sort, frame_selected, cell_type_type=None,
cell_sorted=cell_sorted,
sorted=sorted_cv, small_cvs_first=small_cvs_first)
cells.extend(cells_only)
elif base_sorted == 'stim_sorted':
cells = cluster_cells_by_group(cell_type_sort, frame_csv_overview, cell_type_type, cell_sorted=cell_sorted,
sorted=sorted_cv, small_cvs_first=small_cvs_first)
cells_bef = np.unique(frame_csv_overview.cell)
missing_cells = list(set(cells_bef) - set(cells))
#
cells.extend(missing_cells)
# embed()
return cells, frame, cell_types
def load_RAM_frame(cell_type_type='cell_type_reclassified', name='calc_base_data-base_frame_overview.csv', redo=False):
frame = load_cv_table(redo=redo, name=name)
frame = unify_cell_names(frame, cell_type=cell_type_type)
return frame
def cluster_cells_by_group(cell_types, frame, cell_type_type='cell_type_reclassified', cell_sorted='', sorted='cv',
small_cvs_first=True):
cells = []
if not cell_type_type:
cell_types = [0]
for ct in cell_types:
# cv_sort = True
if cell_type_type:
frame_type = frame[frame[cell_type_type] == ct]
else:
frame_type = frame
        # todo: restructure how this is saved here
# embed()
if 'cv' in cell_sorted:
try:
cvs = frame_type[sorted] # # .iloc[0]
except:
print('sorting thing')
embed()
cells_sort = np.array(frame_type.cell)
cvs = np.array(cvs)
cells_sort, cvs_unique = make_cell_unique(cvs, cells_sort)
            # the cells are already argsorted
            # cells_sort = cells_sort[np.argsort(cvs_unique)]
# embed()
else:
cells_sort = np.sort(np.unique(frame_type.cell))
if small_cvs_first:
cells.extend(cells_sort)
else:
cells.extend(cells_sort[::-1])
# embed()
return cells
def cr_spikes_mat(spikes_cut, sampling_rate, length):
# we take the whole length of the segment, otherwise we will artificially cut the low beats!
spikes_mat = np.zeros(length + 2)
spikes_idx = np.round((spikes_cut) * sampling_rate)
# embed()
# if np.max(spikes_idx)> spikes_idx[-1]
# np.where(spikes_idx)> spikes_idx[-1]:
# try:
spikes_mat[list(map(int, spikes_idx))] = 1 * sampling_rate
# except:
# print('spikes something')
# embed()
# for spike in spikes_idx:
# spikes_mat[int(spike)] = 1 * sampling_rate
# spikes_mat = spikes_mat.astype(np.int)
return spikes_mat[0:-2]
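# Usage sketch for cr_spikes_mat(): spike times in seconds become a vector of the requested length
# whose entries are sampling_rate at the spike bins and 0 elsewhere (a binary rate representation).
def example_cr_spikes_mat_usage():
    sampling_rate = 1000.0
    spikes = np.array([0.010, 0.025, 0.090])        # spike times in seconds
    length = int(0.1 * sampling_rate)               # a 100 ms segment
    spikes_mat = cr_spikes_mat(spikes, sampling_rate, length)
    return spikes_mat                               # len(spikes_mat) == length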
def stimulus2RAM(nfft, a_fr, zeros, RAM, trans, stimulus_length, deltat, eod_fr, mimick, cell_recording,
nfft_for_morph, fish_morph_harmonics_vars, fishe_receiver='', sampling_rate=40000):
if trans != 0:
stimulus_length_here = stimulus_length + trans
else:
stimulus_length_here = stimulus_length
time_here = np.arange(0, stimulus_length_here, deltat)
time_fish_r = time_here * 2 * np.pi * eod_fr
eod_fish_r, deltat, eod_fr, time_array = eod_fish_r_generation(time_here, eod_fr, a_fr, stimulus_length_here, 0,
cell_recording, zeros, mimick, 1 / deltat,
fishe_receiver, deltat, nfft, nfft_for_morph,
fish_morph_harmonics_var=fish_morph_harmonics_vars,
beat='beat')
if trans != 0:
# if 'plus' in var_type:
# carrier_RAM = np.concatenate([RAM, RAM]) + eod_fish_r
# else:
# try:
carrier_RAM = (1 + np.concatenate([RAM[0:int(trans * sampling_rate)], RAM])) * eod_fish_r
# except:
# print('transient problem')
# embed()
# stimulus_orig = (1 + np.concatenate([RAM,RAM])) * eod_fish_r
else:
# if 'plus' in var_type:
# carrier_RAM = RAM + eod_fish_r
# else:
carrier_RAM = (1 + RAM) * eod_fish_r
# stimulus_orig = (1 + RAM) * eod_fish_r
return carrier_RAM, eod_fish_r, deltat, eod_fr, time_array, stimulus_length_here
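# Sketch of the amplitude modulation built in stimulus2RAM(): the RAM multiplies the receiver's
# carrier EOD as carrier = (1 + RAM) * eod. The sinusoidal carrier and white-noise RAM below are
# simplifications for illustration only (the real code uses eod_fish_r_generation and a band-limited RAM).
def example_amplitude_modulation(eod_fr=750.0, deltat=1.0 / 40000.0, duration=0.5, contrast=0.1):
    time = np.arange(0, duration, deltat)
    eod = np.sin(2 * np.pi * eod_fr * time)               # simplified sinusoidal carrier
    ram = contrast * np.random.randn(len(time))           # white-noise AM (not band-limited here)
    carrier = (1 + ram) * eod                              # same construction as in stimulus2RAM()
    return time, carrier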
def get_stim(carrier_RAM, a_fr, zeros, eod_fr, mimick, fishe_receiver, cell_recording, trans, nfft, extract,
noise_added, cut_off, stim_type_afe, model_params, variance, cell, laod_path, c_sig, var_type,
cut_off_type, input_scaling, noise_name, stimulus_length, deltat, a_fe, stim_type_noise, nfft_for_morph,
fish_morph_harmonics_vars, ):
stim_type = []
time_array = []
eod_fish_r = []
am_extracted = []
stimulus_length_here = []
if 'StimPreSaved' in stim_type_noise:
RAM, carrier_RAM, eod_interp, sampling, eod_mt, time_wn_cut = load_stim_presaved_model(noise_name,
stimulus_length, deltat,
a_fe)
else:
# define if RAM only intrinsic, intrinsic + additiv, additiv
# embed()
stim_type, RAM, height, RAM_afe, RAM_noise = RAM_additiv_not(stim_type_noise, input_scaling, a_fe,
stim_type_afe, model_params,
stimulus_length, deltat, noise_added,
cut_off, variance, cell, c_sig, var_type,
cut_off_type,
laod_path=laod_path)
# make the stimulus as a RAM or directly as stimulus
if 'RAM' in stim_type:
carrier_RAM, eod_fish_r, deltat, eod_fr, time_array, stimulus_length_here = stimulus2RAM(nfft, a_fr, zeros, RAM,
trans,
stimulus_length,
deltat, eod_fr, mimick,
cell_recording,
nfft_for_morph,
fish_morph_harmonics_vars,
fishe_receiver,
sampling_rate=1 / deltat)
if 'extractedCarrier' in extract:
am_extracted = extract_am(carrier_RAM, np.arange(0, len(carrier_RAM) * deltat, deltat), norm=False)[0]
am_extracted = am_extracted[0:len(RAM)]
RAM = am_extracted
test = False
if test:
plt_am_test()
return RAM_afe, RAM_noise, stim_type, RAM, carrier_RAM, deltat, eod_fr, time_array, eod_fish_r, am_extracted, stimulus_length_here
def load_stim_presaved_model(noise_name, stimulus_length, deltat, a_fe):
stimulus_wn = []
time_wn = []
for line in open(load_folder_name('calc_RAM') + '/' + noise_name + '.dat'):
if not line.startswith("#") and line != "\n":
stimulus_wn.append(float(line.rstrip("\n").split()[1]))
time_wn.append(float(line.rstrip("\n").split()[0]))
# embed()
time_wn_cut = np.array(time_wn)[np.array(time_wn) < stimulus_length]
eod_mt = np.array(stimulus_wn)[np.array(time_wn) < stimulus_length]
sampling = 1 / np.diff(time_wn_cut)[0]
eod_interp = interpolate(time_wn_cut, eod_mt,
np.arange(0, stimulus_length, deltat),
kind='cubic')
RAM = eod_interp * a_fe
carrier_RAM = eod_interp
return RAM, carrier_RAM, eod_interp, sampling, eod_mt, time_wn_cut
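# Sketch of the frequency-domain construction used in egerland_noise() below: draw random phases,
# give every bin up to the cutoff the same amplitude, zero the rest and transform back with the
# inverse real FFT. The final scaling here only normalises to unit variance; it is NOT the
# noise_strength/c_signal scaling of the model code, which is handled inside egerland_noise().
def example_band_limited_noise(stimulus_length=1.0, deltat=1.0 / 40000.0, cut_off=300.0):
    n = int(stimulus_length / deltat)
    freqs = np.fft.rfftfreq(n, d=deltat)
    phases = 2 * np.pi * np.random.rand(len(freqs))
    coeffs = np.exp(1j * phases)                    # unit amplitude, random phase
    coeffs[freqs > cut_off] = 0.0                   # band-limit the spectrum
    coeffs[0] = 0.0                                 # no DC offset
    noise = np.fft.irfft(coeffs, n=n)
    noise /= np.std(noise)                          # normalise to unit variance
    return noise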
def egerland_noise(stimulus_length, deltat, cut_off, noise_added='', noise_strength=1, c_signal=0.9,
var='additiv', cell='', input_scaling=1, load_path='', height=0):
# define the phases random, uniformely distributed!
time_array = np.arange(0, stimulus_length / 2, deltat)
# random_phases = np.random.normal(0, 1,
# size=len(time_array) + 1) # np.random.randn()#np.random.normal(0, 1, size=N)# *0.2
random_phases = np.random.rand(len(time_array) + 1)
f_low = 0 # -(1/deltat*2)
f_high = cut_off
datapoints = len(random_phases)
# define cut_off indices
cut_low = f_low * datapoints * deltat
cut_high = f_high * datapoints * deltat * 2
indeces = np.arange(0, len(random_phases), 1)
    # todo: d is the noise strength of the neuron, 2*d*c is the noise strength that determines the variance, c is 0.9 for the signal, and T is the simulation time, i.e. one second
# embed()
# TRASH more or less
if 'additiv_right' in var:
################
# RIGHT VERSION
d = noise_strength # (noise_strength ** 2) / 2
var_val = (d * c_signal) * 2 / deltat
n = int(np.ceil((stimulus_length + 0.5 * deltat) / deltat))
        # at first I thought the variance should be kept the same, but now I understand it does not have to be
if 'split' not in var:
sigma = 0.5 / np.sqrt(float(cut_high - cut_low)) * n
height2 = np.sqrt(var_val * 2) * sigma
else:
sigma = 0.5 / np.sqrt(n) * n
height2 = np.sqrt(var_val * 4) * sigma
        # /input_scaling_adapt  # multiplying by deltat does not make it better here
# generate the random frequencies
noise_bef_cut_off = height2 * (np.cos(2 * np.pi * random_phases)) + height2 * (1j *
np.sin(
2 * np.pi * random_phases))
        # make real parts at the cut-offs and the Nyquist frequency
noise_bef_cut_off[0] = height2 * (np.cos(2 * np.pi * random_phases))[0]
noise_bef_cut_off[int(datapoints / 2)] = height2 * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_bef_cut_off[int(cut_high)] = height2 * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
noise_after_cut_off = noise_bef_cut_off * 1
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_after_cut_off[(indeces < cut_low)] = 0
noise_after_cut_off[(indeces < cut_high)] = 0
# set before cut off to zero
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_bef_cut_off[(indeces < cut_low)] = 0
noise_bef_cut_off[(indeces > cut_high)] = 0
        # make the real transform
        noise_bef = np.real(np.fft.irfft(noise_bef_cut_off))[:n]  # multiplying by sigma already increases this
        noise_aft = np.real(np.fft.irfft(noise_after_cut_off))[:n]  # multiplying by sigma already increases this
        # DOUBLECHECK var_val & (var_bef + var_aft)
var_bef = np.var(noise_bef)
var_aft = np.var(noise_aft)
        # in the model the input is later multiplied by the input scaling, therefore it is divided by the input scaling here
if 'scaled' in var:
input_scaling_adapt = input_scaling
else:
input_scaling_adapt = 1
noise_bef = noise_bef / input_scaling_adapt
# var4 = 4 * d * c_sig * cut_off
#
# embed()
# TRASH more or less
elif 'additiv_cutoff' in var:
d = noise_strength # (noise_strength ** 2) / 2
noise_final = np.random.randn(int(stimulus_length / deltat))
noise_strength_new = np.sqrt(noise_strength * 2)
        # when it is multiplied here, noise_strength becomes the std
        # and the variance is std**2, so d is the variance, times two because we have negative and positive freqs
noise_orig = noise_final * noise_strength_new # / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
# np.var(noise_orig) = 0.05352956770474393
d = noise_strength # (noise_strength ** 2) / 2
var_val = (d * c_signal) * 2 # / deltat
height2 = np.sqrt(var_val * stimulus_length) # * sigma#/deltat
        # this is the power spectrum; this is one point of the power and it is squared because it occurs twice!
power_spektrum = ((2 * (2 * d) ** 2))
# n = int(np.ceil((stimulus_length + 0.5 * deltat) / deltat))
        # at first I thought the variance should be kept the same, but now I understand it does not have to be
# if 'split' not in var:
# sigma = 0.5 / np.sqrt(float(cut_high - cut_low)) * n
# height2 = np.sqrt(2*var_val ) * sigma
# else:
# sigma = 0.5 / np.sqrt(n) * n
        # height2 is again very close to the std, so it has to be the std, since for the power spectrum it gets squared once more
        # the height here is the variance
        # /input_scaling_adapt  # multiplying by deltat does not make it better here
# generate the random frequencies
# noise_bef_cut_off = height2 * (np.cos(2 * np.pi * random_phases)) + height2 * (1j *
# np.sin(
# 2 * np.pi * random_phases))#
noise = height2 * (np.cos(2 * np.pi * random_phases)) + height2 * (1j * np.sin(2 * np.pi * random_phases))
noise_bef_cut_off = noise * 1
        # make real parts at the cut-offs and the Nyquist frequency
noise_bef_cut_off[0] = height2 * (np.cos(2 * np.pi * random_phases))[0]
noise_bef_cut_off[int(datapoints / 2)] = height2 * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
# noise_bef_cut_off[int(cut_high)] = height2 * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
noise_after_cut_off = noise_bef_cut_off * 1
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_after_cut_off[(indeces < cut_low)] = 0
noise_after_cut_off[(indeces < cut_high)] = 0
# zero everything outside the passband (below cut_low and above cut_high)
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_bef_cut_off[(indeces < cut_low)] = 0
noise_bef_cut_off[(indeces > cut_high)] = 0
# compute the inverse real FFT
# a = np.real(np.fft.irfft(noise))#[:n]
# b = np.real(np.fft.ifft(noise))#[:n]
# c = np.fft.irfft(noise)#[:n] # a & c are the same
# ffff = np.fft.irfft(noise, norm='backward')*n# * np.sqrt(1 / deltat)
# ok, this is resolved, it is a normalization issue; now I just need to figure out which one I actually want :)
# the result is actually twice as long, so it apparently really includes the negative frequencies
noise_bef = np.real(np.fft.irfft(noise_bef_cut_off,
norm='forward')) # [:n]#*np.sqrt(1/deltat)#*np.sqrt(1/deltat) # multiplying by sigma already increases this
noise_aft = np.real(np.fft.irfft(noise_after_cut_off,
norm='forward')) # [:n]#*np.sqrt(1/deltat)#*np.sqrt(1/deltat) # multiplying by sigma already increases this
noise_time = np.real(np.fft.irfft(noise,
norm='forward')) # [:n]# yes, because sqrt really is the correct factor!*np.sqrt(1/deltat) #*np.sqrt(1/deltat) # multiplying by sigma already increases this
# so here it is indeed the 'forward' normalization
freqs = np.fft.fftfreq(len(noise_time), d=deltat)
var_power_spektrum = np.sum(np.diff(freqs)[0] * np.abs(noise) ** 2) * 2
var_noise = np.var(noise_time)
var_bef = np.var(noise_bef)
var_aft = np.var(noise_aft)
if 'scaled' in var:
input_scaling_adapt = input_scaling
else:
input_scaling_adapt = 1
noise_bef = noise_bef / input_scaling_adapt
test = False
# embed()
if test:
test_forward()
# np.round(np.var(arrays[0]) * c_sig / c_noise, 5) * c_sig * cut_off / max_f
# var4 = 4 * d * c_sig * cut_off
# THIS is the one to find the correct CV
elif 'additiv_cv_adapt_factor' in var:
# 'additiv_cv_adapt_factor_scaled'
#embed()
frame = pd.read_csv(load_path + '.csv', index_col=0) # "2012-12-21-ai-invivo-1","2012-06-27-an-invivo-1",
try:
height = float(frame.loc['height', cell])
except:
print('height already')
embed()
var_val = []
var_bef, var_aft, noise_aft, noise_bef, noise_bef_cut_off, noise_aft_cut_aft = noise_c2r(random_phases, height,
datapoints, cut_high,
cut_low, deltat)
# embed()
elif 'additiv_cv_adapt' in var:
# RIGHT r2c VERSION!
# embed()
if height == 0:
height, frame, factor, var_val, d = load_height_d(noise_strength, c_signal, cell, var, stimulus_length,
input_scaling)
else:
var_val = []
var_bef, var_aft, noise_aft, noise_bef, noise_bef_cut_off, noise_aft_cut_aft = noise_c2r(random_phases, height,
datapoints, cut_high,
cut_low, deltat)
# embed()
else:
# THIS IS THE RIGHT ONE!
# embed()
if 'additiv_visual_d' in var:
if 'visual_d_4' in var: # with additive noise
height, frame, factor, var_val, d = load_height_d(noise_strength, c_signal, cell, var, stimulus_length,
input_scaling)
# RIGHT r2c VERSION!
var_bef, var_aft, noise_aft, noise_bef, noise_bef_cut_off, noise_aft_cut_aft = noise_c2r(random_phases,
height, datapoints,
cut_high, cut_low,
deltat)
# embed()
# TRASH more or less
elif 'additiv' in var:
# d = noise_strength**2/2
d = noise_strength # (noise_strength ** 2) / 2
var_val = (d * c_signal) * 2 / deltat
# c_sig = 0.9
# c_noise = 1 - c_sig
if 'deltat' in var:
height = np.sqrt(2 * d * c_signal * (1 / deltat))
elif 'visual4' in var: # with additive noise
if cell == '2013-01-08-aa-invivo-1':
if noise_added == '_noiseadded_':
if c_signal == 0.9:
if cut_off == 300:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 14750
elif cut_off == 400:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 15800
else:
if c_signal == 0.9:
if cut_off == 300:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 14750
elif cut_off == 400:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 15800
elif cut_off == 5000:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 79000
# embed()
elif 'visual3' in var: # with additive noise
if c_signal == 0.9:
if cut_off == 300:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 16050
elif cut_off == 400:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 18000
elif cut_off == 5000:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 84200
# embed()
elif 'visual2' in var: # without additive noise
if c_signal == 0.9:
if cut_off == 300:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 10000
elif cut_off == 400:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 11000
elif cut_off == 5000:
height = np.sqrt(2 * d * c_signal * stimulus_length) * 23000
# embed()
elif 'visual' in var:
if cut_off == 300:
height = 178
else:
height = 31
else:
height = np.sqrt(2 * d * c_signal * stimulus_length)
# embed()
# white noise in frequency space has constant amplitude, random phase
noise_bef_cut_off = height * (np.cos(2 * np.pi * random_phases) + 1j * np.sin(2 * np.pi * random_phases))
noise_aft_cut_aft = np.ones(len(noise_bef_cut_off)) * 1 # noise_bef_cut_off*1#
# embed()
# here I generate the noise afterwards
noise_bef_cut_off[0] = height * (np.cos(2 * np.pi * random_phases))[
0] # todo: this should be drawn in exactly the same way, and afterwards the imaginary part is set to zero; the amplitude stays the same
noise_bef_cut_off[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_bef_cut_off[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_bef_cut_off[(indeces < cut_low)] = 0
noise_bef_cut_off[(indeces > cut_high)] = 0
# here I generate the noise afterwards
noise_aft_cut_aft[0] = height * (np.cos(2 * np.pi * random_phases))[
0] # todo: this should be drawn in exactly the same way, and afterwards the imaginary part is set to zero; the amplitude stays the same
noise_aft_cut_aft[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_aft_cut_aft[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
indeces = np.arange(0, len(noise_aft_cut_aft), 1)
noise_aft_cut_aft[(indeces < cut_low)] = 0
noise_aft_cut_aft[(indeces < cut_high)] = 0
noise_bef = np.real(np.fft.irfft(noise_bef_cut_off))
noise_aft = np.real(np.fft.irfft(noise_aft_cut_aft))
var_bef = np.var(noise_bef)
var_aft = np.var(noise_aft)
# TRASH more or less
else:
d = noise_strength # (noise_strength ** 2) / 2
T_cut_off = np.max(indeces[indeces > cut_high]) / 20000
var_val = (d * c_signal) * 2 / deltat
var_val = 1
height = np.sqrt(2 * var_val * T_cut_off)
# embed()
# white noise in frequency space has constant amplitude, random phase
noise_bef_cut_off = height * (np.cos(2 * np.pi * random_phases) + 1j * np.sin(2 * np.pi * random_phases))
noise_aft_cut_aft = np.ones(len(noise_bef_cut_off)) * 1 # noise_bef_cut_off*1#
# embed()
# here I generate the noise afterwards
noise_bef_cut_off[0] = height * (np.cos(2 * np.pi * random_phases))[
0] # todo: this should be drawn in exactly the same way, and afterwards the imaginary part is set to zero; the amplitude stays the same
noise_bef_cut_off[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_bef_cut_off[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_bef_cut_off[(indeces < cut_low)] = 0
noise_bef_cut_off[(indeces > cut_high)] = 0
# here I generate the noise afterwards
noise_aft_cut_aft[0] = height * (np.cos(2 * np.pi * random_phases))[
0] # todo: this should be drawn in exactly the same way, and afterwards the imaginary part is set to zero; the amplitude stays the same
noise_aft_cut_aft[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_aft_cut_aft[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
indeces = np.arange(0, len(noise_aft_cut_aft), 1)
noise_aft_cut_aft[(indeces < cut_low)] = 0
noise_aft_cut_aft[(indeces < cut_high)] = 0
noise_bef = np.real(np.fft.irfft(noise_bef_cut_off))
noise_aft = np.real(np.fft.irfft(noise_aft_cut_aft))
var_bef = np.var(noise_bef)
stimulus_spikes = noise_bef * 1
white_noise_eng_abs = np.abs(noise_bef_cut_off)
#embed()
test = False
if test:
test_egerland()
return noise_bef, stimulus_spikes, white_noise_eng_abs, var_bef, var_val, height
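# Hedged illustration (not called anywhere in this module): a minimal, self-contained sketch of the
# frequency-domain noise construction used in the branches above -- draw a random phase per rFFT bin,
# give every bin the same height, zero the bins outside [cut_low, cut_high] and transform back with
# np.fft.irfft. In the code above the cutoffs are compared directly against the bin index, which equals
# the frequency in Hz only for a 1 s stimulus; the sketch makes that conversion explicit. The helper
# name _bandlimited_noise_sketch and its default values are hypothetical and not part of the pipeline.
def _bandlimited_noise_sketch(stimulus_length=1.0, deltat=1.0 / 20000, cut_low=0, cut_high=300, height=1.0):
    n = int(stimulus_length / deltat)  # number of samples in the time domain
    n_freqs = n // 2 + 1  # number of rFFT bins, spaced by 1/stimulus_length Hz
    phases = np.random.rand(n_freqs) * 2 * np.pi  # random phase per frequency bin
    spectrum = height * (np.cos(phases) + 1j * np.sin(phases))  # constant amplitude, random phase
    spectrum[0] = height  # DC and Nyquist bins have to be purely real
    spectrum[-1] = height
    freqs = np.arange(n_freqs) / stimulus_length  # bin index -> frequency in Hz
    spectrum[(freqs < cut_low) | (freqs > cut_high)] = 0.0  # band-limit the spectrum
    return np.real(np.fft.irfft(spectrum, n))  # back to the time domain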
def generate_noise(input_scaling, a_fe, stim_type, model_params, stimulus_length, deltat, noise_added, cut_off,
cell, c_sig, var_type, height=0, load_path=''):
# if 'filtered' in stim_type:
if 'eRAM' in stim_type:
# RIGHT VERSION
noise_strength = model_params.noise_strength # **2/2
RAM, carrier_RAM, freq_wn, var2_time, var2, height = egerland_noise(stimulus_length, deltat, cut_off,
noise_added=noise_added, height=height,
input_scaling=input_scaling, cell=cell,
noise_strength=noise_strength,
c_signal=c_sig,
var=var_type, load_path=load_path)
test = False
if test:
test_carrier()
else:
# RIGHT NOISE
# https://github.com/janscience/adaptationprimer/tree/master/filter
# somewhere fairly far down in filter.py there is
# a whitenoise cutoff
# todo: https://github.com/janscience/adaptationprimer/blob/master/filter/filter.py
# embed()
noise_strength = model_params.noise_strength # **2/2
input_scaling = model_params.input_scaling
# embed()#
RAM2, carrier_RAM2, freq_wn2, var2_time2, var22, height = egerland_noise(stimulus_length, deltat, cut_off,
noise_added=noise_added,
input_scaling=input_scaling, cell=cell,
noise_strength=noise_strength,
height=height,
c_signal=c_sig, var=var_type,
load_path=load_path)
# embed()
white_noise, freq_wn = whitenoise(0, cut_off, deltat, stimulus_length,
rng=np.random)
input_scaling = model_params.input_scaling
noise_strength = model_params.noise_strength # **2/2
d = noise_strength # (noise_strength ** 2) / 2
var2 = (d * c_sig) * 2 / deltat
white_noise = white_noise[0:-1]
if 'additiv' in var_type:
RAM = white_noise * (np.sqrt(np.var(RAM2)))
else:
RAM = white_noise * a_fe
test = False
if test == True:
test_ram()
plt_noise()
# embed()
return RAM, height
def RAM_additiv_not(stim_type_noise, input_scaling, a_fe, stim_type_afe, model_params, stimulus_length, deltat,
noise_added,
cut_off, variance, cell, c_sig, var_type, cut_off_type, height=0, laod_path=''):
# embed()
if ('additiv' in var_type) & (a_fe != 0):
# here if I have additive noise AND an a_fe unequal to 0
RAM_noise, height = generate_noise(input_scaling, 0, stim_type_noise, model_params, stimulus_length, deltat,
noise_added,
cut_off, cell, c_sig, var_type, height=height,
load_path=laod_path)
RAM_afe, _ = generate_additive(a_fe, c_sig, cell, cut_off, cut_off_type, deltat, height, input_scaling,
laod_path, model_params, noise_added, stim_type_afe, stimulus_length,
var_type, variance)
RAM = RAM_afe + RAM_noise
stim_type = stim_type_noise
elif ('additiv' in var_type):
# here for the additive case with a_fe == 0, i.e. noise only
RAM, height = generate_noise(input_scaling, a_fe, stim_type_noise, model_params, stimulus_length, deltat,
noise_added,
cut_off, cell, c_sig, var_type, height=height,
load_path=laod_path)
RAM_afe = np.zeros(len(RAM))
RAM_noise = RAM
stim_type = stim_type_noise
else:
# here for the non-additive case, i.e. only the a_fe stimulus
if a_fe != 0:
RAM, height = generate_noise(input_scaling, a_fe, stim_type_afe, model_params, stimulus_length, deltat,
noise_added,
cut_off, cell, c_sig, var_type, load_path=laod_path)
RAM = RAM / np.sqrt(np.var(RAM))
RAM = RAM * a_fe
else:
RAM = np.zeros(int(stimulus_length / deltat))
stim_type = stim_type_afe
RAM_afe = RAM
RAM_noise = np.zeros(len(RAM)) # np.zeros(len(RAM))
# embed()
plot = False
if plot:
plt.plot(np.abs(np.fft.fft(RAM)))
plt.show()
plt_noise2()
# embed()
return stim_type, RAM, height, RAM_afe, RAM_noise
def generate_additive(a_fe, c_sig, cell, cut_off, cut_off_type, deltat, height, input_scaling, laod_path, model_params,
noise_added, stim_type_afe, stimulus_length, var_type, variance):
RAM_afe, height = generate_noise(input_scaling, a_fe, stim_type_afe, model_params, stimulus_length, deltat,
noise_added,
cut_off, cell, c_sig, var_type, height=height,
load_path=laod_path)
RAM_afe = RAM_afe / np.sqrt(np.var(RAM_afe))
RAM_afe = RAM_afe * a_fe
return RAM_afe, height
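# Hedged illustration (not part of the pipeline): generate_additive above normalizes the RAM to unit
# variance and then scales it by the contrast a_fe, so that afterwards np.std(RAM_afe) equals a_fe.
# A minimal sketch of that two-step rescaling; the helper name _scale_to_contrast_sketch is hypothetical.
def _scale_to_contrast_sketch(ram, a_fe):
    ram = ram / np.sqrt(np.var(ram))  # unit standard deviation
    return ram * a_fe  # standard deviation now equals the contrast a_fe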
def name_for_cv_presaved(save_name_here_cv, burst_corr, trial_nrs_base):
save_name_here_cv = save_name_here_cv.replace('TrialsStim_' + str(trial_nrs_base), '')
save_name_here_cv = save_name_here_cv.replace(burst_corr, '')
save_name_here_cv = save_name_here_cv + '_cvtable'
return save_name_here_cv
def load_cv_vals_susept(cells, names_keep=[], EOD_type='', path_sp='/calc_base_data-base_frame_EOD1__overview.pkl',
frame_general=True, redo=False):
path = load_folder_name('calc_base') + '/calc_base_data-base_frame.pkl'
path_spikes = load_folder_name('calc_base') + path_sp # '+end+'
# frame = load_cv_table(path)
load_function = find_load_function()
load_function = load_function + '-'
names = ['spikes', 'EOD'] # , 'EOD_synch']
save_name_spikes = load_function + 'base_frame_all_arrays'
cont = False
if os.path.exists(save_name_spikes + '.csv'):
names_now = []
frame_spikes = load_object_to_pandas(names_now, save_name_spikes)
for cell in cells:
if cell not in np.unique(frame_spikes.cell):
cont = True
else:
cont = True
# embed()
if cont | (redo == True):
print('redoing it')
frame = load_cv_table(path)
# have an extra path for the spikes because they are not downsampled
# as they are in the other path!
frame_sp = load_cv_table(path_spikes)
frame_spikes = frame_sp[frame_sp.cell.isin(cells)]
# frame_spikes = frame_sp[frame_sp.cell.isin(cells)]
# embed()
keys_keep = frame.keys()[frame.memory_usage(index=False, deep=True) < 1000000]
if names_keep == 'all':
frame = frame[keys_keep]
load_function_big = ''
elif len(names_keep) == 0:
load_function_big = 'no'
frame = []
else:
frame = frame[names_keep]
load_function_big = load_function
# embed()
if len(frame_spikes) > 0:
# embed()
if 'synch' in EOD_type:
for i in range(len(frame_spikes)):
# embed()
# try:
if 'EOD' in frame_spikes.keys():
eod = frame_spikes.EOD.iloc[i][0][0]
if not np.isnan(np.max(eod)):
try:
if not np.isnan(np.max(eod)):
a_fr = (np.max(eod[0]) + np.abs(
np.min(eod[0]))) / 2
except:
print('afe thing')
embed()
time, time_fish_r, eod_fish_r, ff_first, eod_fr_data_first, pp_first_not_log, eod_fish_r_first, p_array_new_first, f_new_first = load_waves(
4068 * 4, frame_spikes.cell.iloc[i], a_fr=a_fr, stimulus_length=0.2,
sampling=frame_spikes.sampling.iloc[i] / frame_spikes.downsample.iloc[i],
eod_fr=frame_spikes.EODf.iloc[i], global_eod=eod)
# sampling_rate = frame_cell.sampling.iloc[0]
ds = int(frame_spikes.downsample.iloc[i])
# embed()
frame_spikes = save_structure_to_frame(frame_spikes.index[i], frame_spikes,
eod_fish_r[0::ds],
name='EOD_synch')
test = False
if test:
test_frame()
if load_function_big != 'no':
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
if version_comp == 'develop':
# if subfolder != '':
################################
# we do this once
if frame_general:
frame.to_csv(load_function_big + 'base_frame_all.csv')
else:
frame = frame_spikes
################################
# we do this for the specific cells
# frame_spikes.to_pickle(load_function+'base_frame_all_spikes.pkl')
frame_spikes.to_csv(load_function + 'base_frame_all_spikes.csv')
# here I save the spikes separately
# desired_val = get_array_from_pandas(frame_spikes['spikes']) # 'isf'
# save_name_spikes = load_function + 'base_frame_all_arrays'
if 'EOD' not in frame_spikes.keys():
names = ['spikes'] # , 'EOD']
print(
'you changed the cell choice!\n rerun the cell choice on anaconda \n with calc_base() and the setting firings_save = _EOD1_ and do the intro files in cells = spikes_for_desired_cells(firings_save, names = [intro],data_names = [])')
# embed()
save_object_from_frame(frame_spikes, names, save_name_spikes)
frame_spikes = load_object_to_pandas(names, save_name_spikes)
# embed()
# frame_spikes.to_pickle(load_function + 'base_frame_all_spikes.pkl')
else:
print('load')
try: # save_name.replace('.csv', '') + '.csv'
frame_spikes = load_object_to_pandas(names, save_name_spikes)
except:
print('parse thing')
embed()
# embed()
if frame_general:
if os.path.exists(load_function + 'base_frame_all.csv'):
frame = pd.read_csv(load_function + 'base_frame_all.csv')
else:
frame = pd.read_csv('base_frame_all.csv')
else:
frame = frame_spikes
return frame, frame_spikes
def save_object_from_frame(frame_spikes, names, save_name):
frame_spikes.to_csv(save_name.replace('.csv', '') + '.csv')
for name in names:
array = np.array(frame_spikes[name])
np.save(save_name.replace('.csv', '') + '_' + name + '.npy', array)
# embed()
def load_object_to_pandas(names, save_name, index_col=1):
if index_col == 0:
frame_spikes = pd.read_csv(save_name.replace('.csv', '') + '.csv', index_col=0)
else:
frame_spikes = pd.read_csv(save_name.replace('.csv', '') + '.csv')
for name in names:
if name in frame_spikes.keys():
try:
array_pure = np.load(save_name.replace('.csv', '') + '_' + name + '.npy', allow_pickle=True)
except:
array_pure = np.load(save_name.replace('.csv', '') + name + '.npy', allow_pickle=True)
frame_spikes[name].astype(object)
# embed()
try:
frame_spikes[name] = array_pure
except:
print('assign thing')
embed()
# embed()
return frame_spikes
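# Hedged usage sketch for the two helpers above: columns holding arrays (e.g. spike trains) do not
# survive a plain CSV round trip, so save_object_from_frame stores them as separate .npy files next to
# the CSV and load_object_to_pandas puts them back into the frame. The frame content and the file name
# 'sketch_frame' below are made up for illustration; the function is never called in this module.
def _object_roundtrip_sketch():
    frame = pd.DataFrame({'cell': ['cell_a', 'cell_b']})
    frame['spikes'] = [np.array([0.01, 0.05, 0.2]), np.array([0.02, 0.3])]  # one array per row
    save_object_from_frame(frame, ['spikes'], 'sketch_frame')  # writes sketch_frame.csv plus sketch_frame_spikes.npy
    return load_object_to_pandas(['spikes'], 'sketch_frame')  # arrays restored as an object column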
def RAM_norm(stack_plot, trials_stim=None, D_c_sig=None, model_show=[], bias_factor=1):
# bias_factor: default 1. Some cells were fitted too sensitively; for those, the contrast has to be
# adjusted manually here for the susceptibility calculation
if len(model_show) > 0:
# embed()
if 'isf_psd' in model_show.keys():
# THIS IS CORRECT!
# CORRECT VERSION
# embed()
print('did the right norm with psd!')
if not trials_stim:
# print('trials stim not there')
trials_stim = model_show.trial_nr.unique()[0]
isf_mean = np.array(model_show['isf_psd'] / bias_factor) / trials_stim
norm_char2 = norm_power(isf_mean, stack_plot)
norm = 1 / norm_char2
# RAM_norm_data
# embed()
# apply the same normalization here as for the data
else:
# we want to avoid this
print('pay attention this is not desired norm!')
norm = norm_based_on_D(D_c_sig)
else:
# embed()
# we want to avoid this
print('pay attention this is not desired norm!')
# embed()
norm = norm_based_on_D(D_c_sig)
stack_plot = (np.abs((stack_plot.astype('complex'))) / trials_stim) #
stack_plot = stack_plot * norm
test = False
if test:
deriv_test()
# embed()
# ((np.abs((stack_plot) * norm)) ** 2 / trials_stim)
# stack_plot = ((np.abs((stack_plot) * norm)) ** power / trials_stim) #
return stack_plot
def norm_based_on_D(D_c_sig):
# the formula is correct
# we adjust everything so that it matches what is done here
# yes, this is fine as it is
power_spektrum = ((2 * (2 * D_c_sig) ** 2))
# power_spektrum = ((2 * (D_c_sig) ** 2))
norm = 1 / power_spektrum # * stimulus_length input_scaling* *300/2000*stimulus_length
# stack_plot = ((np.abs((stack_plot) * norm)) ** power / trials_stim) #
# stack_plot = ((np.abs((stack_plot) * norm)) ** 1 / trials_stim) #
# embed()
return norm
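# Hedged worked example for norm_based_on_D (the number is made up, purely to illustrate the arithmetic):
# with D_c_sig = 0.0005 the power spectrum value is 2 * (2 * 0.0005) ** 2 = 2e-6, so norm_based_on_D(0.0005)
# returns 1 / 2e-6 = 5e5, i.e. the susceptibility gets multiplied by 5e5 in RAM_norm.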
def plt_RAM_perc(ax, perc, stack_plot, pcolor=False):
if pcolor:
if perc == 'perc':
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot), vmin=np.percentile(stack_plot, 5), vmax=np.percentile(stack_plot, 95),
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == '10':
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot), vmin=np.min(np.array(stack_plot)) * 10,
vmax=np.max(np.array(stack_plot)) / 10,
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == '100':
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot), vmin=np.min(np.array(stack_plot)) * 100,
vmax=np.max(np.array(stack_plot)) / 100,
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == '1000':
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot), vmin=np.min(np.array(stack_plot)) * 1000,
vmax=np.max(np.array(stack_plot)) / 1000,
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == 'no':
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot),
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
else:
im = ax.pcolormesh(
np.array(list(map(float, stack_plot.columns))), np.array(stack_plot.index),
np.abs(stack_plot),
cmap='viridis',
rasterized=True) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
else:
if perc == 'perc':
im = ax.imshow(stack_plot, origin='lower',
extent=[np.min(stack_plot.columns), np.max(stack_plot.columns),
np.min(stack_plot.index), np.max(stack_plot.index)],
vmin=np.percentile(stack_plot, 5), vmax=np.percentile(stack_plot, 95),
cmap='viridis', )
elif perc == '10':
im = ax.imshow(stack_plot, origin='lower',
extent=[np.min(stack_plot.columns), np.max(stack_plot.columns),
np.min(stack_plot.index), np.max(stack_plot.index)],
vmin=np.min(np.array(stack_plot)) * 10,
vmax=np.max(np.array(stack_plot)) / 10,
cmap='viridis', )
elif perc == '100':
im = ax.imshow(stack_plot, origin='lower',
extent=[np.min(stack_plot.columns), np.max(stack_plot.columns),
np.min(stack_plot.index), np.max(stack_plot.index)],
vmin=np.min(np.array(stack_plot)) * 100,
vmax=np.max(np.array(stack_plot)) / 100,
cmap='viridis', ) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == '1000':
im = ax.imshow(stack_plot, origin='lower',
extent=[np.min(stack_plot.columns), np.max(stack_plot.columns),
np.min(stack_plot.index), np.max(stack_plot.index)],
vmin=np.min(np.array(stack_plot)) * 1000,
vmax=np.max(np.array(stack_plot)) / 1000,
cmap='viridis', ) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
elif perc == 'no':
im = ax.imshow(stack_plot, origin='lower',
extent=[np.min(stack_plot.columns), np.max(stack_plot.columns),
np.min(stack_plot.index), np.max(stack_plot.index)],
cmap='viridis', ) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
else:
im = ax.imshow(stack_plot, origin='lower',
extent=[float(np.min(stack_plot.columns)), float(np.max(stack_plot.columns)),
float(np.min(stack_plot.index)), float(np.max(stack_plot.index))],
cmap='viridis', ) # 'Greens'#vmin=np.percentile(np.abs(stack_plot), 5),vmax=np.percentile(np.abs(stack_plot), 95),
return im
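# Hedged usage sketch for plt_RAM_perc: the function expects a pandas DataFrame whose index and columns
# are the two stimulus frequencies (in Hz) and whose values are the susceptibility magnitudes; with
# perc='perc' the color scale is clipped to the 5th/95th percentile. The random matrix below and the
# helper name _plt_ram_perc_sketch are made up for illustration only.
def _plt_ram_perc_sketch():
    freqs = np.arange(0, 300, 10.0)
    stack = pd.DataFrame(np.random.rand(len(freqs), len(freqs)), index=freqs, columns=freqs)
    fig, ax = plt.subplots()
    im = plt_RAM_perc(ax, 'perc', stack)  # imshow with percentile-based color limits
    fig.colorbar(im, ax=ax)
    plt.show()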
def plt_50_Hz_noise(ax, cutoff, power_noise_color='blue', ):
# horizontal lines
cutoff = float(cutoff)
ax.plot([0, 25], [50, 50], color=power_noise_color, linestyle='--')
ax.plot([0, 25], [100, 100], color=power_noise_color, linestyle='--')
ax.plot([0, 25], [150, 150], color=power_noise_color, linestyle='--')
ax.plot([0, 25], [200, 200], color=power_noise_color, linestyle='--')
ax.plot([0, 25], [250, 250], color=power_noise_color, linestyle='--')
# vertical lines
ax.plot([50, 50], [cutoff, cutoff - 25], color=power_noise_color, linestyle='--')
ax.plot([100, 100], [cutoff, cutoff - 25], color=power_noise_color, linestyle='--')
ax.plot([150, 150], [cutoff, cutoff - 25], color=power_noise_color, linestyle='--')
ax.plot([200, 200], [cutoff, cutoff - 25], color=power_noise_color, linestyle='--')
ax.plot([250, 250], [cutoff, cutoff - 25], color=power_noise_color, label='50 Hz Artefact', linestyle='--')
def plt_triangle(ax, fr, fr_stim, cutoff, eod_fr=750, eod_fr_half_color='darkorange', line_length=1 / 4, lines=False,
fr_color='magenta', eod_metrice=True, nr=3, stim_triangle=False,
eod_fr_color='crimson', fr_stim_color='darkred'): # [1, 0.4, 0]
# print(fr_color)
# half_triangle(ax, counter, fr, color = 'red', label = 'Sum Fr')
# embed()
if nr > 0:
quater_triangle(ax, fr, cutoff, color=fr_color, label='Baseline Fr Triangle ', line_length=line_length)
if nr > 1:
quater_triangle(ax, fr * 2, cutoff, color=fr_color, label='')
if nr > 2:
quater_triangle(ax, fr * 3, cutoff, color=fr_color, label='')
if stim_triangle:
quater_triangle(ax, fr_stim, cutoff, color=fr_stim_color, label='Stimulus Fr Triangle ')
quater_triangle(ax, fr_stim * 2, cutoff, color=fr_stim_color, label='')
quater_triangle(ax, fr_stim * 3, cutoff, color=fr_stim_color, label='')
if eod_metrice:
ax.plot([0, eod_fr / 4], [eod_fr / 2, eod_fr / 4, ], color=eod_fr_half_color,
label='', linestyle='--')
ax.plot([0, eod_fr / 4], [eod_fr / 2, eod_fr / 4, ], color=eod_fr_half_color,
label='EODfr/2 Triangle', linestyle='--')
ax.plot([0, eod_fr / 2], [eod_fr, eod_fr / 2, ], color=eod_fr_color,
label='EODfr Triangle', linestyle='--')
ax.plot([0, eod_fr / 2], [eod_fr, eod_fr / 2, ], color=eod_fr_color,
label='', linestyle='--')
if lines:
plt_50_Hz_noise(ax, cutoff, power_noise_color='blue', )
def find_row_col(full_name, row=0, col=0):
if row == 0:
row = int(np.round(np.sqrt(len(full_name))))
if col == 0:
col = int(np.ceil(len(full_name) / row))
else:
row = int(np.ceil(len(full_name) / col))
return col, row
def cut_title(score_name, datapoints=18):
title = "\n".join(wrap(score_name, datapoints))
return title
def D_derive(model_show, save_name, c_sig, base='', D='', nr=''):
# embed()
var = model_show.var_RAM.iloc[0]
cut_off = int(save_name.split('cutoff1_')[1].split('_')[0])
if 'var_based' in base:
# correct
# D_derived = var / (4 * c_sig * cut_off)
# if nr == '':
#
# D_derived = var / (4 * cut_off)
# else:
# D_derived = var/ model_show['trial_nr'].iloc[0] / (4 * cut_off)
# the cutoff part will also be removed
# no idea whether the cutoff handling is correct now; leaving it as it is for the moment
if nr == '':
D_derived = var / (4 * cut_off)
var = var
else:
D_derived = var / model_show['trial_nr'].iloc[0] / (4 * cut_off)
var = var / model_show['trial_nr'].iloc[0] # / (4 * cut_off)
else:
# the two things are not the same, because the upper one was averaged over the variance
# and the lower one over the standard deviation, and averaging over the standard deviation is wrong!
# it should have been the variance; one does not average over the std!
# wrong
# ok, but it is actually correct here
# this is the absolute height, and since it does not change, it can be done like this here
# yes, for the Egerland noise this is indeed correct!
# and c_sig is usually 1, i.e. when the input is taken as it is and not the D
# if the D is taken instead, this of course has to be adjusted
try:
d = model_show['d_isf_all'].iloc[0] / model_show['trial_nr'].iloc[0]
except:
d = D
D_derived = d * c_sig
# embed()
return D_derived, var, cut_off
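# Hedged worked example for the 'var_based' branch of D_derive: the branch assumes var = 4 * D * cut_off,
# so the noise intensity is recovered as D_derived = var / (4 * cut_off). With a made-up var = 0.6 and
# cut_off = 300 Hz this gives D_derived = 0.6 / (4 * 300) = 5e-4.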
def plot_eod_waveform():
plt.subplot(2, 2, 1)
plt.plot(time_array, global_eod)
plt.subplot(2, 2, 2)
nfft = 4000 * 4
p, f = ml.psd(global_eod - np.mean(global_eod), Fs=sampling_data, NFFT=nfft,
noverlap=nfft // 2)
plt.xlim([0, 2000])
plt.plot(f, p)
plt.subplot(2, 2, 3)
plt.plot(mean_eod[:, 0], mean_eod[:, 1])
plt.subplot(2, 2, 4)
nfft = 4000
p, f = ml.psd(mean_eod[:, 1] - np.mean(mean_eod[:, 1]), Fs=sampling_data, NFFT=nfft,
noverlap=nfft // 2)
plt.plot(f, p)
plt.xlim([0, 2000])
plt.show()
def find_harmonics_new(global_eod, sampling_data, eod_fr, ff, pp, time_array, normalized_global_eod,
fish_morph_harmonics_var='analyzed'):
# function calling the thunderfish toolbox and sorting the output
# input
# sampling_data -- sampling of the data
# eod_fr -- the expected eodf of this data array
# ff -- frequencies of the Fourier analysis
# pp -- power of the spectral analysis
# time_array -- time of the baseline
# baseline_array -- the baseline array
# in case you want to do the harmonic group manually, you get the sixth column with the initial power spectrum for comparison
if 'wave' in fish_morph_harmonics_var:
# THIS IS NOT WHAT WE WANT
# ok, so this kind of builds the mean EOD waveform and reduces the size of the abstract; it doesn't seem to be a good idea
input, eod_times = eodanalysis.eod_waveform(global_eod, sampling_data, time_array)
# embed()
nfft = 500
# embed()
if 'pchange' in fish_morph_harmonics_var:
pp, ff = ml.psd(input[:, 1] - np.mean(input[:, 1]), Fs=sampling_data, NFFT=nfft,
noverlap=nfft // 2)
plot = False
if plot == True:
test_wave(ff, input, pp, sampling_data)
else:
# THIS IS WHAT WE WANT
# here we can directly take the waveform as input; it's just normalized
input = np.transpose(np.array([time_array, normalized_global_eod]))
plot = False
if plot == True:
plot_eod_waveform()
# harmonic and non-harmonic yield more or less the same results; I think Jan said that harmonic is better here
if 'harmonic' in fish_morph_harmonics_var:
# TAKE THIS ONE (since the eodf can change, but here let's retrieve the harmonics directly!)
# retrieve the peaks, restricting them to the eod_fr since we know it and don't want to be confused by other harmonic groups
# embed()
try:
harmon = harmonics.harmonic_groups(ff, pp, min_freq=eod_fr * 0.9, max_freq=eod_fr * 1.1)
except:
harmon = harmonics.harmonic_groups(ff, pp, min_freq=eod_fr * 0.9, max_freq=eod_fr * 1.1)
# in case the restriction still allowed for several harmonic groups find the closest to the expected f
if len(harmon[0]) > 1:
fundamental = [[]] * len(harmon[0])
for i in range(len(harmon[0])):
fundamental[i] = harmon[0][i][0, 0]
theright_harmonics = harmon[0][np.argmin(np.abs(np.array(fundamental) - eod_fr))]
else:
theright_harmonics = harmon[0]
# if len(harmon)
morphed_waveform = eodanalysis.analyze_wave(input, theright_harmonics[0], n_harm=10,
power_n_harmonics=0, n_harmonics=3, flip_wave='none')
# else if you just have the eodfr
else:
harmon = []
morphed_waveform = eodanalysis.analyze_wave(input, int(eod_fr), n_harm=10,
power_n_harmonics=0, n_harmonics=3, flip_wave='none')
# this variable describes how well the new and the old waveform match
fit_between_waves = morphed_waveform[0]
spec_data = morphed_waveform[2]
# if fish_morph_harmonics_var == 'harmon':
# # ok this is a toy thing to see if the difference considering the second EOD multiple is significant!
# amp = np.sqrt(spec_data[:, 6]) / np.max(np.sqrt(spec_data[:, 6]))
# else:
# 'amplitude relative to the fundamental'
amp = spec_data[:, 3]
# the phase here I just reproduce with the analyze function
phase = spec_data[:, 5]
amp2 = amp * 1
phase2 = phase * 1
# embed()
# plot function to check how the different outputs of the thunderfish functions fit to the initial power spectrum
if plot == True:
plot_different_harmonic_representation(fit_between_waves, normalized_global_eod, time_array, sampling_data, ff,
pp,
theright_harmonics, spec_data)
return input, harmon, morphed_waveform, fit_between_waves, spec_data, amp, phase
def plot_different_harmonic_representation(fit_between_waves, baseline_array, time_array, sampling_data, ff, pp,
theright_harmonics, spec_data):
plt.subplot(2, 5, 1)
plt.title('original before function')
plt.plot(ff, pp / (np.max(pp)))
plt.scatter(theright_harmonics[0][:, 0], theright_harmonics[0][:, 1] / np.max(theright_harmonics[0][:, 1]))
plt.subplot(2, 5, 2)
plt.title('amplitude relative to the fundamental')
ref = (np.max(pp))
pp_new = 10 * np.log10(pp / ref)
plt.plot(ff, np.sqrt(pp) / (np.max(np.sqrt(pp))))
plt.scatter(spec_data[:, 1], spec_data[:, 3])
plt.subplot(2, 5, 3)
plt.title('amplitude')
ref = (np.max(pp))
pp_new = 10 * np.log10(pp / ref)
plt.plot(ff, np.sqrt(pp) / (np.max(np.sqrt(pp))))
plt.scatter(spec_data[:, 1], spec_data[:, 2])
plt.subplot(2, 5, 4)
plt.title(' power of harmonics relative to fundamental in decibel')
ref = (np.max(pp))
pp_new = 10 * np.log10(pp / ref)
plt.plot(ff, pp_new)
plt.scatter(spec_data[:, 1], spec_data[:, 4])
plt.subplot(2, 5, 5)
plt.title(' original')
ref = (np.max(pp))
pp_new = 10 * np.log10(pp / ref)
plt.plot(ff, pp)
plt.scatter(spec_data[:, 1], spec_data[:, 6])
plt.subplot(2, 5, 6)
plt.title('modified base on the harmon group and not on eodanalysis')
ref = (np.max(pp))
pp_new = 10 * np.log10(pp / ref)
plt.plot(ff, np.sqrt(pp) / np.max(np.sqrt(pp)))
plt.scatter(spec_data[:, 1], np.sqrt(spec_data[:, 6]) / np.max(np.sqrt(spec_data[:, 6])))
spectrum = scipy.fft(baseline_array)
# phase = np.angle(spectrum)
# Number of sample points
N = len(time_array)
# Sample spacing
T = 1.0 / sampling_data # f = 800 Hz
# Create a signal
x = np.linspace(0.0, N * T, N)
yf = np.fft.fft(baseline_array)
freq = np.fft.fftfreq(x.size, d=T)
phase_here = [[]] * len(spec_data[:, 0])
amp_here = [[]] * len(spec_data[:, 0])
for i in range(len(spec_data[:, 0])):
index = np.where(np.isclose(freq, spec_data[:, 1][i], atol=1 / (T * N)))
phase_here[i] = np.angle(yf[index[0][0]])
amp_here[i] = np.abs(yf[index[0][0]])
plt.subplot(2, 5, 7)
plt.title('amplitude fft')
plt.scatter(spec_data[:, 1], amp_here / np.max(amp_here))
plt.plot(ff, np.sqrt(pp) / np.max(np.sqrt(pp)))
plt.subplot(2, 5, 8)
plt.title('Phase')
plt.scatter(np.arange(0, len(phase_here), 1), phase_here, color='red', label='my')
plt.scatter(np.arange(0, len(phase), 1), phase, color='blue', label='thunder')
plt.subplot(2, 5, 9)
plt.plot(fit_between_waves[:, 0], fit_between_waves[:, 1])
plt.plot(fit_between_waves[:, 0], fit_between_waves[:, 2])
plt.xlim([0, 0.006])
plt.show()
def thunder_morph_func(phaseshift_fr=0, cell_recording='2013-01-08-aa-invivo-1', eod_fr=750, sampling=20000,
stimulus_length=1, a_fr=1, nfft=4068 * 4, data_dir="../data/cells", sampling_data=40000,
fish_morph_harmonics_var='analyze', global_eod=[]):
# function using the thunderfish toolbox to make a morph on a provided baseline recording with a desired eodf
#
# Input
# time_array -- the corresponding time array for the morph
# cell_recording -- the cell recording you have a baseline activity of the EOD and you want to morph
# eod_fr -- the desired frequency for the EOD, that might deviate from the one in your recording
# sampling -- sampling rate
# stimulus_length -- stimulus length
# a_fr -- amplitude of created array
# data_dir -- default "../data", where you store your baseline recording
# nfft -- nfft for the spectral analysis
# sampling_data -- sampling of your recorded cell default 40000
#
# Output
#
# retrieve the global EOD
if len(global_eod) == 0:
# load the data of the desired cell
# embed()
if os.path.exists(data_dir + '/' + cell_recording):
b = open_files(filename=cell_recording, data_dir=data_dir)
else:
b = open_files(filename=cell_recording, data_dir="../data/Ramona")
# retrieve the baseline
t = find_tags(b, names='baseline')
global_eod = t.retrieve_data('EOD')
global_eod = global_eod[0:10 * sampling_data]
eod_fr_data = b.metadata.sections[0].sections['Subject']['EOD Frequency']
else:
eod_fr_data = eod_fr
t = []
b = []
# calculate its initial power spectral properties
p_initial_array, f_initial_array = ml.psd(global_eod - np.mean(global_eod), Fs=sampling_data, NFFT=nfft,
noverlap=nfft // 2)
if eod_fr == 'equal':
eod_fr = eod_fr_data
# normalized_data = global_eod[0:-1] / np.max(global_eod)
normalized_data = zenter_and_normalize(global_eod, 1)
# make a time array in seconds with the sampling of the data
time_data = np.arange(0, len(normalized_data) / sampling_data, 1 / sampling_data)
# embed()
input, harmon, new, fitting, spec_data, amp, phase = find_harmonics_new(global_eod, sampling_data, eod_fr_data,
f_initial_array, p_initial_array, time_data,
normalized_data,
fish_morph_harmonics_var=fish_morph_harmonics_var)
eod_fish_r = fakefish.wavefish_eods((amp, phase), frequency=eod_fr, samplerate=sampling,
duration=stimulus_length, phase0=phaseshift_fr, noise_std=0.00)
eod_fish_r = zenter_and_normalize(eod_fish_r, a_fr)
p_new_array, f_new_array = ml.psd(eod_fish_r - np.mean(eod_fish_r), Fs=sampling, NFFT=nfft,
noverlap=nfft // 2) #
# embed()
test = False
if test == True:
plt.plot(np.arange(0, stimulus_length, 1 / sampling) / eod_fr, eod_fish_r)
plt.plot(time_data[0:len(eod_fish_r)] / eod_fr_data, normalized_data[0:len(eod_fish_r)])
plt.show()
if test == True:
input = thunder_morph_test_something(amp, cell_recording, eod_fish_r, eod_fr, f_initial_array, f_new_array,
fish_morph_harmonics_var, global_eod, input, normalized_data,
p_initial_array, p_new_array, phase, sampling, sampling_data,
stimulus_length, time_data)
return input, eod_fr_data, global_eod, time_data, eod_fish_r, p_initial_array, f_initial_array, p_new_array, f_new_array, amp, phase, b, t
def eod_fish_r_generation(time_array, eod_fr=750, a_fr=1, stimulus_length=1, phaseshift_fr=0, cell_recording='',
offset_of_zero_wave=0, mimick='no', sampling=20000, fish_receiver='Alepto', deltat=1 / 20000,
nfft=2 ** 14, nfft_for_morph=2 ** 14, fish_morph_harmonics_var='analyzed', beat='beat',
plot=False, test=False):
# IMPORTANT: the first three arguments
# Function to generate the EOD wave of the receiver fish
# Input
# cell_recording -- if any mimick, then this cell recording can be utilized
# offset_of_zero_wave -- if this wave has zero amplitude does it has any offset, if 'zeros' then not
# time_array -- desired time array in seconds
# mimick -- if 'copy_mimick' then stretch the cell_recording to the right EODf, if 'thunderfish' in mimick
# use the Fourier analysis provided by thunderfish
# eod_fr -- desired frequency
# sampling -- the sampling
# stimulus_length -- the stimulus length
# a_fr -- amplitude
# fish_receiver -- species of fish, in case 'thunder' in mimick
# deltat -- reverse of sampling
# nfft -- nfft for the Fourier analysis
# nfft_for_morph -- nfft for the Fourier analysis of the baseline
#
## Output
# eod_fish_r -- the eod array of the receiver fish
# deltat -- sampling, in case it changed if 'copy' was in mimick, else it will be equal to the input deltat
# eod_fr -- eodf, in case it changed if 'copy' was in mimick, else it will be equal to the input eod_fr
# time_array -- time_array, in case it changed if 'copy' was in mimick, else it will be equal to the input time_array
if (phaseshift_fr == 'rand') | (phaseshift_fr == 'randALL'):
# embed()
phaseshift_fr = np.random.rand() * 2 * np.pi
# choose offset in case there is no wave
if (a_fr == 0) and (offset_of_zero_wave != 'zeros'):
eod_fish_r = np.ones(len(time_array))
# else create the EOD array
else:
# if not pure sinus
if 'Mimick' in mimick:
# if fish wave form based on thunderfish toolbox and is not already in the wavefish_eod library
if ('Wavemorph' in mimick) and ('Receiver' in mimick):
input, eod_fr_data, data_array_eod, time_data_eod, eod_fish_r, pp, ff, p_array_new, f_new, amp, phase, b, t = thunder_morph_func(
phaseshift_fr, cell_recording, eod_fr, sampling, stimulus_length, a_fr, nfft_for_morph,
fish_morph_harmonics_var=fish_morph_harmonics_var)
# if you want a fish from a species but don't have a sample, or want to use the samples provided by thunderfish
elif ('Thunder' in mimick) and ('Wavemorph' not in mimick) and ('Receiver' in mimick):
# embed()
eod_fish_r = fakefish.wavefish_eods(fish_receiver, frequency=eod_fr, samplerate=sampling,
duration=stimulus_length, phase0=phaseshift_fr, noise_std=0.00)
# embed()
if ('Zentered' in mimick) and ('NotZentered' not in mimick):
eod_fish_r = zenter_and_normalize(eod_fish_r, a_fr)
# in case you want to mirror the array
if 'Mirrowed' in mimick:
eod_fish_r = -eod_fish_r[::-1]
if plot == True:
powerspectraallfish()
# embed()
# in case you want to mimick by stretching the known array and adjusting the sampling
elif 'Copy' in mimick:
eod_fish_r, sampling, eod_fr, cell_recording = mimick_func(time_array, stimulus_length,
eod_fr, a_fr, deltat,
mimick=mimick, data_name=cell_recording, )
# we changed the eod_fish_r, so we need to figure out the changed parameters
p_array, f = ml.psd(eod_fish_r - np.mean(eod_fish_r), Fs=sampling, NFFT=nfft,
noverlap=nfft // 2) #
# the new eod_fr at the position of the highest peak in the power spectrum
eod_fr = f[np.argmax(p_array)]
# the new deltat depending on the sampling
deltat = 1 / sampling
# also adjusted time array
time_array = np.arange(0, stimulus_length, deltat)
# sometimes, due to the stretching, there is a difference of one in length, therefore adjust to the same length
if len(time_array) > len(eod_fish_r):
time_array = time_array[0:len(eod_fish_r)]
elif len(time_array) < len(eod_fish_r):
eod_fish_r = eod_fish_r[0:len(time_array)]
else:
# in case of no mimick we just create a sine with the right frequency
time_fish_r = time_array * 2 * np.pi * eod_fr
eod_fish_r = a_fr * np.sin(time_fish_r + phaseshift_fr)
else:
# in case of no mimick we just create a sine with the right frequency
if 'chirp' in beat:
sigma = 0.014 / math.sqrt((2 * math.log(10)))
time_new = np.arange(-time_array[-1] / 2, time_array[-1] / 2 + 3, np.diff(time_array)[0])
time_new = time_new[0:len(time_array)]
eod_fish_r = integrate_chirp(a_fr, time_new, eod_fr, 0, 60, sigma)
# eod_fe = 700
# time_fish_e = time_array * 2 * np.pi * eod_fe
# eod_fish_e = a_fr * np.sin(time_fish_e + phaseshift_fr)
else:
time_fish_r = time_array * 2 * np.pi * eod_fr
# embed()
# try:
eod_fish_r = a_fr * np.sin(time_fish_r + phaseshift_fr)
# except:
# print('phaseshift thing')
# embed()
# embed()
# if the amplitude of the signal was zero this would have resulted in a zero array; since sometimes there are negative
# zeros, here we make them all positive
if (a_fr == 0) and (offset_of_zero_wave == 'zeros'):
eod_fish_r = np.abs(eod_fish_r)
# ok this is for toy testing if all variants give the same results
if test == True:
compare_all_eod_creations()
# embed()
return eod_fish_r, deltat, eod_fr, time_array
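# Hedged usage sketch for eod_fish_r_generation: with mimick='no' and the default beat='beat' the function
# reduces to a plain sine of the receiver EOD, eod_fish_r = a_fr * sin(2*pi*eod_fr*t + phaseshift_fr).
# The parameter values and the helper name _receiver_eod_sketch below are made up for illustration.
def _receiver_eod_sketch():
    deltat = 1.0 / 20000
    time_array = np.arange(0, 0.5, deltat)  # 0.5 s at 20 kHz
    eod_fish_r, deltat, eod_fr, time_array = eod_fish_r_generation(
        time_array, eod_fr=750, a_fr=1, stimulus_length=0.5, phaseshift_fr=0,
        mimick='no', sampling=20000, deltat=deltat)
    return eod_fish_r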
def mimick_func(time, stimulus_length, eod_fr, a_fr, deltat, mimick='Mimick',
data_name="2019-09-10-ab-invivo-1"):
data_dir = "../data"
data = ["2019-09-10-ab-invivo-1", '2019-09-23-ae-invivo-1', '2019-10-21-ac-invivo-1', '2019-10-28-ae-invivo-1',
'2020-07-07-aa-invivo-1']
# data_name = '2020-07-07-aa-invivo-1'
# data_name = "2019-09-10-ab-invivo-1"
eod_frs = [[]] * (len(data) + 2)
globs = [[]] * (len(data) + 2)
pps = [[]] * (len(data) + 2)
ffs = [[]] * (len(data) + 2)
testing = False
if testing == True:
mimick_test()
# data_name = data[i]
if os.path.exists(data_dir + '/' + data_name):
b = open_files(filename=data_name, data_dir=data_dir)
else:
b = open_files(filename=data_name, data_dir=load_folder_name('calc_model'))
# b = open_files(filename=data_name, data_dir=data_dir)
t = find_tags(b, names='baseline') # tag
# local_eod = t.retrieve_data('LocalEOD-1')
lu = 50
nfft = 8000
sampling_data = 40000
global_eod = t.retrieve_data('EOD')
global_eod = global_eod[1500:40000 + 1500]
pp, ff = ml.psd(global_eod - np.mean(global_eod), Fs=sampling_data, NFFT=nfft, noverlap=nfft // 2)
eod_fr1 = b.metadata.sections[0].sections['Subject']['EOD Frequency']
sampling = 1 / deltat
if 'Copy' in mimick:
# len(time) * delta
global_eod0 = t.retrieve_data('EOD')
global_eod = global_eod0[
1500:int(stimulus_length * sampling_data + 1500)] # take global EOD in appropriate length
sec = len(global_eod) / sampling_data # see the initial time
fit_in_glob = (sec / (1 / eod_fr1)) # see how often the eod is inside there
sec_new = fit_in_glob * (1 / eod_fr) # the time this many EOD cycles would take at the new eod_fr
sampling_new = len(global_eod) / sec_new # to explain this time you need a new sampling
global_eod1 = global_eod0[1500:int(stimulus_length * sampling_new) + 1500]
nr = int(np.round(sampling_new / sampling)) # see how many numbers you need to sample
sampling_very_new = sampling_new / nr # new sampling rate
eod_fish_r = global_eod1[0:len(global_eod1):nr] # resample
zentered = (eod_fish_r - (np.max(eod_fish_r) + np.min(eod_fish_r)) / 2)
eod_fish_r = ((zentered / np.max(zentered))) * a_fr
# eod_fish_r = (eod_fish_r / np.max(eod_fish_r)) * a_fr
p, f = ml.psd(eod_fish_r - np.mean(eod_fish_r), Fs=sampling_very_new, NFFT=nfft, noverlap=nfft // 2)
eod_fr = f[np.argmax(p)]
plot = False
if plot == True:
plot_test()
else:
eod_fish_r_mix = [[]] * 8
for l in range(8):
# print(l)
time_fish_r = time * 2 * np.pi * eod_fr * l
height = np.sqrt(pp[np.argmin(np.abs(ff / eod_fr1 - l))])
eod_fish_r_mix[l] = height * np.sin(time_fish_r) # np.max(global_eod)*
eod_fish_r = np.sum(eod_fish_r_mix, axis=0)
sampling_very_new = sampling * 1
# pp, ff = ml.psd(eod_fish_r - np.mean(global_eod), Fs=sampling_data, NFFT=nfft, noverlap=nfft // 2)
eod_fish_r = (eod_fish_r / np.max(eod_fish_r)) * a_fr
plot = False
if plot == True:
test_fish(deltat, eod_fish_r, eod_fr, ff, nfft, pp)
return eod_fish_r, sampling_very_new, eod_fr, data_name
def find_tags(b, names='ficurve'):
for t in b.tags:
if names in t.name.lower():
break
return t
def integrate_chirp(a_fe, time, beat, phase_zero, size, sigma):
I = ((np.pi ** 0.5) / 2) * sp.special.erf(time / sigma) - ((np.pi ** 0.5) / 2) * sp.special.erf(-np.inf)
phase = time * 2 * np.pi * beat + 2 * np.pi * size * sigma * I + phase_zero
eod_fe_chirp = a_fe * np.sin(phase)
return eod_fe_chirp
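# Hedged usage sketch for integrate_chirp: the Gaussian frequency excursion of a chirp is integrated
# analytically via the error function, giving the phase of a beat whose frequency transiently rises by
# `size` Hz with width `sigma`. The parameter values and the helper name _chirp_sketch are made up.
def _chirp_sketch():
    deltat = 1.0 / 20000
    time = np.arange(-0.5, 0.5, deltat)  # chirp centered at t = 0
    sigma = 0.014 / math.sqrt(2 * math.log(10))  # 14 ms width, as used elsewhere in this module
    return integrate_chirp(1, time, 30, 0, 60, sigma)  # 30 Hz beat with a 60 Hz chirp excursion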
def open_files(filename=None, data_dir=None):
f = nix.File.open(os.path.join(data_dir, filename, filename + ".nix"), nix.FileMode.ReadOnly)
b = f.blocks[0]
return b
def load_folder_name(name):
# embed()#miniconda
# embed()
version_comp, add_on, name_output1, name_output, save_folder = find_code_vs_not()
# embed()
# lists = []
# for i in range(len(inspect.stack())):
# save_name = inspect.stack()[i][1]
# lists.append(save_name)
if version_comp == 'public':
output = '..'
else:
dicts = {'calc_model_core': add_on + 'calc_model_core',
'calc_model': add_on + 'calc_model',
'calc_beats': add_on + 'calc_beats',
'calc_MSE': add_on + 'calc_MSE',
'calc_phaselocking': add_on + 'calc_phaselocking',
'calc_JAR': add_on + 'calc_JAR',
'threefish': add_on + 'calc_threeFish',
'calc_FI_Curve': add_on + 'calc_FI_Curve',
'calc_base': add_on + 'calc_base',
'calc_RAM': add_on + 'calc_RAM',
'calc_ROC': add_on + 'calc_model',
'calc_vova': add_on + 'calc_vova',
'calc_cocktailparty': add_on + 'calc_cocktailparty',
'data': add_on + '../data/'} # load_folder_name('threefish')+''
# embed()
output = dicts[name]
# if add_on != '':
# dicts['calc_model_core'] = '../'+name_output1 # todo hier noch einfügen falls das file nicht da ist es rüber kopieren
# embed()
return output
def zenter_and_normalize(eod_fish_r, a_fr, normalize=True, old=False):
# Function to center and normalize the array to the desired amplitude
# old:
if old:
zentered = (eod_fish_r - (np.max(eod_fish_r) + np.min(eod_fish_r)) / 2)
zentered = (eod_fish_r - (np.max(eod_fish_r)) + (np.abs(np.max(eod_fish_r)) + np.abs(np.min(eod_fish_r))) / 2)
if normalize:
eod_fish_r = ((zentered / np.max(zentered))) * a_fr
return eod_fish_r
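# Hedged usage sketch for zenter_and_normalize: an offset sine is shifted to be symmetric around zero and
# rescaled so that its maximum equals the requested amplitude a_fr. Values and helper name are made up.
def _zenter_sketch():
    t = np.arange(0, 1, 1.0 / 20000)
    wave = 0.3 + 0.8 * np.sin(2 * np.pi * 750 * t)  # offset carrier with non-unit amplitude
    return zenter_and_normalize(wave, 1)  # centered around zero and peak-normalized to 1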
def mean_eod(freq_whole, amp_whole, amp_point_whole, freq_orig, amp_orig, amp_point_orig):
keys = [k for k in freq_whole]
f_whole = [[]] * len(keys)
a_p_whole = [[]] * len(keys)
a_whole = [[]] * len(keys)
f_orig = [[]] * len(keys)
a_orig = [[]] * len(keys)
a_p_orig = [[]] * len(keys)
# embed()
keys = np.sort(keys)
for k_nr, k in enumerate(keys):
f_whole[k_nr] = np.nanmean(freq_whole[k])
a_whole[k_nr] = np.nanmean(amp_whole[k])
a_p_whole[k_nr] = np.nanmean(amp_point_whole[k])
f_orig[k_nr] = np.nanmean(freq_orig[k])
a_orig[k_nr] = np.nanmean(amp_orig[k])
a_p_orig[k_nr] = np.nanmean(amp_point_orig[k])
return keys, f_whole, a_p_whole, a_whole, f_orig, a_orig, a_p_orig
def load_waves(nfft_for_morph, data_list_short, a_fr=1, stimulus_length=1, sampling=40000, eod_fr=750, global_eod=[]):
cell_recording = data_list_short
time_array = np.arange(0, stimulus_length, 1 / sampling)
fish_morph_harmonics_var = 'analyzed'
# input, eod_fr_data, global_eod, time_data, time_array, eod_fish_r, p_initial_array, f_initial_array, p_new_array, f_new_array, amp, phase, b, t
input, eod_fr_data, data_array_eod, time_data_eod, eod_fish_r, pp, ff, p_array_new, f_new, amp, phase, b, t = thunder_morph_func(
0, cell_recording, eod_fr, sampling, stimulus_length, a_fr, nfft_for_morph, global_eod=global_eod)
return time_array, data_array_eod, eod_fish_r, ff / eod_fr_data, eod_fr_data, pp, eod_fish_r, p_array_new, f_new
def get_maxima(eod, time, eod_indices):
maxima = []
positions = []
for i in range(len(eod_indices) - 1):
start_index = eod_indices[i]
stop_index = eod_indices[i + 1]
maxima.append(np.max(eod[start_index:stop_index]))
positions.append(np.where(eod[start_index:stop_index] == maxima[-1])[0][0] + start_index)
return maxima, positions, time[positions]
def global_maxima(sampling, eod_fr, eod_rectified_up, kind='cubic'):
# period_length = max(len(period_fish_e), len(period_fish_r))
# period_length = len(period_fish_r)
period_length = int(np.round((1 / eod_fr) * sampling))
# embed()
if period_length > len(eod_rectified_up):
maxima_values = np.max(eod_rectified_up)
maxima_index = np.argmax(eod_rectified_up)
maxima_interp = [maxima_values] * len(eod_rectified_up)
else:
split_windows = np.arange(period_length, len(eod_rectified_up), period_length)
splits = np.split(eod_rectified_up, split_windows)
steps = np.arange(0, len(eod_rectified_up), len(splits[0]))
# embed()
try:
maxima_values = np.max(splits[0:-1], 1)
except:
print('maxima problem')
embed()
maxima_index = np.argmax(splits[0:-1], 1)
maxima_index = maxima_index + steps[0:-1]
# maxima_interp = np.interp(np.arange(0, len(eod_rectified_up), 1), maxima_index, maxima_values)
# embed()
# maxima_interp2 = interp1d(maxima_index/sampling, maxima_values, kind=kind, fill_value=(maxima_values[0],maxima_values[-1]),
# bounds_error=False)
# new_maxima = inter(time) # fill_value="extrapolate",
maxima_interp = interpolate(maxima_index / sampling, maxima_values,
np.arange(0, len(eod_rectified_up), 1) / sampling, kind=kind)
return maxima_values, maxima_index, maxima_interp
def interpolate(eod_times, eod_maxima, time, kind='cubic'):
# todo: possibly change the fill value
inter = interp1d(eod_times, eod_maxima, kind=kind, fill_value=(eod_maxima[0], eod_maxima[-1]), bounds_error=False)
new_maxima = inter(time) # fill_value="extrapolate",
# embed()
return new_maxima
def extract_am(eod, eod_time, norm=True, extract='', threshold=0.02, sampling=40000, kind='cubic', eodf=500, sigma=1,
emb=False):
if emb:
embed()
if norm:
eod_norm = zenter_and_normalize(eod, 1)
else:
eod_norm = eod
# eod = eod/np.max(eod)
if 'globalmax' in extract:
stimulus = rectify(eod_norm)
maxima_values, maxima_index, am_interpolated = global_maxima(sampling, eodf, stimulus, kind=kind)
else:
eod_indices, eod_times, filtered = thresh_crossings(eod_norm, eod_time, threshold=threshold,
sigma=sigma, ) # -0.02
maxima, max_pos, max_times = get_maxima(eod_norm, eod_time, eod_indices)
# max_times = np.array(max_times)[np.array(maxima) > 0.15]
# maxima = np.array(maxima)[np.array(maxima) > 0.15]
am_interpolated = interpolate(max_times, maxima, eod_time, kind=kind)
# todo: still build in a minimal period here
test = False
if test:
eod_test()
return am_interpolated, eod_norm
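# Hedged usage sketch for extract_am: the amplitude modulation of an EOD carrier is recovered by finding
# one maximum per carrier period (via threshold crossings) and interpolating between those maxima. The
# test signal below (a 750 Hz carrier with a 10 Hz, 20 % AM) and the helper name are made up.
def _extract_am_sketch():
    sampling = 40000
    t = np.arange(0, 1, 1.0 / sampling)
    am = 1 + 0.2 * np.sin(2 * np.pi * 10 * t)  # slow amplitude modulation
    eod = am * np.sin(2 * np.pi * 750 * t)  # modulated carrier
    envelope, eod_norm = extract_am(eod, t, sampling=sampling, eodf=750)
    return envelope, am  # the interpolated envelope should follow the 10 Hz modulation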
def rectify(stimulus):
stimulus_sam_rec = stimulus * 1
stimulus_sam_rec[stimulus_sam_rec < 0.0] = 0.0
return stimulus_sam_rec
def thresh_crossing1(sigma, eod, threshold, time):
if sigma != []:
eod_sm = gaussian_filter(eod, sigma=sigma)
else:
eod_sm = eod
shifted_eod = np.roll(eod_sm, 1)
indices = np.arange(0, len(eod_sm), 1)
try:
eod_indices = indices[(eod_sm >= threshold) & (shifted_eod < threshold)]
except:
print('eod something')
embed()
eod_times = time[eod_indices]
return eod_times, eod_indices, eod_sm, indices, shifted_eod
def thresh_crossings(eod, time, threshold=-0.1, sigma=1, sigma2=60):
# find where thresh is crossed in eod signal to extract time windows for finding maximum
eod_times, eod_indices, eod_sm, indices, shifted_eod = thresh_crossing1(sigma, eod, threshold, time)
if sigma2 != []:
filtered = gaussian_filter(eod_sm, sigma=sigma2) # eodf/sampling
else:
filtered = eod_sm # eodf/sampling
eod_indices_fil = indices[(eod_sm >= filtered) & (shifted_eod < filtered)]
# try:
eod_indices_fil = eod_indices_fil[np.concatenate([[True], np.diff(eod_indices_fil) > 3])]
# except:
# print('threshold thing')
# embed()
# embed()
eod_times_fil = time[eod_indices_fil]
test = False
# embed()
if test:
filtered_cut = filtered[eod_indices_fil]
plt.plot(time, eod_sm, zorder=1)
plt.plot(time, shifted_eod, zorder=1)
plt.plot(time, filtered, color='red')
plt.scatter(eod_times, threshold * np.ones(len(eod_times)), color='black', zorder=2)
plt.scatter(eod_times_fil, filtered_cut, color='green', zorder=3)
plt.show()
# embed()
return eod_indices_fil, eod_times_fil, filtered
def correct_burstiness(hists, spikes_all, eod_fr, eod_fr2, lim=1.5, burst_corr='', ms_factor=1000):
hists2 = []
spikes_ex = []
frs_calc2 = []
for hh, h in enumerate(hists):
# here we simply take all the spikes that remain; taking only the first distribution is just not possible
if 'inverse' in burst_corr:
first_true = [False]
first_true.extend(h < lim)
else:
first_true = [True]
first_true.extend(h > lim)
test = False
spike_ex = np.array(spikes_all[hh])[np.array(first_true)]
spikes_ex.append(spike_ex)
# embed()# todo: one could take the mean of these true/false arrays
try:
hists2.append((np.diff(spike_ex) / ms_factor) / (1 / eod_fr[hh]))
except:
hists2.append((np.diff(spike_ex) / ms_factor) / (1 / eod_fr2[hh]))
frs_calc2.append(len(spike_ex) / (spike_ex[-1] / ms_factor))
return hists2, spikes_ex, frs_calc2
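# Hedged usage sketch for correct_burstiness: the ISIs are expressed in multiples of the EOD period and
# every spike that follows its predecessor by less than `lim` EOD periods is treated as an in-burst spike
# and removed (the first spike of each burst is kept). The spike times below are in ms (ms_factor converts
# to seconds) and, like the helper name, are made up for illustration.
def _burst_correction_sketch():
    eod_fr = [750.0]  # EOD frequency in Hz
    spikes = [np.array([10.0, 11.5, 13.0, 50.0, 90.0, 91.4])]  # spike times in ms, containing two bursts
    isis_in_eod_periods = [(np.diff(spikes[0]) / 1000) / (1 / eod_fr[0])]
    hists2, spikes_ex, frs = correct_burstiness(isis_in_eod_periods, spikes, eod_fr, eod_fr, lim=1.5)
    return spikes_ex  # the burst followers at 11.5, 13.0 and 91.4 ms are removed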
def burst_saved(burst_corr='_burstIndividual_'):
if '2' not in burst_corr:
# embed()
names = {'2010-06-18-ag-invivo-1': 2.5,
'2010-06-18-ar-invivo-1': 2.5,
'2010-06-21-ah-invivo-1': 2.5,
'2010-06-21-av-invivo-1': 2.5,
'2010-07-13-ap-invivo-1': 2.5,
'2010-07-13-ax-invivo-1': 2.5,
'2010-07-13-bc-invivo-1': 2.5,
'2010-08-11-ab-invivo-1': 2.5,
'2010-08-11-ae-invivo-1': 2.5,
'2010-08-11-ak-invivo-1': 2.5,
'2010-08-11-an-invivo-1': 2.5,
'2010-08-11-ao-invivo-1': 2.5,
'2010-08-11-ap-invivo-1': 2.5,
'2010-08-11-aq-invivo-1': 2.5,
'2010-08-11-ar-invivo-1': 2.5,
'2010-08-27-ag-invivo-1': 2.5,
'2010-08-27-ai-invivo-1': 2.5,
'2010-08-31-ab-invivo-1': 2.5,
'2010-08-31-ah-invivo-1': 2.5,
'2010-05-21-ak': 2.5,
'2010-11-08-ai-invivo-1': 2.5,
'2010-11-08-al-invivo-1': 2.5,
'2010-12-07-ab-invivo-1': 2.5,
'2011-02-18-ad-invivo-1': 2.5,
'2011-05-09-ad-invivo-1': 2.5,
'2011-06-09-aa-invivo-1': 2.5,
'2011-09-21-ag-invivo-1': 2.5,
'2012-02-27-ac': 2.5,
'2012-03-08-ag': 2.5,
'2012-03-08-al': 2.5,
'2012-03-08-aj': 3.5,
'2012-03-23-ae': 3.5,
'2012-05-30-ad': 2.5,
'2012-05-10-aa-invivo-1': 4.5,
'2012-11-20-ab': 2.5,
'2013-04-10-ac-invivo-1': 2.5,
'2014-01-10-ae-invivo-1': 4.5,
'2014-01-10-ai-invivo-1': 3.5,
'2014-12-03-ah-invivo-1': 3.5,
'2017-08-11-ac-invivo-1': 3,
'2017-08-11-ad-invivo-1': 2.5,
'2017-10-25-af-invivo-1': 5.5,
'2018-01-09-ab-invivo-1': 2.5,
'2018-01-09-ac-invivo-1': 2.5,
'2018-03-28-ab-invivo-1': 40,
'2018-07-18-ab-invivo-1': 2.5,
'2018-08-14-aj-invivo-1': 2.5,
'2018-08-24-ap-invivo-1': 2.5,
'2018-08-24-ah-invivo-1': 2.5,
'2018-08-24-ao-invivo-1': 2.5,
'2018-08-30-ab-invivo-1': 2.5,
'2018-09-05-ai-invivo-1': 2.5,
'2018-09-06-ae-invivo-1': 3.5,
'2018-09-06-ag-invivo-1': 4.5,
'2018-09-06-ai-invivo-1': 2.5,
'2018-11-14-am-invivo-1': 4.5,
'2018-11-14-af-invivo-1': 4.5,
'2019-02-14-ad-invivo-1': 2.5,
'2019-05-15-ai-invivo-1': 3.5,
'2019-09-10-ac-invivo-1': 2.5,
'2019-09-23-ag-invivo-1': 2.5,
'2019-09-23-aq-invivo-1': 2.5,
'2019-09-21-af-invivo-1': 2.5,
'2019-10-21-af-invivo-1': 2.5,
'2019-10-21-aj-invivo-1': 2.5,
'2019-10-21-av-invivo-1': 2.5,
'2019-10-28-aj-invivo-1': 2.5,
'2019-11-13-ab-invivo-1': 1.5,
'2019-11-18-ak-invivo-1': 2.5,
'2020-08-12-ae-invivo-1': 2.5,
'2020-10-01-ac-invivo-1': 2.5,
'2020-10-01-af-invivo-1': 2.5,
'2020-10-01-ag-invivo-1': 2.5,
'2020-10-20-ab-invivo-1': 2.5,
'2020-10-20-ad-invivo-1': 2.5,
'2020-10-20-ae-invivo-1': 2.5,
'2020-10-21-aa-invivo-1': 2.5,
'2020-10-21-ac-invivo-1': 2.5,
'2020-10-27-ae-invivo-1': 0,
'2020-10-27-ag-invivo-1': 0,
'2020-10-27-ai-invivo-1': 2.5,
'2020-10-29-ah-invivo-1': 2.5,
'2021-06-23-ac-invivo-1': 2.5,
'2021-08-03-aa-invivo-1': 3.5,
'2021-11-04-ai-invivo-1': 2.5,
'2021-11-08-aa-invivo-1': 2.5,
'2021-12-17-ab-invivo-1': 2.5,
'2022-01-05-ab-invivo-1': 2.5,
'2022-01-06-aa-invivo-1': 3.5,
'2022-01-06-ab-invivo-1': 2.5,
'2022-01-06-ac-invivo-1': 2.5,
'2022-01-06-ae-invivo-1': 4.5,
'2022-01-06-ag-invivo-1': 2.5,
'2022-01-06-ah-invivo-1': 3.5,
'2022-01-06-af-invivo-1': 5,
'2022-01-08-ad-invivo-1': 2.5,
'2022-01-08-ah-invivo-1': 2.5,
'2022-01-27-ab-invivo-1': 2.5,
'2022-01-28-aa-invivo-1': 2.5,
'2022-01-28-ac-invivo-1': 4.5,
'2022-01-28-ad-invivo-1': 2.5,
'2022-01-28-af-invivo-1': 2.5,
'2022-01-28-ag-invivo-1': 2.5,
'2022-01-28-ah-invivo-1': 2.5,
'2022-01-28-al-invivo-1': 2.5,
'2022-01-28-am-invivo-1': 3.5,
'2022-02-07-ah-invivo-1': 2.5,
'2022-02-07-ai-invivo-1': 2.5,
'2022-02-08-ao-invivo-1': 2.5,
'2022-02-15-aa-invivo-1': 2.5,
'2022-02-15-ab-invivo-1': 2.5,
'2022-02-15-ac-invivo-1': 2.5,
'2010-06-15-ak-invivo-1': 2.5,
'2010-06-15-ah-invivo-1': 2.5,
'2010-06-18-al-invivo-1': 2.5,
'2010-06-18-aq-invivo-1': 2.5,
'2010-06-21-ad-invivo-1': 2.5,
'2010-06-21-ae-invivo-1': 2.5,
'2010-06-21-al-invivo-1': 2.5,
'2010-06-21-ao-invivo-1': 2.5,
'2010-06-21-ar-invivo-1': 2.5,
'2010-06-21-at-invivo-1': 2.5,
'2010-07-08-aa-invivo-1': 3.5,
'2010-07-13-ai-invivo-1': 2.5,
'2010-07-13-ak-invivo-1': 2.5,
'2010-07-13-al-invivo-1': 3.5,
'2010-07-13-aw-invivo-1': 2.5,
'2010-07-13-az-invivo-1': 2.5,
'2010-07-13-bf-invivo-1': 0.5,
'2010-08-25-ae-invivo-1': 0.5,
'2010-08-25-ah-invivo-1': 0.5,
'2010-09-24-ab-invivo-1': 2.5,
'2010-09-24-ac-invivo-1': 2.5,
'2010-09-24-ad-invivo-1': 2.5,
'2010-09-24-ae-invivo-1': 0.5,
'2010-09-24-af-invivo-1': 2.5,
'2010-11-08-ad-invivo-1': 2.5,
'2010-11-08-ak-invivo-1': 2.5,
'2010-11-26-aa-invivo-1': 3.5,
'2010-11-26-ab-invivo-1': 3.5,
'2010-11-26-ai-invivo-1': 2.5,
'2010-11-26-aj-invivo-1': 2.5,
'2010-11-26-ak-invivo-1': 2.5,
'2011-02-15-ab-invivo-1': 2.5,
'2011-04-19-ab-invivo-1': 2.5,
'2011-04-19-ac-invivo-1': 2.5,
'2011-09-21-ae-invivo-1': 2.5,
'2011-09-21-ah-invivo-1': 2.5,
'2011-09-21-ak-invivo-1': 2.5,
'2011-11-10-af-invivo-1': 2.5,
'2012-04-20-ak-invivo-1': 2.5,
'2012-05-10-ad-invivo-1': 2.5,
'2012-06-05-af-invivo-1': 2.5,
'2012-06-25-af-invivo-1': 2.5,
'2012-06-25-aj-invivo-1': 2.5,
'2012-06-27-aj-invivo-1': 2.5,
'2012-06-27-ak-invivo-1': 2.5,
'2012-06-28-aa-invivo-1': 3.5,
'2012-07-03-aa-invivo-1': 2.5,
'2012-07-03-al-invivo-1': 2.5,
'2012-07-11-ac-invivo-1': 2.5,
'2012-07-11-ae-invivo-1': 2.5,
'2012-08-19-aa-invivo-1': 3.5,
'2012-10-19-ab-invivo-1': 2.5,
'2012-10-23-aa-invivo-1': 3.5,
'2012-10-31-ab-invivo-1': 2.5,
'2012-10-31-af-invivo-1': 2.5,
'2012-11-16-ab-invivo-1': 2.5,
'2012-12-18-ad-invivo-1': 2.5,
'2012-12-21-ag-invivo-1': 2.5,
'2013-01-08-ab-invivo-1': 2.5,
'2013-01-08-ac-invivo-1': 2.5,
'2013-04-10-ae-invivo-1': 2.5,
'2013-04-17-ae-invivo-1': 2.5,
'2013-04-18-aa-invivo-1': 2.5,
'2013-04-18-ab-invivo-1': 2.5,
'2014-01-16-ag-invivo-1': 2.5,
'2014-03-25-aa-invivo-1': 2.5,
'2014-05-21-ab-invivo-1': 2.5,
'2014-05-21-af-invivo-1': 2.5,
'2014-12-03-ad-invivo-1': 2.5,
'2014-12-03-aj-invivo-1': 3.5,
'2014-12-03-ao-invivo-1': 2.5,
'2014-12-10-aa-invivo-1': 3.5,
'2014-12-11-aa-invivo-1': 2.5,
'2015-01-20-ae-invivo-1': 2.5,
'2015-01-20-af-invivo-1': 4.5,
'2015-01-20-ag-invivo-1': 4.5,
'2015-01-20-ah-invivo-1': 4.5,
'2017-07-18-ah-invivo-1': 2.5,
'2017-08-11-aa-invivo-1': 2.5,
'2017-08-11-ab-invivo-1': 2.5,
'2017-10-25-ad-invivo-1': 3.5,
'2017-10-25-ae-invivo-1': 3.5,
'2017-10-25-aq-invivo-1': 4.5,
'2018-01-09-ab-invivo-1': 2.5,
'2018-01-10-ad-invivo-1': 2.5,
'2018-01-10-ae-invivo-1': 2.5,
'2018-01-10-ag-invivo-1': 2.5,
'2018-01-10-aj-invivo-1': 2.5,
'2018-01-10-ak-invivo-1': 2.5,
'2018-01-10-al-invivo-1': 2.5,
'2018-01-12-ae-invivo-1': 2.5,
'2018-01-12-ap-invivo-1': 2.5,
'2018-01-17-ag-invivo-1': 2.5,
'2018-01-19-aa-invivo-1': 2.5,
'2018-01-19-ab-invivo-1': 2.5,
'2018-01-19-ac-invivo-1': 2.5,
'2018-01-19-ae-invivo-1': 2.5,
'2018-01-19-af-invivo-1': 2.5,
'2018-01-19-ah-invivo-1': 2.5,
'2018-01-19-ai-invivo-1': 2.5,
'2018-01-19-ak-invivo-1': 2.5,
'2018-01-19-an-invivo-1': 2.5,
'2018-01-19-ar-invivo-1': 2.5,
'2018-05-08-ab-invivo-1': 2.5,
'2018-06-25-ad-invivo-1': 2.5,
'2018-06-25-ae-invivo-1': 2.5,
'2018-06-25-af-invivo-1': 2.5,
'2018-06-26-af-invivo-1': 2.5,
'2018-07-26-af-invivo-1': 2.5,
'2018-08-24-am-invivo-1': 2.5,
'2018-08-29-aa-invivo-1': 2.5,
'2018-09-05-ab-invivo-1': 2.5,
'2018-09-13-aa-invivo-1': 2.5,
'2018-11-13-af-invivo-1': 2.5,
'2018-11-14-af-invivo-1': 2.5,
'2018-11-14-al-invivo-1': 2.5,
'2018-11-16-ae-invivo-1': 2.5,
'2018-12-17-ae-invivo-1': 2.5,
'2018-12-17-ah-invivo-1': 2.5,
'2018-12-21-ag-invivo-1': 2.5,
'2019-01-24-ac-invivo-1': 2.5,
'2019-01-24-ad-invivo-1': 2.5,
'2019-02-07-aa-invivo-1': 2.5,
'2019-02-11-ak-invivo-1': 2.5,
'2019-02-12-ae-invivo-1': 2.5,
'2019-02-14-ae-invivo-1': 2.5,
'2019-02-14-af-invivo-1': 2.5,
'2019-02-15-af-invivo-1': 2.5,
'2019-02-15-ag-invivo-1': 2.5,
'2019-04-23-af-invivo-1': 2.5,
'2019-04-23-ag-invivo-1': 2.5,
'2019-04-23-ah-invivo-1': 2.5,
'2019-04-23-ai-invivo-1': 2.5,
'2019-05-07-aq-invivo-1': 2.5,
'2019-05-07-au-invivo-1': 2.5,
'2019-05-07-av-invivo-1': 2.5,
'2019-05-07-ax-invivo-1': 2.5,
'2019-05-07-bb-invivo-1': 2.5,
'2019-05-07-by-invivo-1': 2.5,
'2019-05-15-ag-invivo-1': 2.5,
'2019-07-02-ad-invivo-1': 2.5,
'2019-09-23-ac-invivo-1': 2.5,
'2019-09-23-ae-invivo-1': 2.5,
'2019-09-23-ah-invivo-1': 2.5,
'2019-09-23-ak-invivo-1': 2.5,
'2019-10-21-ad-invivo-1': 2.5,
'2019-10-21-ah-invivo-1': 2.5,
'2019-10-21-ar-invivo-1': 2.5,
'2019-10-21-as-invivo-1': 2.5,
'2019-11-18-ah-invivo-1': 2.5,
'2020-06-16-ak-invivo-1': 3.5,
'2020-07-02-ag-invivo-1': 2.5,
'2020-07-07-aa-invivo-1': 2.5,
'2020-07-07-ab-invivo-1': 2.5,
'2020-10-20-af-invivo-1': 2.5,
'2021-06-23-aa-invivo-1': 2.5,
'2021-06-23-ab-invivo-1': 2.5,
'2021-11-05-at-invivo-1': 3.5,
'2022-01-08-ai-invivo-1': 2.5}
else:
names = {'2010-06-15-ak-invivo-1': 2.5,
'2010-06-18-ag-invivo-1': 2.5,
'2010-06-18-al-invivo-1': 2.5,
'2010-06-18-aq-invivo-1': 2.5,
'2010-06-18-ar-invivo-1': 2.5,
'2010-06-21-ad-invivo-1': 2.5,
'2010-06-21-ae-invivo-1': 2.5,
'2010-06-21-ah-invivo-1': 2.5,
'2010-06-21-al-invivo-1': 2.5,
'2010-06-21-ao-invivo-1': 2.5,
'2010-06-21-ar-invivo-1': 2.5,
'2010-06-21-at-invivo-1': 2.5,
'2010-06-21-av-invivo-1': 2.5,
'2010-07-08-aa-invivo-1': 3.5,
'2010-07-13-ai-invivo-1': 2.5,
'2010-07-13-ak-invivo-1': 2.5,
'2010-07-13-al-invivo-1': 3.5,
'2010-07-13-ap-invivo-1': 2.5,
'2010-07-13-ax-invivo-1': 2.5,
'2010-07-13-aw-invivo-1': 2.5,
'2010-07-13-az-invivo-1': 2.5,
'2010-07-13-bc-invivo-1': 2.5,
'2010-07-13-bf-invivo-1': 0.5,
'2010-08-11-ab-invivo-1': 2.5,
'2010-08-11-ae-invivo-1': 2.5,
'2010-08-11-ak-invivo-1': 2.5,
'2010-08-11-an-invivo-1': 2.5,
'2010-08-11-ao-invivo-1': 2.5,
'2010-08-11-ap-invivo-1': 2.5,
'2010-08-11-aq-invivo-1': 2.5,
'2010-08-11-ar-invivo-1': 2.5,
'2010-08-25-ae-invivo-1': 0.5,
'2010-08-25-ah-invivo-1': 0.5,
'2010-08-27-ag-invivo-1': 2.5,
'2010-08-27-ai-invivo-1': 2.5,
'2010-08-31-ab-invivo-1': 2.5,
'2010-08-31-ah-invivo-1': 2.5,
'2010-09-24-ab-invivo-1': 2.5,
'2010-09-24-ac-invivo-1': 2.5,
'2010-09-24-ad-invivo-1': 2.5,
'2010-09-24-ae-invivo-1': 0.5,
'2010-09-24-af-invivo-1': 2.5,
'2010-11-08-ad-invivo-1': 2.5,
'2010-11-08-ak-invivo-1': 2.5,
'2010-11-08-ai-invivo-1': 2.5,
'2010-11-08-al-invivo-1': 2.5,
'2010-11-26-aa-invivo-1': 3.5,
'2010-11-26-ab-invivo-1': 3.5,
'2010-11-26-ai-invivo-1': 2.5,
'2010-11-26-aj-invivo-1': 2.5,
'2010-11-26-ak-invivo-1': 2.5,
'2010-12-07-ab-invivo-1': 2.5,
'2011-02-15-ab-invivo-1': 2.5,
'2011-02-18-ad-invivo-1': 2.5,
'2011-04-19-ab-invivo-1': 2.5,
'2011-04-19-ac-invivo-1': 2.5,
'2011-05-09-ad-invivo-1': 2.5,
'2011-06-09-aa-invivo-1': 2.5,
'2011-09-21-ae-invivo-1': 2.5,
'2011-09-21-ah-invivo-1': 2.5,
'2011-09-21-ak-invivo-1': 2.5,
'2011-11-10-af-invivo-1': 2.5,
'2012-04-20-ak-invivo-1': 2.5,
'2012-05-10-ad-invivo-1': 2.5,
'2012-06-05-af-invivo-1': 2.5,
'2012-06-25-af-invivo-1': 2.5,
'2012-06-25-aj-invivo-1': 2.5,
'2012-06-27-aj-invivo-1': 2.5,
'2012-06-27-ak-invivo-1': 2.5,
'2012-06-28-aa-invivo-1': 3.5,
'2012-07-03-aa-invivo-1': 2.5,
'2012-07-03-al-invivo-1': 2.5,
'2012-07-11-ac-invivo-1': 2.5,
'2012-07-11-ae-invivo-1': 2.5,
'2012-08-19-aa-invivo-1': 3.5,
'2012-10-19-ab-invivo-1': 2.5,
'2012-10-23-aa-invivo-1': 3.5,
'2012-10-31-ab-invivo-1': 2.5,
'2012-10-31-af-invivo-1': 2.5,
'2012-11-16-ab-invivo-1': 2.5,
'2012-11-20-ab-invivo-1': 3.5,
'2012-12-18-ad-invivo-1': 2.5,
'2012-12-21-ag-invivo-1': 2.5,
'2013-01-08-ab-invivo-1': 2.5,
'2013-01-08-ac-invivo-1': 2.5,
'2013-04-10-ae-invivo-1': 2.5,
'2013-04-17-ae-invivo-1': 2.5,
'2013-04-18-aa-invivo-1': 2.5,
'2013-04-18-ab-invivo-1': 2.5,
'2014-01-10-ae-invivo-1': 4.5,
'2014-01-10-ai-invivo-1': 3.5,
'2014-01-16-ag-invivo-1': 2.5,
'2014-03-25-aa-invivo-1': 2.5,
'2014-05-21-ab-invivo-1': 2.5,
'2014-05-21-af-invivo-1': 2.5,
'2014-12-03-ad-invivo-1': 2.5,
'2014-12-03-aj-invivo-1': 3.5,
'2014-12-03-ao-invivo-1': 2.5,
'2014-12-10-aa-invivo-1': 3.5,
'2014-12-11-aa-invivo-1': 2.5,
'2015-01-20-ae-invivo-1': 2.5,
'2015-01-20-af-invivo-1': 4.5,
'2015-01-20-ag-invivo-1': 4.5,
'2015-01-20-ah-invivo-1': 4.5,
'2017-07-18-ah-invivo-1': 2.5,
'2017-08-11-aa-invivo-1': 2.5,
'2017-08-11-ab-invivo-1': 2.5,
'2017-08-11-ac-invivo-1': 3,
'2017-08-11-ad-invivo-1': 2.5,
'2017-10-25-af-invivo-1': 2.5,
'2017-10-25-ad-invivo-1': 3.5,
'2017-10-25-ae-invivo-1': 3.5,
'2017-10-25-aq-invivo-1': 4.5,
'2018-01-09-ab-invivo-1': 2.5,
'2018-01-10-ad-invivo-1': 2.5,
'2018-01-10-ae-invivo-1': 2.5,
'2018-01-10-ag-invivo-1': 2.5,
'2018-01-10-aj-invivo-1': 2.5,
'2018-01-10-ak-invivo-1': 2.5,
'2018-01-10-al-invivo-1': 2.5,
'2018-01-12-ae-invivo-1': 2.5,
'2018-01-12-ap-invivo-1': 2.5,
'2018-01-17-ag-invivo-1': 2.5,
'2018-01-19-aa-invivo-1': 2.5,
'2018-01-19-ab-invivo-1': 2.5,
'2018-01-19-ac-invivo-1': 2.5,
'2018-01-19-ae-invivo-1': 2.5,
'2018-01-19-af-invivo-1': 2.5,
'2018-01-19-ah-invivo-1': 2.5,
'2018-01-19-ai-invivo-1': 2.5,
'2018-01-19-ak-invivo-1': 2.5,
'2018-01-19-an-invivo-1': 2.5,
'2018-01-19-ar-invivo-1': 2.5,
'2018-03-28-ab-invivo-1': 40,
'2018-05-08-ab-invivo-1': 2.5,
'2018-06-25-ad-invivo-1': 2.5,
'2018-06-25-ae-invivo-1': 2.5,
'2018-06-25-af-invivo-1': 2.5,
'2018-06-26-af-invivo-1': 2.5,
'2018-07-18-ab-invivo-1': 2.5,
'2018-07-26-af-invivo-1': 2.5,
'2018-08-24-am-invivo-1': 2.5,
'2018-08-14-aj-invivo-1': 2.5,
'2018-08-24-ap-invivo-1': 2.5,
'2018-08-29-aa-invivo-1': 2.5,
'2018-09-05-ai-invivo-1': 2.5,
'2018-09-05-ab-invivo-1': 2.5,
'2018-09-06-ae-invivo-1': 3.5,
'2018-09-06-ag-invivo-1': 4.5,
'2018-09-06-ai-invivo-1': 2.5,
'2018-09-13-aa-invivo-1': 2.5,
'2018-11-13-af-invivo-1': 2.5,
'2018-11-14-am-invivo-1': 4.5,
'2018-11-14-af-invivo-1': 2.5,
'2018-11-14-al-invivo-1': 2.5,
'2018-11-16-ae-invivo-1': 2.5,
'2018-12-17-ae-invivo-1': 2.5,
'2018-12-17-ah-invivo-1': 2.5,
'2018-12-21-ag-invivo-1': 2.5,
'2019-01-24-ac-invivo-1': 2.5,
'2019-01-24-ad-invivo-1': 2.5,
'2019-02-07-aa-invivo-1': 2.5,
'2019-02-11-ak-invivo-1': 2.5,
'2019-02-12-ae-invivo-1': 2.5,
'2019-02-14-ae-invivo-1': 2.5,
'2019-02-14-af-invivo-1': 2.5,
'2019-02-15-af-invivo-1': 2.5,
'2019-02-15-ag-invivo-1': 2.5,
'2019-04-23-af-invivo-1': 2.5,
'2019-04-23-ag-invivo-1': 2.5,
'2019-04-23-ah-invivo-1': 2.5,
'2019-04-23-ai-invivo-1': 2.5,
'2019-05-07-aq-invivo-1': 2.5,
'2019-05-07-au-invivo-1': 2.5,
'2019-05-07-av-invivo-1': 2.5,
'2019-05-07-ax-invivo-1': 2.5,
'2019-05-07-bb-invivo-1': 2.5,
'2019-05-07-by-invivo-1': 2.5,
'2019-05-15-ai-invivo-1': 3.5,
'2019-05-15-ag-invivo-1': 2.5,
'2019-07-02-ad-invivo-1': 2.5,
'2019-09-10-ac-invivo-1': 2.5,
'2019-09-23-ac-invivo-1': 2.5,
'2019-09-23-ag-invivo-1': 2.5,
'2019-09-23-ae-invivo-1': 2.5,
'2019-09-23-ah-invivo-1': 2.5,
'2019-09-23-ak-invivo-1': 2.5,
'2019-09-23-aq-invivo-1': 2.5,
'2019-09-21-af-invivo-1': 2.5,
'2019-10-21-ad-invivo-1': 2.5,
'2019-10-21-ah-invivo-1': 2.5,
'2019-10-21-af-invivo-1': 2.5,
'2019-10-21-aj-invivo-1': 2.5,
'2019-10-21-ar-invivo-1': 2.5,
'2019-10-21-as-invivo-1': 2.5,
'2019-10-21-av-invivo-1': 2.5,
'2019-10-28-aj-invivo-1': 2.5,
'2019-11-13-ab-invivo-1': 2.5,
'2019-11-18-ak-invivo-1': 2.5,
'2019-11-18-ah-invivo-1': 2.5,
'2020-06-16-ak-invivo-1': 3.5,
'2020-07-02-ag-invivo-1': 2.5,
'2020-07-07-aa-invivo-1': 2.5,
'2020-07-07-ab-invivo-1': 2.5,
'2020-08-12-ae-invivo-1': 2.5,
'2020-10-01-ac-invivo-1': 2.5,
'2020-10-01-af-invivo-1': 2.5,
'2020-10-01-ag-invivo-1': 2.5,
'2020-10-20-ab-invivo-1': 2.5,
'2020-10-20-ad-invivo-1': 2.5,
'2020-10-20-ae-invivo-1': 2.5,
'2020-10-20-af-invivo-1': 2.5,
'2020-10-21-aa-invivo-1': 2.5,
'2020-10-21-ac-invivo-1': 2.5,
'2020-10-27-ae-invivo-1': 0,
'2020-10-27-ag-invivo-1': 0,
'2020-10-27-ai-invivo-1': 2.5,
'2020-10-29-ah-invivo-1': 2.5,
'2021-06-23-aa-invivo-1': 2.5,
'2021-06-23-ab-invivo-1': 2.5,
'2021-06-23-ac-invivo-1': 2.5,
'2021-08-03-aa-invivo-1': 3.5,
'2021-11-04-ai-invivo-1': 2.5,
'2021-11-05-at-invivo-1': 3.5,
'2021-11-08-aa-invivo-1': 2.5,
'2021-12-17-ab-invivo-1': 2.5,
'2022-01-05-ab-invivo-1': 2.5,
'2022-01-06-aa-invivo-1': 3.5,
'2022-01-06-ab-invivo-1': 2.5,
'2022-01-06-ac-invivo-1': 2.5,
'2022-01-06-ae-invivo-1': 4.5,
'2022-01-06-ag-invivo-1': 2.5,
'2022-01-06-ah-invivo-1': 3.5,
'2022-01-06-af-invivo-1': 5,
'2022-01-08-ad-invivo-1': 2.5,
'2022-01-08-ah-invivo-1': 2.5,
'2022-01-08-ai-invivo-1': 2.5,
'2022-01-27-ab-invivo-1': 2.5,
'2022-01-28-aa-invivo-1': 2.5,
'2022-01-28-ac-invivo-1': 4.5,
'2022-01-28-ad-invivo-1': 2.5,
'2022-01-28-af-invivo-1': 2.5,
'2022-01-28-ag-invivo-1': 2.5,
'2022-01-28-ah-invivo-1': 2.5,
'2022-01-28-al-invivo-1': 2.5,
'2022-01-28-am-invivo-1': 3.5,
'2022-02-07-ah-invivo-1': 2.5,
'2022-02-07-ai-invivo-1': 2.5,
'2022-02-15-aa-invivo-1': 2.5,
'2022-02-15-ab-invivo-1': 2.5,
'2022-02-15-ac-invivo-1': 2.5,
}
return names
def find_lim_here(data_name, burst_corr):
burst_cutoff = burst_saved(burst_corr)
# embed()
if ('Individual' in burst_corr) | ('individual' in burst_corr):
if data_name in burst_cutoff:
lim_here = burst_cutoff[data_name]
else:
lim_here = 1.5
elif '2.5' in burst_corr:
lim_here = 2.5
else:
lim_here = 1.5
return lim_here
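# A minimal usage sketch of the per-cell burst cutoff lookup above. The cell name is taken from
# the dictionaries in burst_saved(); the burst_corr string 'individual' is a hypothetical example
# value, and unknown cells fall back to the default cutoff of 1.5 EOD periods. Not called by the
# analysis pipeline.
def _demo_find_lim_here_sketch():
    lim_known = find_lim_here('2010-06-18-ag-invivo-1', 'individual')  # 2.5 from burst_saved()
    lim_fallback = find_lim_here('some-unknown-cell', 'individual')    # 1.5 default
    return lim_known, lim_fallback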
def create_shifted_spikes(eod, len_smoothed_b, len_smoothed, beat, am_corr, sampling_rate, time_b, time, smoothed,
shifts, plot_segment, tranformed_spikes, version='spikes'):
spikes_cut = []
beat_cut = []
all_spikes = []
am_corr_cut = []
error = []
maxima = []
for ii in range(len(shifts)):
# here we do everything in ms
# tranformed_spikes = spikes.iloc[0] - spikes.iloc[0][0]
lowerbound = (shifts[ii])
upperbound = (shifts[ii]) + plot_segment
if upperbound < tranformed_spikes[-1]:
if version == 'spikes':
used_spikes = tranformed_spikes[(tranformed_spikes > lowerbound) & (tranformed_spikes < upperbound)]
# used_spikes = used_spikes[used_spikes < upperbound]
used_spikes = used_spikes - lowerbound
# change_first = used_spikes-used_spikes[0]
# used_spikes = used_spikes - used_spikes[0]
all_spikes.append(used_spikes)
smoothed_trial = smoothed * 1
# smoothed_trial = smoothed_trial[time > lowerbound ]#+used_spikes[0]
# embed()
len_smoothed, spikes_cut, _ = cut_smoothed(time, ii, spikes_cut, smoothed_trial,
upperbound, lowerbound, len_smoothed)
# embed()
len_smoothed_b, beat_cut, _ = cut_smoothed(time_b, ii, beat_cut, beat, upperbound, lowerbound,
len_smoothed_b)
# embed()
len_smoothed_am, am_corr_cut, _ = cut_smoothed(time_b, ii, am_corr_cut, am_corr, upperbound, lowerbound,
len_smoothed_b)
# embed()
am_corr_rec = beat_cut[ii] * 1
am_corr_rec[am_corr_rec < 0] = 0
maxima_values, maxima_index, maxima_interp = global_maxima(sampling_rate, eod,
am_corr_rec) # global maxima
if ii == 0:
control = maxima_interp
error.append(np.mean((maxima_interp - control) ** 2))
maxima.append(maxima_interp)
return len_smoothed, smoothed_trial, all_spikes, maxima, error, spikes_cut, beat_cut, am_corr_cut
def cut_smoothed(time, ii, smoothed_trials, smoothed_trial, upperbound, lowerbound, len_smoothed):
if ii == 0:
new_time = time * 1
# new_time = new_time[time > lowerbound]
smoothed_trial = smoothed_trial[(time < upperbound) & (time > lowerbound)]
# last_time = new_time[new_time < upperbound]
smoothed_trials.append(smoothed_trial)
len_smoothed = len(smoothed_trial)
else:
smoothed_trial = smoothed_trial[time > lowerbound]
smoothed_trials.append(smoothed_trial[0:len_smoothed])
return len_smoothed, smoothed_trials, smoothed_trial
def prepare_baseline_array(time_array, eod_fr=750, nfft_for_morph=2 ** 14, phaseshift_fr=0, mimick='no', zeros='zeros',
data_name='', sampling=20000, stimulus_length=1, fish_self='Alepto', deltat=1 / 20000,
nfft=2 ** 14, damping_type='', damping='', damping_choice='', damping_gain=1,
a_fr_baseline=1, beat='beat', fish_morph_harmonics_var='analyzed'):
# only first two are important
# function to create a baseline; for the later adaptation of the offset we need to know what the initial baseline was
# three baselines are created to see only the difference in case of adapting for a single change (damping, wave, power)
# time_array
# eod_fr - EOD frequency
# mimick - which morph variant
# zeros - any offset in case of no wave
# data_name - cell to morph the eod from
# sampling - the sampling
# stimulus_length - stimulus length
# fish_self - the fish to morph
# deltat - time step
# nfft - nfft for spectral analysis
# damping_type - which damping type, in case of '' no damping
# damping - factor of damping, the bigger the lower the damping, maximal value 0.75
# damping_choice - whether to take the damped array ('') or its derivative ('_us1_')
# damping_gain - gain that might be changed for the damping
# a_fr_baseline - default 1, amplitude of the baseline
# Output: four baseline variants exist if you want to selectively test the changes added at
# each step (currently only baseline_with_wave_damping and baseline_without_wave are returned)
# baseline_without_wave - pure sine
# baseline_with_wave - sine with an EOD waveform modification
# baseline_without_wave_damping - pure sine with damping
# baseline_with_wave_damping - EOD waveform modification with damping
#
# create the time for the sinusoidal wave and do it with the same baseline amplitude of 1
time_fish_r = time_array * 2 * np.pi * eod_fr
baseline_without_wave = a_fr_baseline * np.sin(time_fish_r)
# this we create if you want a baseline with a wave, here it depends on the variant you chose anyway for morphing
baseline_with_wave, _, _, _ = eod_fish_r_generation(time_array, eod_fr, a_fr_baseline, stimulus_length,
phaseshift_fr, data_name, zeros, mimick, sampling, fish_self,
deltat, nfft, nfft_for_morph,
fish_morph_harmonics_var=fish_morph_harmonics_var, beat=beat)
# embed()
# here we also get a baseline if we eg want to have the damping as baseline and only look at the difference contributed
# by the power modification
if (damping_type == 'damping'):
## do a damping on a pure sinus
# us, baseline_without_wave_damping, std_dump, max_dump, range_dump, \
# _, _, _ = damping_func(
# baseline_without_wave, time_array, damping_type,
# gain, eod_fr, damping, us_name)
# do a damping on a wave with EOD waveform modification
us, baseline_with_wave_damping, std_dump, max_dump, range_dump = damping_func(baseline_with_wave, time_array,
eod_fr, damping, damping_choice,
damping_gain)
else:
# if we have no damping we just equate these variables with their wave counterparts
# baseline_without_wave_damping = baseline_without_wave
baseline_with_wave_damping = baseline_with_wave
return baseline_with_wave_damping, baseline_without_wave
def damping_func(stimulus, time, eod_fr=750, damping='', damping_var_choice='', gain_damping=1):
# classical harmonic oscillator function
# change the frequency in radians
wr = eod_fr * 2 * np.pi
# calculate the resonance frequency
w0 = wr / np.sqrt(1 - 2 * damping ** 2)
# these are some scaling factors
Zm = np.sqrt((2 * w0 * damping) ** 2 + (wr ** 2 - w0 ** 2) ** 2 / wr ** 2)
alphar = wr * Zm
# initial values
x0 = [0, 0]
@jit()
def func(x, t, damping, w0, stimulus, dt):
# the damping function
# the first output is the damped input, the second is its derivative
dydt = (x[1], stimulus[int(np.round(t / dt))] - 2 * damping * w0 * x[1] - (w0 ** 2) * x[0]) # stimulus[t/dt]
return dydt
output_damping = odeint(func, x0, time, args=(damping, w0, stimulus, np.abs(time[0] - time[1])))
output_damping_scaled = output_damping * alphar * gain_damping # and gain default is also 1
# the first is not usually utilized
if damping_var_choice == '_us1_':
stimulus = output_damping_scaled[:, 1]
# usually you take the damped input as output
else:
stimulus = output_damping_scaled[:, 0] # ok so this is the right one
std_dump = np.std(stimulus[700:-1])
max_dump = np.max(stimulus[700:-1])
range_dump = np.max(stimulus[700:-1]) - np.min(stimulus[700:-1])
return output_damping_scaled, stimulus, std_dump, max_dump, range_dump
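# A small, self-contained sketch (hypothetical parameter values) of the driven, damped harmonic
# oscillator that damping_func integrates: the drive is a pure EOD-frequency sine and the response
# is obtained with odeint, mirroring the equations above without the jit decorator and the alphar
# scaling. Not part of the simulation pipeline.
def _demo_damping_sketch(eod_fr=750.0, damping=0.3, duration=0.05, deltat=1 / 20000):
    time = np.arange(0, duration, deltat)
    stimulus = np.sin(2 * np.pi * eod_fr * time)  # pure sine drive
    wr = eod_fr * 2 * np.pi                       # drive frequency in rad/s
    w0 = wr / np.sqrt(1 - 2 * damping ** 2)       # resonance frequency

    def oscillator(x, t):
        # x[0] is the damped output, x[1] its derivative
        drive = stimulus[min(int(np.round(t / deltat)), len(stimulus) - 1)]
        return [x[1], drive - 2 * damping * w0 * x[1] - (w0 ** 2) * x[0]]

    response = odeint(oscillator, [0, 0], time)
    return time, response[:, 0]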
def get_spikes(adapt_type, offset, adapt_offset, a_fr, nfft, lower_tol, tt, carrier_RAM, n, cell, trial_nrs, variant,
stimulus_length, deltat, exponential, upper_tol, v_exp, exp_tau, dendrid, noise_added, cut_off, c_sig,
var_type, ref_type, c_noise, model_params, fishe_receiver, phaseshift_fr, nfft_for_morph, eod_fr,
damping_type='', mimick='no', burst_corr='', cell_recording='', us_name='', gain=1,
reshuffled='reshuffled', zeros='ones'):
time_array = np.arange(0, stimulus_length, deltat)
baseline_with_wave_damping, baseline_without_wave = prepare_baseline_array(time_array, eod_fr, nfft_for_morph,
phaseshift_fr, mimick, zeros,
cell_recording, 1 / deltat,
stimulus_length, fishe_receiver, deltat,
nfft, damping_type, '', us_name, gain,
beat='beat',
fish_morph_harmonics_var='analyzed')
# do the model
spike_adapted = [[]] * trial_nrs
spikes = [[]] * trial_nrs
spikes_bef = [[]] * trial_nrs
if a_fr == 0:
variant_here = variant + '_' + zeros
else:
variant_here = variant
# embed()
for t in range(trial_nrs):
if t == 0:
adapt_offset_here = adapt_offset
else:
adapt_offset_here = ''
# embed()
cvs, adapt_output, baseline_after, spike_adapted[
t], rate_adapted, rate_baseline_before, rate_baseline_after, spikes_bef[
t], stimulus_power, v_dent, offset, v_mem_output, noise_final = simulate(cell, offset, carrier_RAM,
cut=False, deltat=deltat,
adaptation_variant=adapt_offset_here,
adaptation_yes_e=0,
adaptation_yes_t=tt,
adaptation_upper_tol=upper_tol,
adaptation_lower_tol=lower_tol,
power_variant=variant_here,
power_nr=n, waveform_damping='',
waveform_damping_baseline=baseline_with_wave_damping,
waveform_mimick=mimick,
reshuffle=reshuffled,
noisesplit_c_noise=c_noise,
noisesplit_noise_added=noise_added,
noisesplit_var_type=var_type,
noisesplit_cut_off=cut_off,
noisesplit_c_sig=c_sig,
LIF_ref_type=ref_type,
LIF_adapt_type=adapt_type,
LIF_dendrid=dendrid,
LIF_exponential=exponential,
LIF_exponential_tau=exp_tau,
LIF_expontential__v=v_exp,
**model_params) # EODf = eod_fr,
# embed()
isi = calc_isi(spikes_bef[t], eod_fr)
spikes[t] = spikes_after_burst_corr(spikes_bef[t], isi, burst_corr, cell, eod_fr, model_params=model_params)
return noise_final, offset, v_mem_output, spikes_bef, rate_adapted, rate_baseline_after, spikes, isi, v_dent
# noise_final_c,offset, v_mem_output,spikes_bef, rate_adapted, rate_baseline_after,spikes, isi, v_dent
def spikes_after_burst_corr(spikes_mt, isi, burst_corr, data_name, eod_fr, model_params=''):
# isi: interspike intervals in multiples of the EOD period
# else:
if 'added' in burst_corr:
# so in principle, wherever there is still room we fill in burst spikes up to the requested burst number
lim_here = find_lim_here(data_name, burst_corr)
# if np.min(isi) < lim_here:
# # todo hier noch auffüllen
# embed() # _burst_added_
# else:
# embed() # _burst_added_
try:
burst_number = int(burst_corr.split('added')[-1][0])
except:
print('burst nr problem')
embed()
# minimal_distance = burst_number * (1 / eod_fr)+model_params.ref_period
# minimal ISI in multiples of the EOD period, so that there is no overlap with the next spike
minimal_distance = burst_number + model_params.ref_period / (1 / eod_fr)
# so here we first take only the spikes that are the first spike of a burst
# the leading True refers to the left interval
# the left interval checks that this is the first spike of a burst
interval_left = [True]
interval_left.extend(isi > lim_here)
# and the right interval checks that there is not already a burst to the right
# it is essentially rolled by one
interval_right = list(isi > lim_here)
interval_right.extend([True])
# in addition we require room for the burst spikes and the refractory period, so that the spikes do not
# overlap
interval_right_ref = list(isi > minimal_distance)
interval_right_ref.extend([True])
# here the number of burst spikes is turned into a range
if 'only' in burst_corr:
range_en = [burst_number]
else:
range_en = range(1, burst_number + 1)
# only the spikes for which all three of these conditions hold are now used
# for the burst generation
spike_ex = np.array(spikes_mt)[
np.array(interval_left) & np.array(interval_right) & np.array(interval_right_ref)]
spikes = [spikes_mt]
for n in range_en:
# print(n)
# the burst spikes are appended
spikes.append(spike_ex + n * (1 / eod_fr))
spikes = np.concatenate(spikes)
spikes_new = np.array(np.sort(spikes))
# todo: check whether this is still how it was meant to be
# no bursts beyond the last of the original spikes!
spikes_new = spikes_new[spikes_new < spikes_mt[-1]]
spikes_mt = spikes_new
# embed()
elif 'burst' in burst_corr:
lim_here = find_lim_here(data_name, burst_corr)
if np.min(isi) < lim_here:
hists2, spikes_ex, frs_calc2 = correct_burstiness([isi], [spikes_mt],
[eod_fr], [eod_fr], lim=lim_here, burst_corr=burst_corr)
spikes_mt = spikes_ex[0]
# frame.loc[position, 'burst_corr'] = True
# todo: the lower part of this was actually wrong; it should be fixed somehow in a new version
# todo: all correct_burstiness calls should be part of a function like this..
# print(burst_corr)
return spikes_mt
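# A toy illustration (hypothetical spike times and cutoffs) of the three masks used above for the
# 'added' burst correction: a spike may seed an added burst only if the ISI to its left marks it as
# the first spike of a burst, the ISI to its right does not already belong to a burst, and there is
# enough room for the added spikes plus the refractory period.
def _demo_burst_mask_sketch(eod_fr=750.0, lim_here=1.5, burst_number=1, ref_period=0.001):
    spikes = np.array([0.010, 0.0113, 0.020, 0.030, 0.0313])      # seconds, hypothetical
    isi = np.diff(spikes) / (1 / eod_fr)                          # ISIs in multiples of the EOD period
    minimal_distance = burst_number + ref_period / (1 / eod_fr)
    interval_left = np.array([True] + list(isi > lim_here))
    interval_right = np.array(list(isi > lim_here) + [True])
    interval_right_ref = np.array(list(isi > minimal_distance) + [True])
    return spikes[interval_left & interval_right & interval_right_ref]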
def noise_c2r(random_phases, height, datapoints, cut_high, cut_low, deltat):
# white noise in frequency space has constant amplitude, random phase
# here the phases are drawn at random
try:
noise_bef_cut_off = height * (np.cos(2 * np.pi * random_phases) + 1j * np.sin(2 * np.pi * random_phases))
except:
print('height problem')
embed()
noise_aft_cut_aft = np.ones(len(noise_bef_cut_off)) * 1 # noise_bef_cut_off*1#
# and afterwards the imaginary part is set to zero
noise_bef_cut_off[0] = height * (np.cos(2 * np.pi * random_phases))[0]
noise_bef_cut_off[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_bef_cut_off[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
# everything below and above the cut-off is set to zero
indeces = np.arange(0, len(noise_bef_cut_off), 1)
noise_bef_cut_off[(indeces < cut_low)] = 0
noise_bef_cut_off[(indeces > cut_high)] = 0
# here the noise above the cut-off is generated
noise_aft_cut_aft[0] = height * (np.cos(2 * np.pi * random_phases))[
0]  # todo: this should be drawn in exactly the same way; afterwards the imaginary part is set to zero, the amplitude stays the same
noise_aft_cut_aft[int(datapoints / 2)] = height * (np.cos(2 * np.pi * random_phases))[int(datapoints / 2)]
noise_aft_cut_aft[int(cut_high)] = height * (np.cos(2 * np.pi * random_phases))[int(cut_high)]
# and set everything below the cut-off to zero (only frequencies above cut_high remain)
indeces = np.arange(0, len(noise_aft_cut_aft), 1)
noise_aft_cut_aft[(indeces < cut_low)] = 0
noise_aft_cut_aft[(indeces < cut_high)] = 0
# here everything is normalized correctly; 'forward' is what we need,
# but not all numpy versions accept the norm argument; if they do not, the part after
# except applies
try:
noise_bef = np.real(np.fft.irfft(noise_bef_cut_off, norm='forward'))
noise_aft = np.real(np.fft.irfft(noise_aft_cut_aft, norm='forward'))
except:
# we take backward and transform it to forward
noise_bef = np.real(np.fft.irfft(noise_bef_cut_off)) / deltat
noise_aft = np.real(np.fft.irfft(noise_aft_cut_aft)) / deltat
# here we compute the variance of the generated time signal
var_bef = np.var(noise_bef)
var_aft = np.var(noise_aft)
return var_bef, var_aft, noise_aft, noise_bef, noise_bef_cut_off, noise_aft_cut_aft
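# A compact sketch (hypothetical cut-off values in Hz) of the idea behind noise_c2r: draw a
# flat-amplitude spectrum with random phases, zero everything outside the desired band and
# transform back with an inverse FFT; dividing by deltat reproduces the 'forward' normalization
# used above. Purely illustrative.
def _demo_bandlimited_noise_sketch(duration=1.0, deltat=1 / 20000,
                                   cut_low_hz=0.0, cut_high_hz=300.0):
    datapoints = int(duration / deltat)
    freqs = np.fft.rfftfreq(datapoints, deltat)
    phases = np.random.rand(len(freqs))                           # uniform random phases
    spectrum = np.exp(2j * np.pi * phases)                        # constant amplitude of one
    spectrum[(freqs < cut_low_hz) | (freqs > cut_high_hz)] = 0    # keep only the band
    noise = np.real(np.fft.irfft(spectrum, n=datapoints)) / deltat
    return noise, np.var(noise)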
def calc_isi(spikes_mt, eod_fr):
return np.diff((np.array(spikes_mt)) / (1 / eod_fr)) # (frs[s], cvs[s])
def tau_changes(alter_taus, dend_tau, dent_tau_change, mem_tau):
if alter_taus == 'dent':
dend_tau = dend_tau * dent_tau_change
elif alter_taus == 'mem':
mem_tau = mem_tau * dent_tau_change
return mem_tau, dend_tau
def simulate(cell, v_offset, stimulus, emb=False, cut=True, EODf='eod_fr', deltat=0.00005, v_zero=0.0, a_zero=2.0,
threshold=1.0, v_base=0.0, delta_a=0.08, tau_a=0.1, mem_tau=0.015, noise_strength=0.05, input_scaling=60.0,
dend_tau=0.001, ref_period=0.001, adaptation_variant='adaptoffset_bisecting', adaptation_yes_j=0,
adaptation_yes_e=0, adaptation_yes_t=0, adaptation_upper_tol=1.005, adaptation_lower_tol=0.995,
power_variant='sinz', power_alpha=0, power_nr=1, waveform_damping='', waveform_damping_baseline='',
waveform_mimick='no', reshuffle='reshuffled', reshuffle_stimulus_length_orig=100, reshuffle_start_vals=[],
reshuffle_transient=100, tau_change_choice='', tau_change_val=1, noisesplit_c_noise=1,
noisesplit_noise_added='', noisesplit_var_type='', noisesplit_cut_off='', noisesplit_c_sig=0.9,
sigmoidal_mult=0, sigmoidal_plus=0, sigmoidal_slope=0, sigmoidal_add=0, sigmoidal_sigmoidal_val=(7, 1),
LIF_ref_type='', LIF_adapt_type='', LIF_dendrid='', LIF_exponential='', LIF_exponential_tau=0.001,
LIF_expontential__v=1): # v_offset=-10.0,
# embed()#'adaptoffset_bisecting
""" Simulate a P-unit.
Returns
-------
spike_times: 1-D array
Simulated spike times in seconds.
:param EODf: EOD frequency (defaults to the placeholder string 'eod_fr')
:param cell: cell identifier used to look up starting values and adaptation settings
"""
stimulus = stimulus * 1
# stimulus = stimulus.copy()
# change taus eg. reduce them to see the influence of a certain part of the equation
# embed()
mem_tau, dend_tau = tau_changes(tau_change_choice, dend_tau, tau_change_val, mem_tau)
# reduce noise_final for the adaptation so that it is always the same!!
# if 'stableall' in adapt_offset:
# noise_final = np.zeros(len(stimulus))
# noise_adapt = np.zeros(len(stimulus))
# elif 'stable' in adapt_offset:
# noise_adapt = np.zeros(len(stimulus))
# noise_final = np.random.randn(len(stimulus))
# noise_final *= noise_strength / np.sqrt(deltat)
# else:
# embed()
d = noise_strength # (noise_strength ** 2) / 2
if len(stimulus) > 1 / deltat:
noise_length = len(stimulus)
else:
noise_length = int(1 / deltat)
# embed()
noise_strength_new = np.sqrt(noise_strength * 2)
noise_final = np.random.randn(noise_length) # len(stimulus)
noise_orig = noise_final * noise_strength_new / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
#############################################
# this was the original version
# in Alex's code noise_strength was np.sqrt(2*D)
# and in the 'D right' table D is stored directly
# noise = np.random.randn(len(stimulus))
# noise *= noise_strength / np.sqrt(deltat)
if 'additiv' in noisesplit_var_type:
variance = (d * noisesplit_c_noise) * 2 / deltat
noise_strength_new = np.sqrt(variance)
noise_final *= noise_strength_new # 0.0015532069917408744
noise_sole = noise_final * 1 # 0.005364195092629859
# embed()
if noisesplit_noise_added == '_noiseadded_': # _noiseadded_
cut_high = len(stimulus) * len(stimulus) * deltat * 2
f = np.fft.fftfreq(len(noise_final), deltat)
try:
white_noise, freq_wn = whitenoise(noisesplit_cut_off, np.max(f), deltat, len(stimulus) * deltat,
rng=np.random)
except:
print('noise problem')
embed()
f_max = np.max(f)
variance_sig = (d * noisesplit_c_sig) * 2 / deltat
noise_strength_sig = np.sqrt(variance_sig * (f_max - noisesplit_cut_off) / f_max) #
white_noise *= noise_strength_sig # 0.046775882352175384
noise_final = white_noise[0:-1] + noise_sole # 0.05209675634528021
else:
noise_strength_new = np.sqrt(noise_strength * 2)
noise_final *= noise_strength_new / np.sqrt(deltat) # 0.05370289258320868 0.0015532069917408744
#embed()
v_mem, adapt, v_dend = reshuffled_start_vals(reshuffle, a_zero, stimulus, v_zero, reshuffle_start_vals, cell,
reshuffle_stimulus_length_orig, reshuffle_transient)
#######################################################
# here we take the original baseline, it is always the same
# len(stimulus)
time_array = np.arange(0, 1, deltat)
# embed()
# this is still without the nonlinearity but already with damping and wave!
# embed()
try:
time_fish_r = time_array * 2 * np.pi * EODf
except:
print('EODf thing')
embed()
baseline_whole = 1 * np.sin(time_fish_r)
if (waveform_mimick != 'no') or (waveform_damping == 'damping'):
# because if the whole thing has damping or a wave, we want that in here as well
baseline_whole = waveform_damping_baseline[0:len(baseline_whole)]
# so this has to be the one with the plain wave, ONLY rectified
baseline_whole_rec = 1 * np.sin(time_fish_r)
# baseline_for_after = baseline_whole*1
baseline_rectified = baseline_whole_rec * 1
baseline_rectified[baseline_rectified < 0.0] = 0.0
#####################
# here baseline and stimulus after the nonlinearity
baseline_after_nonlin, stimulus_after_nonlin = nonlinearity(power_nr, power_alpha, power_variant, stimulus, baseline_whole,
sig_val=sigmoidal_sigmoidal_val, mult=sigmoidal_mult,
plus=sigmoidal_plus, slope=sigmoidal_slope,
add=sigmoidal_add)
# embed()
#####################
# adaptation part
sp_bef, sp_aft, rate_bef, rate_aft, mean_adapt, adapt, offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step = adapt_process(
adaptation_yes_j, adaptation_yes_t, adaptation_variant, adaptation_yes_e, adaptation_upper_tol, adaptation_lower_tol, LIF_exponential, baseline_after_nonlin,
LIF_adapt_type, baseline_rectified, v_dend, dend_tau, deltat, v_base, v_mem, v_offset, input_scaling,
noise_final, mem_tau, tau_a, ref_period, LIF_expontential__v, LIF_exponential_tau, threshold, delta_a, cell,
adapt, stimulus_after_nonlin, dendrid=LIF_dendrid, ref_type=LIF_ref_type)
# print('aft adapt')
############################
# the real model part
v_dent_output = np.zeros(len(stimulus_after_nonlin))
v_dent_output[0] = stimulus_after_nonlin[0]
adapt_output, spike_times, v_dent_output, v_mem_output = stimulate_body(v_dent_output, stimulus_after_nonlin,
v_dend, dend_tau, deltat, v_base,
v_mem,
offset, input_scaling, adapt, noise_final,
mem_tau,
tau_a, ref_period, threshold, delta_a,
exponential=LIF_exponential, ref_type=LIF_ref_type,
dendrid=LIF_dendrid, adapt_type=LIF_adapt_type,
mean_adapt=mean_adapt, v_exp=LIF_expontential__v,
exp_tau=LIF_exponential_tau)
test = False
if test:
test_stims()
test_fft2()
if cut:
v_dent = v_dent_output[int(0.05 / deltat):-1]
else:
v_dent = v_dent_output
cvs = calc_cvs(spike_adapted, sp_bef, sp_aft, adaptation_upper_tol, adaptation_lower_tol, factor, offsets,
rates)
if emb:
embed()
cvs['noise'] = noise_orig
return cvs, adapt_output, baseline_after_nonlin, spike_adapted, rate_adapted, rate_bef, rate_aft, np.array(
spike_times), stimulus_after_nonlin, v_dent, offset, v_mem_output, noise_final
def threshold_power(stimulus, nr, threshold=0.0):
# embed()
stimulus[stimulus < threshold] = 0.0
if (nr < 1) and (nr > 0):
stimulus = threshold_power_square(stimulus, nr)
else:
stimulus = (stimulus) ** nr
return stimulus
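# Quick illustrative check (hypothetical values) of threshold_power: negative samples are clipped
# to zero before the power is applied, so a power of 3 maps [-1, 0.5, 1] to [0, 0.125, 1].
def _demo_threshold_power_sketch():
    return threshold_power(np.array([-1.0, 0.5, 1.0]), 3)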
@jit(nopython=True)
def stimulate_body(v_dent_output, stimulus, v_dend, dend_tau, deltat, v_base, v_mem, v_offset, input_scaling, adapt,
noise, mem_tau,
tau_a, ref_period, threshold, delta_a, exponential='', ref_type='', adapt_type='',
mean_adapt=0, dendrid='', v_exp=0, exp_tau=0.00014):
spike_times = []
v_mem_output = np.zeros(len(stimulus))
adapt_output = np.zeros(len(stimulus))
for i in range(len(stimulus)):
####################################################
# dendritic part
# standard part
if dendrid == '': # original part
v_dend += (-v_dend + stimulus[i]) / dend_tau * deltat # *0.1
v_dent_scaled = (v_dend * input_scaling)
elif dendrid == 'Stimulus_wo_dendrid':  # instead of the dendrite, use the pure stimulus
# v_dend += stimulus[i]
v_dent_scaled = (stimulus[i] * input_scaling)
else:
v_dend = 0
v_dent_scaled = (v_dend * input_scaling)
v_dent_output[i] = v_dend
####################################################
# exponential part
if exponential == 'EIF':
exponential_part = exp_tau * np.exp((v_mem - v_exp) / exp_tau)
threshold = 1000
# embed()
elif exponential == 'QIF':
I = 0
exponential_part = ((v_mem - v_exp) ** 2) / exp_tau + (v_mem - v_base) + I
# threshold = 1000
elif exponential == 'CIF':
I = 0
if v_mem > v_base:
exponential_part = (v_mem - v_exp) ** 3
else:
exponential_part = 0
# embed()
threshold = 1000
else:
# standard part
exponential_part = 0
####################################################
# v_mem part
v_mem_output[i] = v_mem
v_mem += (- v_mem + v_base + exponential_part + v_offset + v_dent_scaled - adapt + noise[i]) / mem_tau * deltat
####################################################
# adapt part
adapt_output[i] = adapt
# embed()
if adapt_type == '_adaptMean_':
adapt = mean_adapt
elif adapt_type == '_noAdapt_':
adapt = 0
else:
# standard part
adapt += -adapt / tau_a * deltat
####################################################################################
# refractory period:
# standard part
if ref_type == '':
if len(spike_times) > 0 and (deltat * i) - spike_times[-1] < ref_period + deltat / 2:
v_mem = v_base
# else:
# v_mem = v_base
###########################################################
# threshold crossing:
if v_mem > threshold:
# embed()
v_mem = v_base
spike_times.append(i * deltat)
#########################################################
# adapt part
if adapt_type == '_adaptMean_':
adapt = mean_adapt
elif adapt_type == '_noAdapt_':
adapt = 0
else:
adapt += delta_a / tau_a
# embed()
# embed()
test = False
if test:
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True)
ax[0].plot(stimulus * input_scaling)
ax[1].plot(noise)
plt.show()
return adapt_output, spike_times, v_dent_output, v_mem_output
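# A stripped-down, pure-Python sketch of the core update loop in stimulate_body (dendritic
# low-pass, leaky membrane with adaptation current, refractory clamp, threshold crossing and
# reset). The parameter defaults mirror the defaults of simulate(); v_offset and the noiseless
# drive are hypothetical placeholders. Not used by the pipeline.
def _demo_lif_core_sketch(stimulus, deltat=1 / 20000, dend_tau=0.001, mem_tau=0.015,
                          input_scaling=60.0, v_base=0.0, v_offset=10.0, threshold=1.0,
                          tau_a=0.1, delta_a=0.08, ref_period=0.001):
    v_dend, v_mem, adapt = stimulus[0], v_base, 0.0
    spike_times = []
    for i in range(len(stimulus)):
        v_dend += (-v_dend + stimulus[i]) / dend_tau * deltat            # dendritic low-pass
        v_mem += (-v_mem + v_base + v_offset + v_dend * input_scaling
                  - adapt) / mem_tau * deltat                            # membrane equation (noiseless)
        adapt += -adapt / tau_a * deltat                                 # adaptation decay
        if spike_times and i * deltat - spike_times[-1] < ref_period + deltat / 2:
            v_mem = v_base                                               # refractory clamp
        if v_mem > threshold:                                            # threshold crossing
            v_mem = v_base
            spike_times.append(i * deltat)
            adapt += delta_a / tau_a                                     # adaptation increment
    return np.array(spike_times)
# e.g. _demo_lif_core_sketch(np.sin(2 * np.pi * 750 * np.arange(0, 0.5, 1 / 20000)).clip(min=0))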
def adapt_type_version(cell, adapt, adapt_type):
if 'Mean' in adapt_type:
start_vals = pd.read_csv(load_folder_name('calc_model_core') + '/models_big_fit_starting_vals.csv')
# start_vals = pd.read_pickle(load_folder_name('calc_model_core')+'/models_big_fit_starting_vals.pkl')
star_vals_here = start_vals[start_vals['cell'] == cell]
df_shuffled = star_vals_here.sample(frac=1).reset_index(drop=True)
adapt = df_shuffled.iloc[0]['mean_adapt']
mean_adapt = df_shuffled.iloc[0]['mean_adapt']
elif 'no' in adapt_type:
# start_vals = pd.read_pickle('calc_model_core/models_big_fit_starting_vals.pkl')
# df_shuffled = star_vals_here.sample(frac=1).reset_index(drop=True)
# star_vals_here = start_vals[start_vals['cell'] == cell]
adapt = 0
mean_adapt = 0
else:
mean_adapt = 0
return mean_adapt, adapt
def reshuffled_start_vals(reshuffle, a_zero, stimulus, v_zero, start_vals, cell, stimulus_length_orig, transient):
if 'reshuffled' in reshuffle:
shuffled_params_name = load_shuffled_params_name(stimulus_length_orig, transient)
if len(start_vals) < 1:
start_vals = resave_small_files(shuffled_params_name + 'small.csv')
star_vals_here = start_vals[start_vals['cell'] == cell]
df_shuffled = star_vals_here.sample(frac=1).reset_index(drop=True)
v_dend = df_shuffled.iloc[0]['v_dent']
v_mem = df_shuffled.iloc[0]['v_mem']
adapt = df_shuffled.iloc[0]['adapt']
else:
v_mem = v_zero
adapt = a_zero
v_dend = stimulus[0]
return v_mem, adapt, v_dend
def nonlinearity(nr, alpha, power_variant, stimulus, baseline_before_power, sig_val=(1, 2), mult=1, plus=1, slope=10,
add=0):
stimulus = stimulus * 1
baseline_before_power = baseline_before_power * 1
if 'softplus' in power_variant:
stimulus = np.log(1 + np.exp(stimulus))
b_after_power = np.log(1 + np.exp(baseline_before_power))
elif 'GELU' in power_variant:
stimulus = 0.5 * stimulus * (1 + np.tanh(np.sqrt(2 / np.pi) * (stimulus + 0.044715 * stimulus ** 3)))
b_after_power = 0.5 * baseline_before_power * (
1 + np.tanh(np.sqrt(2 / np.pi) * (baseline_before_power + 0.044715 * baseline_before_power ** 3)))
elif 'powerend' in power_variant:
stimulus = powerend(stimulus, nr)
b_after_power = powerend(baseline_before_power, nr)
elif 'powerstart' in power_variant:
stimulus = powerstart(stimulus, nr)
b_after_power = powerstart(baseline_before_power, nr)
elif 'power' in power_variant:
stimulus = power_sole(stimulus, nr)
b_after_power = power_sole(baseline_before_power, nr)
elif 'SigmoidSmoothed' in power_variant:
stimulus = sigmoid_smoothout(stimulus, plus=sig_val[0], multiple=sig_val[1])
b_after_power = sigmoid_smoothout(baseline_before_power, plus=sig_val[0], multiple=sig_val[1])
elif 'SigmoidAdjusted' in power_variant:
stimulus = sigmoid_adjusted(stimulus, plus=plus, multiple=mult, add=add, slope=slope)
b_after_power = sigmoid_adjusted(baseline_before_power, plus=plus, multiple=mult, add=add, slope=slope)
elif 'LeakyReLU' in power_variant:
# change of stimulus
stimulus = stimulus * 1
stimulus[stimulus < 0.0] = stimulus[stimulus < 0.0] * alpha
b_after_power = baseline_before_power * 1
b_after_power[baseline_before_power < 0.0] = baseline_before_power[baseline_before_power < 0.0] * alpha
elif 'ELU' in power_variant:
stimulus = stimulus * 1
stimulus[stimulus < 0.0] = (np.exp(stimulus[stimulus < 0.0]) - 1) * alpha
b_after_power = baseline_before_power * 1
b_after_power[baseline_before_power < 0.0] = (np.exp(
baseline_before_power[baseline_before_power < 0.0]) - 1) * alpha
elif 'Tanh' in power_variant:
stimulus = np.tanh(stimulus)
b_after_power = np.tanh(baseline_before_power)
elif 'Sigmoid' in power_variant:
stimulus = 1 / (1 + np.exp(-stimulus))
b_after_power = 1 / (1 + np.exp(-baseline_before_power))
elif 'SiLU' in power_variant:
stimulus = stimulus / (1 + np.exp(-stimulus))
b_after_power = baseline_before_power / (1 + np.exp(-baseline_before_power))
elif 'square' in power_variant:
stimulus = stimulus ** nr
b_after_power = baseline_before_power ** nr
# embed()
elif 'RecCore' in power_variant:
stimulus = RecCore_power(stimulus, nr)
b_after_power = RecCore_power(baseline_before_power, nr)
elif 'sinz_plus' in power_variant:
threshold = float(power_variant.replace('sinz_plus', ''))
stimulus = threshold_power(stimulus, nr, threshold=threshold)
b_after_power = threshold_power(baseline_before_power, nr, threshold=threshold)
elif 'sinz' in power_variant:
stimulus = threshold_power(stimulus, nr)
b_after_power = threshold_power(baseline_before_power, nr)
elif power_variant == '':
stimulus = (stimulus) ** nr
b_after_power = (baseline_before_power) ** nr
if 'zeros' in power_variant:
# todo: these are not adapted for any power 3 etc., only for the non-wave case of the EODf
# stimulus = stimulus#np.zeros(len(stimulus))
b_after_power = np.zeros(len(baseline_before_power))
elif 'ones' in power_variant:
# stimulus = stimulus#np.ones(len(stimulus))
b_after_power = np.ones(len(baseline_before_power))
# todo: possibly add the zeros handling at the end here and change only b_after_power!
return b_after_power, stimulus
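# A short sketch comparing two of the variants handled in nonlinearity() on a plain sine carrier:
# the rectify-and-power nonlinearity of the default 'sinz' variant (here with a hypothetical power
# of 3) versus the 'softplus' variant. Purely illustrative; not part of the simulation pipeline.
def _demo_nonlinearity_sketch(eod_fr=750.0, deltat=1 / 20000):
    time = np.arange(0, 0.01, deltat)
    carrier = np.sin(2 * np.pi * eod_fr * time)
    base_sinz, stim_sinz = nonlinearity(3, 0, 'sinz', carrier, carrier)        # rectified, then cubed
    base_soft, stim_soft = nonlinearity(1, 0, 'softplus', carrier, carrier)    # log(1 + exp(x))
    return stim_sinz, stim_soft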
def load_shuffled_params_name(stimulus_length_orig, transient):
shuffled_params_name = 'models_big_fit_starting_vals_transient_' + str(transient) + '_len_' + str(
stimulus_length_orig)
return shuffled_params_name
def define_tol(rate_baseline_after, rate_baseline_before, lower_tol=0.95, upper_tol=1.05):
# a negative factor indicates an increased firing rate and will lead to a decrease of the offset
# a positive factor indicates a decreased firing rate and will lead to an increase of the offset
# if the initial rate was zero and spikes are induced we want the firing rate to drop,
# therefore the tolerance here is around zero, the desired firing rate
if rate_baseline_before == 0:
factor = -rate_baseline_after
upper_tol = 1
lower_tol = -1
# if rate_baseline_before is not zero then we want a ratio close to one when
# dividing both, therefore the tolerance here is some percentage above and below 1.
# that rate_baseline_after is 0 is not a problem, but we want it to rise here, so we use a positive factor
elif rate_baseline_after == 0:
factor = rate_baseline_before / 1
lower_tol = lower_tol
upper_tol = upper_tol
# this is the most common case: when the rates diverge we calculate a factor and aim for it to be
# 1 within a small percentage tolerance
else:
lower_tol = lower_tol
upper_tol = upper_tol
factor = rate_baseline_before / rate_baseline_after
return factor, lower_tol, upper_tol
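# A tiny worked example (hypothetical rates) of the tolerance logic above: the factor is the ratio
# of the firing rate before to the rate after the modification, and the adaptation below pushes it
# back into [lower_tol, upper_tol].
def _demo_define_tol_sketch():
    # typical case: the modification halved the rate -> factor 2.0, tolerances (0.95, 1.05)
    factor, lo, hi = define_tol(rate_baseline_after=200.0, rate_baseline_before=400.0)
    # silenced case: no spikes after the modification -> large positive factor, the offset must rise
    factor_silenced, _, _ = define_tol(rate_baseline_after=0.0, rate_baseline_before=400.0)
    return factor, (lo, hi), factor_silenced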
def adapt_process(j, t, adapt_offset, e, upper_tol, lower_tol, exponential, baseline_after, adapt_type, b_threshold,
v_dend, dend_tau, deltat, v_base, v_mem, v_offset, input_scaling, noise, mem_tau, tau_a, ref_period,
v_exp, exp_tau, threshold, delta_a, cell, adapt, stimulus_power, dendrid='', ref_type=''):
# embed()
# now we can see what the spikes were to the initial baseline
v_dent_output = np.zeros(len(b_threshold))
v_dent_output[0] = b_threshold[0]
adapt_type_bef = ''
mean_adapt_bef, adapt_bef = adapt_type_version(cell, adapt, adapt_type_bef)
# here the baseline is chosen as the rectified stimulus since this was utilized in the Ott (2020) paper
# print('bef adapt')
try:
# embed()
adapt_output, spikes_baseline_before, v_dent_output1, v_mem_output1 = stimulate_body(v_dent_output, b_threshold,
v_dend, dend_tau,
deltat, v_base, v_mem,
v_offset,
input_scaling, adapt_bef,
noise,
mem_tau,
tau_a, ref_period,
threshold, delta_a,
adapt_type=adapt_type_bef,
v_exp=v_exp,
exp_tau=exp_tau, )
except:
print('stimulate_body')
embed()
# print('aft adapt')
v_dent_output = np.zeros(len(baseline_after))
v_dent_output[0] = stimulus_power[0]
mean_adapt_aft, adapt_aft = adapt_type_version(cell, adapt, adapt_type)
# embed()
try:
adapt_output, spike_baseline_after, v_dent_output2, v_mem_output2 = stimulate_body(v_dent_output,
baseline_after, v_dend,
dend_tau,
deltat, v_base, v_mem,
v_offset, input_scaling,
adapt_aft, noise,
mem_tau,
tau_a, ref_period, threshold,
delta_a,
exponential=exponential,
v_exp=v_exp,
exp_tau=exp_tau,
ref_type=ref_type,
dendrid=dendrid,
adapt_type=adapt_type,
mean_adapt=mean_adapt_aft, ) #
except:
print('stimulate_body2')
embed()
# embed()
# embed()
# calculate the firing rates before and after the changes and their ratio (factor)
rate_baseline_before = rate_calculation(spikes_baseline_before, b_threshold, deltat)
rate_baseline_after = rate_calculation(spike_baseline_after, baseline_after, deltat)
# define the tolerance of the adaptation
factor, lower_tol, upper_tol = define_tol(rate_baseline_after, rate_baseline_before, lower_tol=lower_tol,
upper_tol=upper_tol)
# embed()
# adapt offset in case of baseline change due to modifications to pure sinus wave
# embed()
# todo: still handle adapt here
mean_adapt, adapt = adapt_type_version(cell, adapt, adapt_type)
# embed()
offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step = do_the_adaptation(
lower_tol, upper_tol, v_dent_output, baseline_after, v_dend, dend_tau, deltat, v_base, v_mem,
input_scaling, adapt, noise, mem_tau, tau_a, ref_period, threshold, delta_a,
rate_baseline_before, adapt_offset, e,
factor, v_offset, exponential=exponential, ref_type=ref_type, dendrid=dendrid,
offsets=[v_offset], rates=[rate_baseline_after], factors=[factor], printing=False, cell=cell,
adapt_type=adapt_type, mean_adapt=mean_adapt, j=j, t=t, v_exp=v_exp, exp_tau=exp_tau)
# embed()
return spikes_baseline_before, spike_baseline_after, rate_baseline_before, rate_baseline_after, mean_adapt, adapt, offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step
def do_the_adaptation(lower_tol, upper_tol, v_dent_output, baseline_after, v_dend, dend_tau,
deltat, v_base, v_mem, input_scaling, adapt,
noise, mem_tau, tau_a, ref_period, threshold, delta_a, rate_baseline_before,
adapt_offset, e, factor, v_offset, rate_adapted='no', spike_adapted='no', v_dent_adapted='no',
reduce='no', dendrid='', ref_type='', offsets=[], rates=[], offset_diffs=[],
factors=[], printing=False, cell='', adapt_type='', mean_adapt=0, step='no', t=0, exponential='',
v_exp=0, exp_tau=0.014, j=0):
# input
# factor - rate_baseline_before / rate_baseline_after
# it is important that e and j are zero, we want the adaptation to run only once!
# embed()
if ('adaptoffset' in adapt_offset) and (e == 0) and (j == 0) and (t == 0):
reduce = 0
step = 0
if (factor > upper_tol) or (factor < lower_tol):
# embed()
# print(factor)
if 'adaptoffset_bisecting' in adapt_offset:
# if 'adaptoffset_bisecting' in adapt_offset:
if (factor > upper_tol):
offsets_bisect = np.arange(v_offset, v_offset + 150, 1)
elif (factor < lower_tol):
offsets_bisect = np.arange(v_offset, v_offset - 150, -1)
for o, offset in enumerate(offsets_bisect):
adapt_output, spike_adapted, v_dent_adapted, v_mem_output = stimulate_body(v_dent_output,
baseline_after,
v_dend,
dend_tau, deltat,
v_base, v_mem, offset,
input_scaling, adapt,
noise,
mem_tau,
tau_a, ref_period,
threshold, delta_a,
exponential=exponential,
mean_adapt=mean_adapt,
adapt_type=adapt_type,
v_exp=v_exp,
ref_type=ref_type,
dendrid=dendrid,
exp_tau=exp_tau)
rate_adapted = rate_calculation(spike_adapted, baseline_after, deltat)
if printing:
print(' rate adapted' + str(rate_adapted))
# embed()
# analogous to the tolerance calculation function
if adapt_offset == 'adaptoffsetallall2':
if rate_baseline_before == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = rate_adapted
else:
factor_adapted = rate_baseline_before / rate_adapted
else:
if rate_baseline_before == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = 2
else:
factor_adapted = rate_baseline_before / rate_adapted
# embed()
# this is the recurrent loop
# embed()
offsets.append(offset)
rates.append(rate_adapted)
factors.append(factor_adapted)
offset_diffs.append(v_offset - offset)
# print('offset'+str(offset)+' factor '+str(factor)+' rate '+str(rate_adapted))
# embed()
if (factor_adapted < upper_tol) & (factor_adapted > lower_tol):
print('finished adaptation in function')
# embed()
return offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step
# find the moment we start bisecting
elif ((factor > upper_tol) & (factor_adapted < upper_tol)) | (
(factor < lower_tol) & (factor_adapted > lower_tol)):
step = np.abs(offsets_bisect[o] - offsets_bisect[o - 1]) / 2
# the current offset minus the step yields the new best guess
if ((factor > upper_tol) & (factor_adapted < upper_tol)):
offsets_bisect_new = offsets_bisect[o] - step
else:
offsets_bisect_new = offsets_bisect[o] + step
reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, spike_adapted, \
v_dent_adapted, offset, reduce, step = recurrent_adapt_biscect(o, offsets, 0, step,
offsets_bisect_new,
reduce,
v_dent_output,
baseline_after,
v_dend,
dend_tau,
deltat,
v_base,
v_mem,
input_scaling,
adapt,
noise,
mem_tau,
tau_a,
ref_period,
threshold,
delta_a,
rate_baseline_before,
upper_tol=upper_tol,
cell=cell,
lower_tol=lower_tol,
dendrid=dendrid,
adapt_offset=adapt_offset,
offsets=offsets, rates=rates,
factors=factors,
v_exp=v_exp,
mean_adapt=mean_adapt,
adapt_type=adapt_type,
exp_tau=exp_tau,
ref_type=ref_type,
printing=printing)
print('finished adaptation bisecting')
# embed()
return offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step
else:
reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, spike_adapted, \
v_dent_adapted, offset, reduce, step = recurrent_adapt(0, step,
v_offset,
factor,
reduce,
v_dent_output,
baseline_after,
v_dend,
dend_tau,
deltat,
v_base,
v_mem,
input_scaling,
adapt,
noise,
mem_tau,
tau_a,
ref_period,
threshold,
delta_a,
rate_baseline_before,
upper_tol=upper_tol, cell=cell,
lower_tol=lower_tol, adapt_offset=adapt_offset,
offsets=offsets, rates=rates, factors=factors,
v_exp=v_exp, mean_adapt=mean_adapt,
adapt_type=adapt_type,
exp_tau=exp_tau, printing=printing)
else:
# if factor in tolerance area then no changes in offset
offsets = []
rates = []
factors = []
adapt_output = float('nan')
factor_adapted = 'nochange'
offset = v_offset
elif (adapt_offset == 'adapt_baseline') and (e == 0) and (j == 0):
reduce = 0
step = 0
rate_adapted, spike_adapted, \
v_dent_adapted, offset, reduce, step = recurrent_adapt_baseline(step,
v_offset,
reduce,
v_dent_output,
baseline_after,
v_dend,
dend_tau,
deltat,
v_base,
v_mem,
input_scaling,
adapt,
noise,
mem_tau,
tau_a,
ref_period,
threshold,
delta_a,
exponential=exponential,
v_exp=v_exp,
exp_tau=exp_tau)
else:
factor_adapted = 1
factor = 1
offset = v_offset
adapt_output = float('nan')
return offsets, rates, factors, adapt_output, factor_adapted, factor, offset, rate_adapted, spike_adapted, v_dent_adapted, reduce, step
def calc_cvs(spike_adapted, spikes_baseline_before, spike_baseline_after, upper_tol, lower_tol, factor, offsets, rates):
test = False
if test:
plt_adaptation_convergence_rate(upper_tol, lower_tol, offsets, rates)
# offsets, rates, factors,
if len(spikes_baseline_before) > 0:
cv_before = np.diff(spikes_baseline_before)
cv_before = np.std(cv_before) / np.mean(cv_before)
else:
cv_before = 0
if len(spike_baseline_after) > 0:
cv_after = np.diff(spike_baseline_after)
cv_after = np.std(cv_after) / np.mean(cv_after)
else:
cv_after = 0
if (len(spike_adapted) > 0) & (spike_adapted != 'no'):
# try:
cv_adapted = np.diff(spike_adapted)
# except:
# embed()
cv_adapted = np.std(cv_adapted) / np.mean(cv_adapted)
else:
cv_adapted = 0
cvs = {'cv_before': cv_before,
'cv_after': cv_after,
'cv_adapted': cv_adapted}
return cvs
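# A one-line reminder (hypothetical spike times) of the quantity computed above: the coefficient
# of variation of the interspike intervals, std(ISI) / mean(ISI).
def _demo_cv_sketch(spike_times=(0.01, 0.03, 0.04, 0.07)):
    isi = np.diff(np.array(spike_times))
    return np.std(isi) / np.mean(isi)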
def power_sole(stimulus, nr):
stimulus[stimulus < 0.0] = 0
power_stimulus = stimulus ** nr
return power_stimulus
def sigmoid_adjusted(time_array, multiple=1, plus=1, slope=10, add=0):
sigmoid_activation = 1 / (1 + np.exp((- time_array + plus) * slope)) * multiple + add
return sigmoid_activation
def sigmoid_smoothout(time_array, plus=9, multiple=1.25):
sigmoid_activation2 = sigmoid_adjusted(time_array, plus=plus, multiple=multiple)
stimulus = time_array * 1
stimulus[stimulus < 0.0] = 0
rectified = stimulus
# embed()
length = np.where((sigmoid_activation2 > rectified) & (time_array > 1.08))[0]
if len(length) > 0:
pos = time_array > time_array[length[0]]
sigmoid_activation2[pos] = rectified[pos]
return sigmoid_activation2
def powerstart(stimulus, nr):
# stimulus = np.arange(-10, 10, 0.1)
stimulus[stimulus < 0.0] = 0
# power_stimulus = stimulus ** nr
# power_stimulus2 = power_stimulus * 1
if (nr < 1) and (nr > 0):
power_stimulus2 = threshold_power_square(stimulus, nr)
else:
power_stimulus2 = (stimulus) ** nr
power_stimulus2[np.where(power_stimulus2 > stimulus)] = stimulus[np.where(power_stimulus2 > stimulus)]
# plt.plot(stimulus,stimulus)
# plt.plot(stimulus,power_stimulus2)
# plt.show()
# embed()
stimulus = power_stimulus2 * 1
return stimulus
def powerend(stimulus, nr):
stimulus[stimulus < 0.0] = 0
if (nr < 1) and (nr > 0):
power_stimulus = threshold_power_square(stimulus, nr)
else:
power_stimulus = (stimulus) ** nr
power_stimulus3 = power_stimulus * 1
power_stimulus3[np.where(power_stimulus3 < stimulus)] = stimulus[np.where(power_stimulus3 < stimulus)]
return power_stimulus3
def recurrent_adapt_baseline(step, v_offset, reduce, v_dent_output, stimulus_baseline_modified, v_dend, dend_tau,
deltat,
v_base, v_mem, input_scaling, adapt, noise, mem_tau,
tau_a, ref_period, threshold, delta_a, exponential='', v_exp=0, exp_tau=0.014):
step += 1
reduce_minus = 1
offset = v_offset + reduce_minus
adapt_output, spike_adapted, v_dent_adapted, v_mem_output = stimulate_body(v_dent_output,
stimulus_baseline_modified, v_dend,
dend_tau, deltat,
v_base, v_mem, offset, input_scaling,
adapt, noise,
mem_tau,
tau_a, ref_period, threshold, delta_a,
exponential=exponential, v_exp=v_exp,
exp_tau=exp_tau)
rate_adapted = len(spike_adapted) / (len(stimulus_baseline_modified) * deltat)
if (rate_adapted < 100) and (step < 500):
rate_adapted, spike_adapted, v_dent_adapted, offset, reduce, step = recurrent_adapt_baseline(step, offset,
reduce,
v_dent_output,
stimulus_baseline_modified,
v_dend, dend_tau,
deltat,
v_base, v_mem,
input_scaling,
adapt,
noise, mem_tau,
tau_a, ref_period,
threshold, delta_a,
v_exp=v_exp,
exp_tau=exp_tau)
return rate_adapted, spike_adapted, v_dent_adapted, offset, reduce, step
def recurrent_adapt_biscect(o, offsets_bisect, offset_diff, step, v_offset, reduce, v_dent_output,
stimulus_baseline_modified, v_dend,
dend_tau, deltat,
v_base, v_mem, input_scaling, adapt, noise, mem_tau,
tau_a, ref_period, threshold, delta_a, rate_baseline_initial, upper_tol=1.1, lower_tol=0.9,
reduce_plus=0.25, dendrid='', adapt_offset='', ref_type='', cell='', offsets=[], rates=[],
factors=[], offset_diffs=[0], reduce_minus=-0.24, mean_adapt=0,
adapt_type='', max_iter=400, exponential='', v_exp=0, exp_tau=0.014, printing=False):
# input
# factor - rate_baseline_before / rate_baseline_after
# step += 1
# if the factor (rate_baseline_before / rate_baseline_after) is high this means that the modification reduces the firing
# rate and this can be compensated by an increased offset
# this was successful for all models with the adaptation; without the adaptation we may need a higher tolerance
adapt_output, spike_adapted, v_dent_adapted, v_mem_output = stimulate_body(v_dent_output,
stimulus_baseline_modified, v_dend,
dend_tau, deltat,
v_base, v_mem, v_offset, input_scaling,
adapt, noise,
mem_tau,
tau_a, ref_period, threshold, delta_a,
exponential=exponential,
ref_type=ref_type, dendrid=dendrid,
mean_adapt=mean_adapt,
adapt_type=adapt_type, v_exp=v_exp,
exp_tau=exp_tau)
rate_adapted = rate_calculation(spike_adapted, stimulus_baseline_modified, deltat)
if printing:
print(' rate adapted' + str(rate_adapted))
# embed()
# analogous to the tolerance calculation function
if adapt_offset == 'adaptoffsetallall2':
if rate_baseline_initial == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = rate_adapted
else:
factor_adapted = rate_baseline_initial / rate_adapted
else:
if rate_baseline_initial == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = 2
else:
factor_adapted = rate_baseline_initial / rate_adapted
# embed()
# this is the recurrent loop
# embed()
offsets.append(v_offset)
rates.append(rate_adapted)
factors.append(factor_adapted)
offset_diffs.append(offset_diff)
print('offset' + str(v_offset) + ' factor ' + str(factor_adapted) + ' rate ' + str(rate_adapted))
if (step < max_iter): # and (np.sign(factor_adapted) == np.sign(factor_adapted_orig))
# print(factor_adapted)
if (factor_adapted > upper_tol) or (factor_adapted < lower_tol):
# step = np.abs(offsets_bisect[o] - offsets_bisect[o - 1]) / 2
# this is the current minus the step yields the new best guess
# a_fe_new =
if (factor_adapted > upper_tol): # & (factors[-2]< lower_tol):
                # the new step can be half of the old one
step = step / 2
offset = v_offset + step
# elif (factor_adapted > upper_tol):
# offset = v_offset - step
elif (factor_adapted < lower_tol): # & (factors[-2]> upper_tol):
step = step / 2
offset = v_offset - step
# elif (factor_adapted < lower_tol):
# offset = v_offset - step
# embed()
reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, \
spike_adapted, v_dent_adapted, \
offset, reduce, step = recurrent_adapt_biscect(o, offsets_bisect, offset_diff, step,
offset,
reduce,
v_dent_output,
stimulus_baseline_modified,
v_dend,
dend_tau,
deltat,
v_base,
v_mem,
input_scaling,
adapt,
noise,
mem_tau,
tau_a,
ref_period,
threshold,
delta_a,
rate_baseline_initial,
upper_tol=upper_tol, reduce_plus=reduce_plus,
reduce_minus=reduce_minus,
lower_tol=lower_tol, cell=cell,
exponential=exponential, mean_adapt=mean_adapt,
v_exp=v_exp, adapt_offset=adapt_offset,
adapt_type=adapt_type,
exp_tau=exp_tau, ref_type=ref_type, dendrid=dendrid,
offsets=offsets, rates=rates, offset_diffs=offset_diffs,
factors=factors, printing=printing)
else:
offset = v_offset
else:
print('max iter problem')
embed()
if (factor_adapted > 1.05) or (factor_adapted < 0.95):
print('max iter problem')
embed()
fig, ax = plt.subplots(4, 1, sharex=True, constrained_layout=True)
ax[0].plot(offsets)
ax[0].scatter(np.arange(0, len(rates), 1), offsets)
ax[0].set_title('Offset')
ax[1].plot(rates)
ax[1].scatter(np.arange(0, len(rates), 1), rates)
ax[1].axhline(rate_baseline_initial)
# ax[1].axhline(rate_baseline_before, color = 'pink')
# ax[1].axhline(rate_baseline_after, color = 'purple')
ax[1].set_title('Rate')
ax[2].plot(factors)
ax[2].scatter(np.arange(0, len(rates), 1), factors)
ax[2].axhline(1, color='black')
ax[2].axhline(upper_tol, color='grey')
ax[2].axhline(lower_tol, color='grey')
ax[2].set_title('Factor')
# ax[2].set_xlabel('Step')
ax[3].plot(offset_diffs)
ax[3].scatter(np.arange(0, len(rates), 1), offset_diffs)
# ax[2].axhline(1, color='black')
# ax[2].axhline(upper_tol, color='grey')
# ax[2].axhline(lower_tol, color='grey')
ax[3].set_title('offsets rate proportion')
ax[3].set_xlabel('Step')
plt.show()
# embed()
return reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, spike_adapted, v_dent_adapted, offset, reduce, step
def recurrent_adapt(offset_diff, step, v_offset, factor_adapted_orig, reduce, v_dent_output, stimulus_baseline_modified,
v_dend,
dend_tau, deltat,
v_base, v_mem, input_scaling, adapt, noise, mem_tau,
tau_a, ref_period, threshold, delta_a, rate_baseline_initial, upper_tol=1.1, lower_tol=0.9,
reduce_plus=0.25, adapt_offset='', cell='', offsets=[], rates=[], factors=[], offset_diffs=[0],
reduce_minus=-0.24, mean_adapt=0, adapt_type='', max_iter=400, exponential='', v_exp=0,
exp_tau=0.014, printing=False):
# input
# factor - rate_baseline_before / rate_baseline_after
step += 1
    # if the factor (rate_baseline_before / rate_baseline_after) is high, the modification reduces the firing
    # rate and this can be compensated by an increased offset
if (cell == '2013-01-08-aa-invivo-1') & (adapt_type != ''):
# reduce_minus = -0.25
# reduce_plus = 0.25
if factor_adapted_orig > 3:
multiply_val = 3
else:
multiply_val = factor_adapted_orig
else:
multiply_val = factor_adapted_orig
    # this was successful for all models with adaptation; without the adaptation we may need a higher tolerance
if adapt_offset == 'adaptoffsetallall2':
if (factor_adapted_orig > upper_tol):
offset = v_offset + reduce_plus * factor_adapted_orig # * multiply_val# multiply_val
if printing:
print('increase' + ' bef offset ' + str(v_offset) + ' offset ' + str(offset))
# if the factor (rate_baseline_before / rate_baseline_after) is low this means that the modification increases the firing
# rate and this can be compensated by a decreased offset
elif (factor_adapted_orig < lower_tol):
offset = v_offset + reduce_minus # *(1/factor_adapted_orig)
if printing:
print('reduce' + ' bef offset ' + str(v_offset) + ' offset ' + str(offset))
elif adapt_offset == 'adaptoffsetnew':
if (factor_adapted_orig > upper_tol):
offset = v_offset + reduce_plus # * multiply_val# multiply_val
if np.abs(reduce_plus) > 0.001:
reduce_plus = reduce_plus * 0.8
if printing:
print('increase' + ' bef offset ' + str(v_offset) + ' offset ' + str(offset))
# if the factor (rate_baseline_before / rate_baseline_after) is low this means that the modification increases the firing
# rate and this can be compensated by a decreased offset
elif (factor_adapted_orig < lower_tol):
offset = v_offset + reduce_minus # *(1/factor_adapted_orig)
if np.abs(reduce_minus) > 0.001:
reduce_minus = reduce_minus * 0.8
if printing:
print('reduce' + ' bef offset ' + str(v_offset) + ' offset ' + str(offset))
else:
upper_tol_r = 1
lower_tol_r = 1
# embed()
# if (rates[-1] < rate_baseline_initial - lower_tol_r):
if len(offsets) == 1:
diff = (rate_baseline_initial - rates[-1]) * 0.02
elif len(offsets) == 2:
offset_diff = np.diff(offsets)[0] / np.diff(rates)[0]
diff = (rate_baseline_initial - rates[-1]) * offset_diff * 0.8
else:
offset_diff = (offsets[-1] - offsets[0]) / (rates[-1] - rates[0])
diff = (rate_baseline_initial - rates[-1]) * offset_diff * 0.8
# offset_diff = np.diff(offsets) / np.diff(rates)
# embed()
# embed()
offset = v_offset + diff # * multiply_val# multiply_val
# if np.abs(reduce_plus) > 0.001:
# reduce_plus = reduce_plus*0.8
# if printing:
# print('increase'+' bef offset '+str(v_offset)+' offset '+str(offset))
adapt_output, spike_adapted, v_dent_adapted, v_mem_output = stimulate_body(v_dent_output,
stimulus_baseline_modified, v_dend,
dend_tau, deltat,
v_base, v_mem, offset, input_scaling,
adapt, noise,
mem_tau,
tau_a, ref_period, threshold, delta_a,
exponential=exponential,
mean_adapt=mean_adapt,
adapt_type=adapt_type, v_exp=v_exp,
exp_tau=exp_tau)
rate_adapted = rate_calculation(spike_adapted, stimulus_baseline_modified, deltat)
if printing:
print(' rate adapted' + str(rate_adapted))
# embed()
    # analogous to the tolerance calculation function
if adapt_offset == 'adaptoffsetallall2':
if rate_baseline_initial == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = rate_adapted
else:
factor_adapted = rate_baseline_initial / rate_adapted
else:
if rate_baseline_initial == 0:
factor_adapted = -rate_adapted
elif rate_adapted == 0:
factor_adapted = 2
else:
factor_adapted = rate_baseline_initial / rate_adapted
# embed()
# this is the recurrent loop
# embed()
offsets.append(offset)
rates.append(rate_adapted)
factors.append(factor_adapted)
offset_diffs.append(offset_diff)
print('step ' + str(step) + ' factor ' + str(factor_adapted))
if (step < max_iter): # and (np.sign(factor_adapted) == np.sign(factor_adapted_orig))
# print(factor_adapted)
if (factor_adapted > upper_tol) or (factor_adapted < lower_tol):
reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, \
spike_adapted, v_dent_adapted, \
offset, reduce, step = recurrent_adapt(offset_diff, step,
offset,
factor_adapted,
reduce,
v_dent_output,
stimulus_baseline_modified,
v_dend,
dend_tau,
deltat,
v_base,
v_mem,
input_scaling,
adapt,
noise,
mem_tau,
tau_a,
ref_period,
threshold,
delta_a,
rate_baseline_initial,
upper_tol=upper_tol, reduce_plus=reduce_plus,
reduce_minus=reduce_minus,
lower_tol=lower_tol, cell=cell,
exponential=exponential, mean_adapt=mean_adapt,
v_exp=v_exp, adapt_offset=adapt_offset, adapt_type=adapt_type,
exp_tau=exp_tau, offsets=offsets, rates=rates,
offset_diffs=offset_diffs, factors=factors, printing=printing)
else:
print('max iter problem')
embed()
fig, ax = plt.subplots(4, 1, sharex=True, constrained_layout=True)
ax[0].plot(offsets)
ax[0].scatter(np.arange(0, len(rates), 1), offsets)
ax[0].set_title('Offset')
ax[1].plot(rates)
ax[1].scatter(np.arange(0, len(rates), 1), rates)
ax[1].axhline(rate_baseline_initial)
# ax[1].axhline(rate_baseline_before, color = 'pink')
# ax[1].axhline(rate_baseline_after, color = 'purple')
ax[1].set_title('Rate')
ax[2].plot(factors)
ax[2].scatter(np.arange(0, len(rates), 1), factors)
ax[2].axhline(1, color='black')
ax[2].axhline(upper_tol, color='grey')
ax[2].axhline(lower_tol, color='grey')
ax[2].set_title('Factor')
# ax[2].set_xlabel('Step')
ax[3].plot(offset_diffs)
ax[3].scatter(np.arange(0, len(rates), 1), offset_diffs)
# ax[2].axhline(1, color='black')
# ax[2].axhline(upper_tol, color='grey')
# ax[2].axhline(lower_tol, color='grey')
ax[3].set_title('offsets rate proportion')
ax[3].set_xlabel('Step')
plt.show()
# embed()
return reduce_plus, reduce_minus, offsets, rates, factors, adapt_output, factor_adapted, rate_adapted, spike_adapted, v_dent_adapted, offset, reduce, step
def RecCore_power(stimulus, nr):
if (nr < 1) and (nr > 0):
stimulus = threshold_power_square(stimulus, nr)
else:
stimulus = (stimulus) ** nr
stimulus[stimulus < 0.0] = 0.0
return stimulus
def threshold_power_square(stimulus, nr):
stimulus_mod = stimulus * 1
stimulus_mod[stimulus > 0] = (stimulus_mod[stimulus > 0]) ** nr
stimulus_mod[stimulus < 0] = -(-stimulus_mod[stimulus < 0]) ** nr
stimulus = stimulus_mod * 1
return stimulus
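# Hedged usage sketch (added for illustration, not part of the original analysis code):
# shows how RecCore_power()/threshold_power_square() apply a power nonlinearity to a signed
# signal. For exponents between 0 and 1 the sign is kept while taking the root and only the
# final result is rectified. The toy sine, sampling rate and exponents below are assumed
# example values, not parameters used anywhere in this module.
def _example_power_nonlinearity():
    deltat = 1.0 / 40000.0                      # assumed sampling interval
    t = np.arange(0.0, 0.01, deltat)            # 10 ms of a toy carrier-like signal
    toy = np.sin(2 * np.pi * 750.0 * t)
    squeezed = RecCore_power(toy.copy(), 0.5)   # signed square root, then rectified
    expanded = RecCore_power(toy.copy(), 3)     # cubic expansion, then rectified
    return squeezed, expanded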
def whitenoise(cflow, cfup, dt, duration, rng=np.random):
"""Band-limited white noise.
Generates white noise with a flat power spectrum between `cflow` and
`cfup` Hertz, zero mean and unit standard deviation. Note, that in
particular for short segments of the generated noise the mean and
standard deviation of the returned noise can deviate from zero and
one.
Parameters
----------
cflow: float
Lower cutoff frequency in Hertz.
cfup: float
Upper cutoff frequency in Hertz.
dt: float
Time step of the resulting array in seconds.
duration: float
Total duration of the resulting array in seconds.
    Returns
    -------
    noise: 1-D array
        White noise.
    whitef: 1-D array of complex
        The Fourier-domain amplitudes the noise was generated from.
    """
# number of elements needed for the noise stimulus:
n = int(np.ceil((duration + 0.5 * dt) / dt))
# next power of two:
nn = int(2 ** (np.ceil(np.log2(n))))
# indices of frequencies with `cflow` and `cfup`:
inx0 = int(np.round(dt * nn * cflow))
inx1 = int(np.round(dt * nn * cfup))
if inx0 < 0:
inx0 = 0
if inx1 >= nn / 2:
inx1 = nn / 2
# draw random numbers in Fourier domain:
whitef = np.zeros((nn // 2 + 1), dtype=complex)
if inx0 == 0:
whitef[0] = rng.randn()
inx0 = 1
if inx1 >= nn // 2:
whitef[nn // 2] = rng.randn()
inx1 = nn // 2 - 1
m = inx1 - inx0 + 1
whitef[inx0:inx1 + 1] = rng.randn(m) + 1j * rng.randn(m)
# scaling factor to ensure standard deviation of one:
sigma = 0.5 / np.sqrt(float(inx1 - inx0))
# inverse FFT:
noise = np.real(np.fft.irfft(whitef))[:n] * sigma * nn
# embed()
return noise, whitef
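# Hedged usage sketch (illustration only): generate a band-limited noise with whitenoise()
# and scale it to a 20 % contrast RAM, analogous to the model stimuli built further down in
# this module. The sampling interval, cutoff frequency and duration are assumed example values.
def _example_whitenoise_usage():
    deltat = 1.0 / 40000.0
    noise, whitef = whitenoise(0.0, 300.0, deltat, 2.0, rng=np.random)
    ram = 0.2 * noise                           # amplitude modulation with 20 % contrast
    return ram, whitef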
def resave_small_files(save_name, load_folder='calc_model_core', index_col=False, resave=None):
    # Pay attention:
    # The load folder function needs the string, not the function-modified version!
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
#embed()
    # todo: this still needs to be implemented, and not only for the subfolder
    # but also for a different PC
    # three variants: code folder, susept folder, susept folder somewhere else!
    # todo: this should be handled via version_comp here
start_vals = []
# embed()
if version_comp != 'public': # 'code':#:version_comp == 'develop':
# if (('alexi' in os.getlogin()) | ('rudnaya' in os.getlogin())) & (os.path.exists(load_folder_name(load_folder))):
if version_comp == 'develop':
if 'csv' in save_name:
if save_name != 'models_big_fit_d_right.csv':
start_vals = pd.read_csv(load_folder_name(load_folder) + '/' + save_name, index_col=0)
else:
# try:
start_vals = pd.read_csv(load_folder_name(load_folder) + '/' + save_name)
                    # if there is no resave we do not need to resave it either!
##if not resave:
# try:
# start_vals.to_csv(save_name)
# except:
# try:
# start_vals.to_csv(save_name)
# except:
# print('resave thing')
# embed()
if resave:
if 'pkl' in resave:
start_vals.to_pickle(save_name.replace('.csv', '.pkl'))
elif 'csv' in resave:
start_vals.to_csv(save_name.replace('.pkl', '.csv'))
                        if resave == True:  # instead we only save this if it is True
try:
start_vals.to_csv(save_name)
except:
try:
start_vals.to_csv(save_name)
except:
print('resave thing')
embed()
# embed()
elif '.dat' in save_name:
# open(load_folder_name(load_folder) + '/' + save_name)
shutil.copyfile(load_folder_name(load_folder) + '/' + save_name,
save_name)
elif '.npy' in save_name:
# open(load_folder_name(load_folder) + '/' + save_name)
                start_vals = np.load(load_folder_name(load_folder) + '/' + save_name)
if not resave:
np.save(save_name, start_vals)
elif 'pkl' in resave:
start_vals.to_pickle(save_name.replace('.npy', '.pkl'))
elif 'csv' in resave:
start_vals.to_csv(save_name.replace('.npy', '.csv'))
else:
# try:
start_vals = pd.read_pickle(load_folder_name(load_folder) + '/' + save_name)
# except:
# print('start vals thing')
if not resave:
start_vals.to_pickle(save_name)
elif 'pkl' in resave:
start_vals.to_pickle(save_name.replace('.csv', '.pkl'))
elif 'csv' in resave:
start_vals.to_csv(save_name.replace('.pkl', '.csv'))
# embed()
else:
try:
if not resave:
start_vals = load_resaved_code(index_col, load_folder, save_name, start_vals)
elif 'pkl' in resave:
start_vals = load_resaved_code(index_col, load_folder, save_name.replace('csv', 'pkl'),
start_vals)
# start_vals = load_resaved(index_col, save_name.replace('csv', 'pkl'), start_vals)
elif 'csv' in resave:
start_vals = load_resaved_code(index_col, load_folder, save_name.replace('pkl', 'csv'),
start_vals)
# start_vals = load_resaved(index_col, save_name.replace('pkl', 'csv'), start_vals)
elif 'npy' in resave:
print(os.getcwd())
                    print(os.environ)
try:
start_vals = load_resaved_code(index_col, load_folder, save_name.replace('pkl', 'npy'),
start_vals)
except:
print('something')
embed()
except:
start_vals = load_resaved_code(index_col, load_folder, save_name, start_vals)
else:
if not resave:
start_vals = load_resaved_public(index_col, save_name, start_vals)
elif 'pkl' in resave:
start_vals = load_resaved_public(index_col, save_name.replace('csv', 'pkl'), start_vals)
elif 'csv' in resave:
start_vals = load_resaved_public(index_col, save_name.replace('pkl', 'csv'), start_vals)
elif 'npy' in resave:
start_vals = load_resaved_public(index_col, save_name.replace('pkl', 'npy'), start_vals)
# embed()
return start_vals
def load_resaved_code(index_col, load_folder, save_name, start_vals):
if 'csv' in save_name:
if save_name != 'models_big_fit_d_right.csv':
start_vals = pd.read_csv(load_folder_name(load_folder) + '/' + save_name, index_col=0)
else:
if index_col:
start_vals = pd.read_csv(
                    load_folder_name(load_folder) + '/' + save_name, index_col=0)  # index_col needs to be included here
else:
try:
# embed()
start_vals = pd.read_csv(
                        load_folder_name(load_folder) + '/' + save_name)  # index_col would need to be added here
except:
print('resave public')
embed()
elif '.pkl' in save_name:
start_vals = pd.read_pickle(load_folder_name(load_folder) + '/' + save_name)
elif 'npy' in save_name:
start_vals = np.load(load_folder_name(load_folder) + '/' + save_name)
return start_vals
def load_resaved_public(index_col, save_name, start_vals):
if 'csv' in save_name:
if save_name != 'models_big_fit_d_right.csv':
start_vals = pd.read_csv(save_name, index_col=0)
else:
if index_col:
start_vals = pd.read_csv(save_name,
                    index_col=0)  # index_col needs to be included here
else:
start_vals = pd.read_csv(save_name)
elif '.pkl' in save_name:
start_vals = pd.read_pickle(save_name) # , index_col=0
elif '.npy' in save_name:
start_vals = np.load(save_name) # , index_col=0
return start_vals
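# Hedged usage sketch (illustration only): typical call of resave_small_files(), mirroring
# how the model parameter table is loaded elsewhere in this module. Whether the file
# 'models_big_fit_d_right.csv' exists in the 'calc_model_core' folder on a given machine is
# an assumption.
def _example_resave_small_files():
    model_cells = resave_small_files('models_big_fit_d_right.csv', load_folder='calc_model_core')
    return model_cells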
def choose_p_units(stack_files):
# embed()
# t1 = time.time()
lim = 0.30
lim2 = 0.29
max_lim = 0.4
# cvs_stim = []
# for f, cell in enumerate(cells):[['cell','cv','cv_stim']].
stack_files = stack_files[stack_files['stimulus_length'] > 0]
stack_droped = stack_files[['cell', 'cv', 'cv_stim']].drop_duplicates()
cells = np.array(stack_droped.cell)
cvs_stim = stack_droped[stack_droped['cell'].isin(cells)]['cv_stim']
cvs_stim = np.array(cvs_stim)
cells_stim = cells[np.argsort(cvs_stim)][np.sort(cvs_stim) < lim]
cells_to_high = cells[np.argsort(cvs_stim)][np.sort(cvs_stim) > max_lim]
cvs_stim2 = cvs_stim[np.argsort(cvs_stim)][np.sort(cvs_stim) < lim]
cvs_base = []
# for f, cell in enumerate(cells):
cvs_base = stack_droped[stack_droped['cell'].isin(cells)]['cv']
cvs_base = np.array(cvs_base)
cells_base = cells[np.argsort(cvs_base)][np.sort(cvs_base) < lim2]
cells_all = list(cells_stim)
cvs_all = list(cvs_stim2)
# t2 = time.time() - t1
# embed()
for c, c3 in enumerate(cells_base):
# if c3 in cells_stim:
# print(c3 + 'in')
if c3 not in cells_stim:
if c3 not in cells_to_high:
# print(c3 + 'not in')
cells_all.append(c3)
cvs_all.append(cvs_base[c])
# cells = cells_all
# embed()
cells, cvs_unique = make_cell_unique(np.array(cvs_all), np.array(cells_all))
# cells = np.unique(cells)
# t2 = time.time() - t1
# print(f'loading time {t2}')
# embed()
return cells
def find_cell_add(cells_given):
if not cells_given:
cells_save = []
cell_add = ''
else:
cells_save = cells_given
# embed()
cell_add = ''
for cell in cells_given:
cell_add += cell
return cell_add, cells_save
def add_ends(adapt_type, dendrid, ref_type, stim_type_noise, var_type):
if 'additiv' in var_type: # ' ser1 ' + str(np.round(model_show.ser_first_stim.iloc[0], 2))+ ' ser mean ' + str(np.round(model_show.ser_stim.iloc[0], 5))
stim_type_noise_name = stim_type_noise
else:
stim_type_noise_name = ''
if dendrid == '':
dendrid_name = 'standard'
else:
dendrid_name = dendrid
if ref_type == '':
ref_type_name = 'standard'
else:
        ref_type_name = ref_type
if adapt_type == '':
adapt_type_name = 'standard'
else:
adapt_type_name = adapt_type
return adapt_type_name, dendrid_name, ref_type_name, stim_type_noise_name
def titles_susept_names(a_fe, extract, noise_added, stim_type_afe_name, stim_type_noise_name2,
trials_stim, var_items, var_type):
# for var_it in var_items:
titles = ''
suptitles = ''
titles_nr = 0
if 'internal_noise' in var_items:
titles += ' intrinsic noise=' + stim_type_noise_name2
titles_nr += 1
else:
suptitles += ' intrinsic noise=' + stim_type_noise_name2
if 'external_noise' in var_items:
titles += ' additive RAM=' + stim_type_afe_name
titles_nr += 1
else:
suptitles += ' additive RAM=' + stim_type_afe_name
if titles_nr % 2 == 0:
titles += '\n'
if 'repeats' in var_items:
titles += ' $N_{repeat}=$' + str(trials_stim)
titles_nr += 1
else:
suptitles += ' $N_{repeat}=$' + str(trials_stim)
if titles_nr % 2 == 0:
titles += '\n'
if 'contrasts' in var_items:
titles += ' contrast=' + str(a_fe)
titles_nr += 1
else:
suptitles += ' contrast=' + str(a_fe)
if titles_nr % 2 == 0:
titles += '\n'
if 'level_extraction' in var_items:
titles += ' Extract Level=' + str(extract)
titles_nr += 1
else:
suptitles += ' Extract Level=' + str(extract)
if titles_nr % 2 == 0:
titles += '\n'
if 'D_extraction_method' in var_items:
titles += ' D extraction ' + str(var_type)
titles_nr += 1
else:
suptitles += ' D extraction ' + str(var_type)
if titles_nr % 2 == 0:
titles += '\n'
if 'noises_added' in var_items:
titles += ' high freq noise=' + str(noise_added)
titles_nr += 1
else:
suptitles += ' high freq noise=' + str(noise_added)
if titles_nr % 2 == 0:
titles += '\n'
return suptitles, titles
def quater_triangle(ax, fr_stim, cutoff, color='darkred', scatters=False, line_length=1 / 4, vertical=False,
label='Sum Fr'):
cutoff = float(cutoff)
line_length_h = np.sqrt(line_length ** 2 / 2)
    # diagonal
ax.plot([0, fr_stim * line_length_h], [fr_stim, fr_stim * (1 - line_length_h)], color=color, label=label,
linestyle='--')
    # horizontal line
ax.plot([0, fr_stim * line_length], [fr_stim, fr_stim], color=color, linestyle='--')
# embed()
if vertical:
ax.plot([fr_stim, fr_stim], [cutoff, cutoff - 25], color=color, linestyle='--')
if fr_stim < cutoff:
# embed()
if scatters:
ax.scatter(fr_stim, fr_stim, color=color, marker="+")
ax.scatter(fr_stim, 0, color=color, marker="+", clip_on=False)
else:
#################################
# lower triangle
# diagonal
        # c = sqrt(b**2 + a**2)
# c = line_length
# c**2 = a**2+a**2
# c**2 = 2*a**2
# a = sqrt(c**2/2)
ax.plot([fr_stim, fr_stim * (1 - line_length_h)], [0, fr_stim * line_length_h], color=color, label=label,
linestyle='--')
        # vertical line
ax.plot([fr_stim, fr_stim], [0, fr_stim * line_length], color=color, linestyle='--')
#################################
# tip triangle
        # vertical line
ax.plot([fr_stim, fr_stim], [fr_stim, fr_stim * (1 - line_length)], color=color, linestyle='--')
        # horizontal line
ax.plot([fr_stim, fr_stim * (1 - line_length)], [fr_stim, fr_stim], color=color, linestyle='--')
# embed()
def find_tags_all(b):
tags = []
for t in b.tags:
tags.append(t.name)
return tags
def find_tags_list(b, names='ficurve'):
ts = []
for t in b.tags:
if names in t.name.lower():
ts.append(t)
return ts
def find_contrasts(features, mt):
Intensity = [] # None
preIntensity = [] # None
contrasts = [] # None
precontrasts = [] # None
for ft in features:
if 'PreContrast' in ft:
precontrasts = mt.features[ft].data[:]
elif 'Contrast' in ft:
contrasts = mt.features[ft].data[:]
elif 'PreIntensity' in ft:
preIntensity = mt.features[ft].data[:]
elif 'Intensity' in ft:
Intensity = mt.features[ft].data[:]
return Intensity, preIntensity, contrasts, precontrasts
def bolzmann(x, f_max, k, I_half):
return f_max / (1 + np.exp(-k * (x - I_half)))
def bolzmann_onset(x, f_max, k, I_half):
return f_max / (1 + np.exp(-k * (x - I_half)))
def bolzmann_steady(x, f_max, k, I_half):
return f_max / (1 + np.exp(-k * (x - I_half)))
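# Hedged sketch (illustration only): fit the Boltzmann function defined above to an onset
# f-I curve with scipy's curve_fit. The contrast values, firing rates and the initial guess
# are made-up example numbers, not data from any recording.
def _example_bolzmann_fit():
    from scipy.optimize import curve_fit
    contrasts = np.array([-0.2, -0.1, 0.0, 0.1, 0.2])
    onset_rates = np.array([60.0, 120.0, 250.0, 380.0, 430.0])       # Hz
    popt, _ = curve_fit(bolzmann_onset, contrasts, onset_rates, p0=[450.0, 20.0, 0.0])
    f_max, k, I_half = popt
    return f_max, k, I_half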
def find_first_level_path(search_name, first_level_path):
find_path = None
print(first_level_path)
# first level
if os.path.exists(first_level_path + '/' + search_name):
# if os.path.isdir(first_level_path):
find_path = first_level_path # + '/' + search_name
return find_path
else:
# second_level
find_path = find_second_level_path(first_level_path, search_name, find_path=None)
return find_path
def find_second_level_path(l0, search_name, find_path=None):
# listed_dir = os.listdir(first_level_path)
listed_dir = [d for d in os.listdir(l0) if os.path.isdir(os.path.join(l0, d))]
# embed()
for l1 in listed_dir:
if os.path.isdir(l0 + '/' + l1):
if os.path.exists(l0 + '/' + l1 + '/' + search_name):
find_path = l0 + '/' + l1
return find_path
if os.path.exists(l0 + '/' + l1 + '/detailed/' + search_name):
find_path = l0 + '/' + l1 + '/detailed'
return find_path
return find_path
def find_third_level_path(l0, l1, file):
# first level
if os.path.exists(l0 + '/' + l1 + '/' + file):
# if os.path.isdir(first_level_path):
find_path = l0 + '/' + l1 # + '/' + search_name
return find_path
find_path = find_second_level_path(l0, file, find_path=None)
# listed_dir = os.listdir(first_level_path)
listed_dir = [d for d in os.listdir(l0 + '/' + l1) if os.path.isdir(os.path.join(l0 + '/' + l1, d))]
# embed()
for l2 in listed_dir:
if os.path.isdir(l0 + '/' + l1 + '/' + l2):
if os.path.exists(l0 + '/' + l1 + '/' + l2 + file):
find_path = l0 + '/' + l1 + '/' + l2
return find_path
if os.path.exists(l0 + '/' + l1 + '/' + l2 + '/cells/' + file):
find_path = l0 + '/' + l1 + '/' + l2 + '/cells'
return find_path
return find_path
def load_path_data(l0=None, l1=None, file=None, load=False):
    # best case we already know the paths, of course!
t1 = time.time()
if l1:
find_path = find_third_level_path(l0, l1, file)
elif l0:
find_path = find_first_level_path(file, l0)
else:
first_level_paths = os.listdir()
        # todo: this might need to be made faster, list only directories!
first_level_paths = [d for d in os.listdir() if os.path.isdir(d)]
for l0 in first_level_paths:
# name = first_level_path.replace('.py','')
if os.path.isdir(l0):
if ('calc' in l0) & ('.py' not in l0):
# try:
find_path = find_first_level_path(file, l0)
if find_path:
t2 = time.time()
print(' find file ' + str(t2 - t1))
return find_path, find_path + '/' + file
# except:
# print('find ')
# embed()
# embed()
t2 = time.time()
print(' find file ' + str(t2 - t1))
save_name = find_path + '/' + file
frame_all = []
if load:
t1 = time.time()
if 'pkl' in save_name:
frame_all = pd.read_pickle(save_name)
else:
frame_all = pd.read_csv(save_name)
t2 = time.time()
print(' find file ' + str(t2 - t1))
return find_path, save_name, frame_all
def save_structure_to_frame(position, frame, isi, name='isi', double=True):
# initialize float
if position == 0:
frame.loc[position, name] = float('nan')
frame[name] = frame[name].astype(object)
# try differently to put the array in there
if double:
try:
frame.at[position, name] = [isi]
except:
try:
frame.at[position, name] = isi
frame.at[position, name] = [isi]
except:
frame[name] = frame[name].astype(object)
frame.at[position, name] = [isi]
else:
try:
frame.at[position, name] = isi
except:
try:
frame.at[position, name] = [isi]
except:
# embed()
frame[name] = frame[name].astype(object)
frame.at[position, name] = isi
return frame
def c_to_dist(eod_size, power=1.61, factor=24, convert='contrast_to_dist'):  # these factors were fitted visually
    # ok, this was based on what I fitted myself
    # power = 2.09, factor = 12.23,
    # and 1.63 and 29 are the values according to Henninger 2020
# if 'mv' not in input:
# factor = 9.793241127295891
# 1 / ((distance/factor) ** 3) # /10
# Power = , factor =
    # ok, this is the formula where c is the distance
    #
    # but we actually want the formula
# distance = (factor / c)** (1/power)
# distance = 10 / c ** (1 / 3)
# else:
# distance = factor / (c) ** power
if 'contrast_to_dist' in convert:
distance = (factor / eod_size) ** (1 / power)
output = distance
    else:  # this is the default we want
distance_cm = eod_size
field = factor / ((distance_cm) ** power)
output = field
return output
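# Hedged worked example (illustration only): convert a 10 % contrast into an estimated
# distance and back into a field amplitude with c_to_dist(). The label 'dist_to_contrast'
# only has to differ from 'contrast_to_dist' to select the inverse direction; the contrast
# value is arbitrary.
def _example_c_to_dist():
    distance_cm = c_to_dist(0.1, convert='contrast_to_dist')    # ~ (24 / 0.1) ** (1 / 1.61)
    field = c_to_dist(distance_cm, convert='dist_to_contrast')  # back to ~ 0.1
    return distance_cm, field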
def calc_burst_perc(eod_fr, isi):
isi_eod_corr = isi / (1 / eod_fr)
burst_1 = np.sum(isi_eod_corr < 1.5) / len(isi_eod_corr)
burst_2 = np.sum(isi_eod_corr < 2.5) / len(isi_eod_corr)
return burst_1, burst_2
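# Hedged sketch (illustration only): burst fractions from an ISI sequence. Spikes with an
# ISI below 1.5 (or 2.5) EOD periods count as burst spikes. The spike times and the 750 Hz
# EOD frequency are made-up example values.
def _example_burst_perc():
    eod_fr = 750.0
    spikes = np.array([0.0100, 0.0113, 0.0127, 0.0200, 0.0330])  # seconds
    isi = np.diff(spikes)
    burst_1, burst_2 = calc_burst_perc(eod_fr, isi)
    return burst_1, burst_2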
def find_base_fr(spike_adapted, deltat, stimulus_length, time_array, dev=0.0005):
spikes_mat = [[]] * len(spike_adapted)
# embed()
for s in range(len(spike_adapted)):
spikes_mat[s] = cr_spikes_mat(spike_adapted[s], 1 / deltat, int(stimulus_length * 1 / deltat))
sampling_rate = 1 / deltat
if dev != 'original':
smoothed05 = gaussian_filter(spikes_mat, sigma=dev * sampling_rate)
else:
smoothed05 = spikes_mat
mat_base = np.mean(smoothed05, axis=0)
base_cut = mat_base[time_array > 0.05]
return base_cut, mat_base,
def spikes_mat_depending_on_length(spike_adapted, deltat, stimulus_length):
if len(spike_adapted) < 21:
spikes_mat = [[]] * len(spike_adapted)
for s in range(len(spike_adapted)):
spikes_mat[s] = cr_spikes_mat(spike_adapted[s], 1 / deltat, int(stimulus_length * 1 / deltat))
else:
spikes_mat = [[]] * 19
# nrs = np.linspace(0, len(spike_adapted),20)
spikes_mat = [[]]
nrs = [0, len(spike_adapted)]
for s in range(len(nrs) - 1):
spikes_mat[s], time2, samp2 = calc_spikes_hist(len(spike_adapted[int(nrs[s]):int(nrs[s + 1])]),
stimulus_length,
np.concatenate(spike_adapted[int(nrs[s]):int(nrs[s + 1])]),
deltat=deltat)
return spikes_mat
def find_base_fr2(spike_adapted, deltat, stimulus_length, dev=0.0005, length_adapt=True):
if length_adapt == True:
spikes_mat = spikes_mat_depending_on_length(spike_adapted, deltat, stimulus_length)
# hist = calc_hist(spike_adapted, bin_rate=1 / sampling_rate)
else:
spikes_mat = [[]] * len(spike_adapted)
for s in range(len(spike_adapted)):
spikes_mat[s] = cr_spikes_mat(spike_adapted[s], 1 / deltat, int(stimulus_length * 1 / deltat))
sampling_rate = 1 / deltat
if dev != 'original':
smoothed05 = gaussian_filter(spikes_mat, sigma=dev * sampling_rate)
else:
smoothed05 = spikes_mat
mat_base = np.mean(smoothed05, axis=0) # time_array
base_cut = mat_base[np.arange(0, len(mat_base) / sampling_rate, 1 / sampling_rate) > 0.05]
return base_cut, mat_base, smoothed05, spikes_mat
def calc_spikes_hist(trials_nr, stimulus_length, spikes_mt, deltat=0.001):
time = np.arange(0, stimulus_length + deltat / 2, deltat)
bin_nr = time - deltat / 2
try:
hist = np.histogram(spikes_mt, bins=bin_nr)
except:
print('hist problem')
embed()
# time = hist[1][0:-1]-np.diff(hist[1])[0]/2
deltat_here = np.diff(time)[0]
hist_out = hist[0] * (1 / deltat_here) / trials_nr
time = time[0:-1]
return hist_out, time, deltat_here
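# Hedged sketch (illustration only): build a trial-averaged PSTH from concatenated spike
# times with calc_spikes_hist(). The trial count, stimulus duration and spike times are
# example values.
def _example_spikes_hist():
    trials_nr = 2
    stimulus_length = 0.5                                           # seconds
    spikes_concat = np.array([0.05, 0.12, 0.30, 0.06, 0.13, 0.31])  # both trials appended
    rate, time_bins, dt = calc_spikes_hist(trials_nr, stimulus_length, spikes_concat, deltat=0.001)
    return rate, time_bins, dt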
def load_eod_size(cell_recording, max='max'):
data_dir = load_folder_name('data') + 'cells'
try:
f = nix.File.open(data_dir + '/' + cell_recording + '/' + cell_recording + ".nix", nix.FileMode.ReadOnly)
b = f.blocks[0]
# embed()
# tag_names_
try:
baseline = b.tags['BaselineActivity_1']
first = baseline.position[0] # s[:][l] - delay,
second = baseline.extent[0] # s[:][l] + delay,
except:
baseline = []
first = 10
second = 10.2
# embed()
eod_data_array = b.data_arrays['EOD'] # ['EOD-1']
eod_mt = eod_data_array.get_slice([first], [second], nix.DataSliceMode.Data)[:]
# zenter_and_normalize
eod_mt = zenter_and_normalize(eod_mt, 1, normalize=False)
if max == 'max':
max_val = np.max(eod_mt)
min_val = np.min(eod_mt)
else:
max_val = np.mean(eod_mt[eod_mt > np.percentile(eod_mt, 95)])
min_val = np.mean(eod_mt[eod_mt < np.percentile(eod_mt, 5)])
eod_size = max_val - min_val
# embed()
except:
print('EODF size problem')
eod_size = 1
b = 1
baseline = 1
return baseline, b, eod_size,
def mult_beat_freqs(eodf, maxx, freq1, color_stim_mult='orange', stim_thing=True, color_df_mult='green',
color_eodf='black', color_stim='orange'):
freq1_array = np.arange(freq1, maxx, freq1)
if stim_thing:
freqs = [eodf + freq1, eodf + 2 * freq1, eodf + 3 * freq1, eodf + 4 * freq1, eodf + 5 * freq1, eodf - freq1,
eodf - 2 * freq1, eodf - 3 * freq1, eodf - 4 * freq1, eodf - 5 * freq1,
]
else:
freqs = []
freqs.extend(freq1_array)
freqs.extend([eodf, freq1 + eodf])
# embed()
if stim_thing:
colors_peaks = [
color_stim_mult, color_stim_mult, color_stim_mult, color_stim_mult, color_stim_mult,
color_stim_mult, color_stim_mult, color_stim_mult, color_stim_mult, color_stim_mult,
] # colors_array[1],'green','green'
else:
colors_peaks = []
colors_peaks.extend([color_df_mult] * len(freq1_array))
colors_peaks.extend([color_eodf, color_stim, color_df_mult])
labels = [
'EODF+DF1', 'EODF+DF1', 'EODF+DF1', 'EODF+DF1', 'EODF+DF1', 'EODF-DF1', 'EODF-DF1', 'EODF-DF1',
'EODF-DF1', 'EODF-DF1',
]
# DF_pi_core(),
# f_eod_pi_core(),
# f_pi_core(),
labels.extend([DF_pi_core()] * len(freq1_array))
labels.extend([f_eod_pi_core(), f_pi_core(), 'DF1'])
if stim_thing:
alphas = [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
]
alphas.extend([1])
alphas.extend([alpha_burstgain() + 0.4] * int((len(freq1_array)) - 1))
alphas.extend([1, 1, 1])
else:
alphas = []
alphas.extend([alpha_burstgain()] * len(freq1_array))
alphas.extend([1, 1, 1])
return alphas, labels, colors_peaks, freqs
def alpha_burstgain():
return 0.6
def peaks_1d(fr, a_fr, beat1, freq1):
if a_fr == 1:
names = {'fundamental/2': np.abs(beat1 / 2),
'fundamental': np.abs(beat1),
'h1': np.abs(beat1 * 2),
'h2': np.abs(beat1 * 3),
'h3': np.abs(beat1 * 4),
'fr/2': np.abs(fr / 2),
'fr': np.abs(fr),
'fr1': np.abs(fr * 2),
'fr2': np.abs(fr * 3),
'fr3': np.abs(fr * 4),
}
else:
names = {'fundamental/2': np.abs(freq1[0] / 2),
'fundamental': np.abs(freq1[0]),
'h1': np.abs(freq1[0] * 2),
'h2': np.abs(freq1[0] * 3),
'h3': np.abs(freq1[0] * 4),
'fr/2': np.abs(fr / 2),
'fr': np.abs(fr),
'fr1': np.abs(fr * 2),
'fr2': np.abs(fr * 3),
'fr3': np.abs(fr * 4),
}
return names
def default_settings(lw=1, column=2, width=2.6 * 3, length=3, ts=None, ls=None, fs=None): # 9
    # don't forget about the default settings
    # embed()
default_figsize(column, length, width)
plt.rcParams['figure.facecolor'] = 'white'
# plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.facecolor'] = 'none'
# if not ts:
# plt.rcParams['axes.titlesize'] = 'large'#ts
plt.rcParams['legend.fontsize'] = 'medium'
if fs:
plt.rcParams['font.size'] = fs
if ts:
plt.rcParams['axes.titlesize'] = ts # 'large' # ts
# if not ls:
# plt.rcParams['axes.labelsize'] = 'medium'#ls
# else:
if ls:
plt.rcParams['axes.labelsize'] = ls # ls
plt.rcParams['lines.linewidth'] = lw
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.loc'] = 'upper right'
plt.rcParams["legend.frameon"] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
    ## this is in relative units
plt.rcParams['axes.ymargin'] = 0.02 # 0.99#0.05
plt.rcParams['axes.xmargin'] = 0.02 # 0.99#0.05
def default_figsize(column=None, width=2.6 * 3, length=3):
if column == 1.5:
width = 4.5
elif column == 1:
width = 3.42
elif column == 2:
width = column2()
plt.rcParams['figure.figsize'] = (width, length)
plt.rcParams['figure.facecolor'] = 'white'
def column2():
width = 6.77165354 # 7
return width
def chose_mat_max_value(DF1_desired, DF2_desired, extract, mult_type, eodftype, indices, cell, contrast_small,
contrast_big, contrast1, dfs, start, dev, contrast, autodefine='autodefine', nfft=0,
limit=10.2, mean_type='_MeanTrialsIndexPhaseSort_Min0.25sExcluded_', cut_matrix='malefemale',
chose_score='mean_nrs'): # chose_score = 'auci02_012-auci_base_01'
try:
t1 = time.time()
except:
print('time not there')
embed()
# if file_prechosen:
if 'autodefine' in autodefine:
try:
load_name, save_name_all = load_three_fish_savename(mean_type, '', mult_type, eodftype, indices, cell,
extract, devname=str([dev]), nfft=nfft)
frame_o = pd.read_pickle(load_name)
except:
load_name = load_folder_name(
'threefish') + '/calc_auc_three_MeanTrialsIndexPhaseSort_Min0.25sExcluded_Same_Count_multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
try:
load_name = load_folder_name(
'threefish') + '/calc_auc_three_AllTrialsIndex_Min0.25sExcluded__multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
frame_o = pd.read_pickle(load_name)
except:
load_name = load_folder_name(
'threefish') + '/calc_auc_three-_AllTrialsIndex_Min0.25sExcluded__multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
frame_o = pd.read_pickle(load_name)
# embed()
frame = frame_o[frame_o['cell'] == cell]
df = frame[frame[contrast_small] == contrast]
df_dev = df[df['dev'] == dev]
df_datapoint = df_dev[df_dev['datapoints'] == df_dev['datapoints'].unique()[0]] # d
df_contrast = df_datapoint[df_datapoint[contrast_big] == contrast1]
else:
load_name = load_folder_name(
'threefish') + '/calc_auc_three_MeanTrialsIndexPhaseSort_Min0.25sExcluded_Same_Count_multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
load_name = load_folder_name(
'threefish') + '/calc_auc_three_AllTrialsIndex_Min0.25sExcluded__multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
        # todo: this spikes core is still missing the EODf frequencies
file_name = 'calc_auc_three_core-spikes_core_AUCI.pkl' # 'calc_auc_three_core-spikes_core_AUCI.pkl'
load_name = load_folder_name('threefish') + '/' + file_name
try:
frame_o = resave_small_files(file_name, 'threefish', resave='csv') # load_folder_name()
except:
print('some problem')
embed()
# if os.path.exists()
# frame_o = pd.read_pickle(load_name)
frame = frame_o[frame_o['cell'] == cell]
df = frame[frame[contrast_small] == contrast]
df_contrast = df[df[contrast_big] == contrast1]
# embed()
name = check_dirs(cell)
# if 'autodefine' in autodefine:
    # todo: there is still a problem with autodefine here
pivot_chosen, _, indexes, resorted, orientation, cut_type = get_data_pivot_three(df_contrast, chose_score,
matrix_extent=start,
matrix_sorted='all',
dfs=dfs) # gridspacing=0.02,
if chose_score == 'mean_nrs':
if len(resorted) > 0:
pivot_chosen = resorted
# autodefine is the version that takes the maximum
if autodefine == 'autodefine':
# embed()
c1_ub = 1.25
c1_lb = 0.75
c2_ub = 0.85
c2_lb = 0.55
c3_lb = 1.25
c3_ub = 1.55
        # this is if we want to restrict it to the female-male range
if cut_matrix == 'malefemale':
diff_cut = cut_matrix_df_choice(pivot_chosen, c1_ub, c1_lb, c2_lb, c2_ub, c3_lb, c3_ub)
else:
diff_cut = pivot_chosen
try:
max_val = diff_cut.stack().iloc[np.argmax(diff_cut.stack())]
max_y, max_x = diff_cut.stack().index[np.argmax(diff_cut.stack())]
# bothmax = True
if chose_score == 'mean_nrs':
diff_cut.loc[max_y, max_x] = float('nan')
min_val = diff_cut.stack().iloc[np.argmax(diff_cut.stack())]
min_y, min_x = diff_cut.stack().index[np.argmax(diff_cut.stack())]
else:
min_val = diff_cut.stack().iloc[np.argmin(diff_cut.stack())]
min_y, min_x = diff_cut.stack().index[np.argmin(diff_cut.stack())]
if '1' in diff_cut.stack().index.names[0]:
DF1_desired = [max_y, min_y]
DF2_desired = [max_x, min_x]
else:
DF1_desired = [max_x, min_x]
DF2_desired = [max_y, min_y]
except:
            print('problem min val')
embed()
mult = True
fr = df_contrast.iloc[0].fr # .unique()#.iloc[indices]
fr = [fr] * len(DF1_desired)
eod_fr = df_contrast.iloc[0].EODf
divergence = (np.array(DF1_desired) + np.array(DF2_desired) - 2) * eod_fr - fr
# embed()
# max_y, min_y, max_x, min_x, DF1_desired, DF2_desired = find_maximal_pos(mult, max_val, min_val)
# df_contrast.fr
elif autodefine == 'autodefine_sorted':
# embed()
c1_ub = 1.25
c1_lb = 0.75
c2_ub = 0.85
c2_lb = 0.55
c3_lb = 1.25
c3_ub = 1.55
        # this is if we want to restrict it to the female-male range
if cut_matrix == 'malefemale':
diff_cut = cut_matrix_df_choice(pivot_chosen, c1_ub, c1_lb, c2_lb, c2_ub, c3_lb, c3_ub)
else:
diff_cut = pivot_chosen
try:
max_val = diff_cut.stack().iloc[np.argmax(diff_cut.stack())]
# embed()
max_y, max_x = diff_cut.stack().index[np.argmax(diff_cut.stack())]
# bothmax = True
if chose_score == 'mean_nrs':
diff_cut.loc[max_y, max_x] = float('nan')
min_val = diff_cut.stack().iloc[np.argmax(diff_cut.stack())]
min_y, min_x = diff_cut.stack().index[np.argmax(diff_cut.stack())]
else:
min_val = diff_cut.stack().iloc[np.argsort(diff_cut.stack())[::-1]]
# min_y, min_x = diff_cut.stack().index[np.argmin(diff_cut.stack())]
min_y = []
min_x = []
arr = np.asarray(min_val.index)
DF2_desired = []
DF1_desired = []
# embed()
for a in range(len(arr)):
if '2' in min_val.index.names[0]:
DF2_desired.append(arr[a][0])
DF1_desired.append(arr[a][1])
else:
DF1_desired.append(arr[a][0])
DF2_desired.append(arr[a][1])
# embed()
# arr.flatten()
except:
            print('problem min val')
embed()
mult = True
fr = df_contrast.iloc[0].fr # .unique()#.iloc[indices]
fr = [fr] * len(DF1_desired)
eod_fr = df_contrast.iloc[0].EODf
divergence = (np.array(DF1_desired) + np.array(DF2_desired) - 2) * eod_fr - fr
# embed()
# max_y, min_y, max_x, min_x, DF1_desired, DF2_desired = find_maximal_pos(mult, max_val, min_val)
# df_contrast.fr
elif autodefine == '_dfchosen_':
min_y = []
min_x = []
min_val = []
diff_cut = []
# pivot_chosen = []
max_val = []
max_x = []
max_y = []
mult = []
divergence = []
# embed()
try:
fr = [df_contrast.iloc[0].fr] * len(DF1_desired)
except:
print('fr7 problem')
# embed()
# min_y = []
# min_x = []
# min_val = []
elif 'dfchosen_closest' in autodefine: #
min_y = []
min_x = []
min_val = []
diff_cut = []
# pivot_chosen = []
max_val = []
max_x = []
max_y = []
mult = []
divergence = []
# embed()
DF1_desired_here = df_contrast.m1
DF2_desired_here = df_contrast.m2
try:
DF1_desired = DF1_desired_here.iloc[
np.argsort(np.abs(DF1_desired_here - (DF1_desired / df_contrast.EODf + 1)))]
except:
try:
DF1_desired = DF1_desired_here[
np.argsort(np.abs(DF1_desired_here - (DF1_desired / df_contrast.EODf + 1)))]
except:
print('df something')
embed()
try:
DF2_desired = DF2_desired_here.iloc[
np.argsort(np.abs(DF2_desired_here - (DF2_desired / df_contrast.EODf + 1)))]
except:
DF2_desired = DF2_desired_here[np.argsort(np.abs(DF2_desired_here - (DF2_desired / df_contrast.EODf + 1)))]
# 'dfchosen_closest'
if 'first' in autodefine:
DF1_desired = [DF1_desired.iloc[0]]
DF2_desired = [DF2_desired.iloc[0]]
try:
fr = [df_contrast.iloc[0].fr] * len(DF1_desired)
except:
print('fr5 problem')
# _dfchosen_closest_
# embed()
elif 'triangle' in autodefine:
# pivot_chosen, _, indexes, resorted, orientation, cut_type = get_data_pivot_three(
# dev,
# df_contrast,
# contrast,
# chose_score,
# matrix_extent=start,
# dfs=dfs) # gridspacing=0.02,
min_y = []
min_x = []
min_val = []
diff_cut = []
# pivot_chosen = []
max_val = []
max_x = []
max_y = []
mult = []
if 'fr' in autodefine:
fr_subtract = df_contrast.fr
# embed()
if len(fr_subtract) > 0:
# embed()
if 'no' in list(fr_subtract):
load_name = load_folder_name(
'threefish') + '/calc_auc_three_MeanTrialsIndexPhaseSort_Min0.25sExcluded_Same_Count_multsorted2__pureEODf_allindices4__' + cell + 'three_AUCI.pkl'
frame_o = pd.read_pickle(load_name)
frame = frame_o[frame_o['cell'] == cell]
df = frame[frame[contrast_small] == contrast]
df_dev = df[df['dev'] == dev]
df_datapoint = df_dev[df_dev['datapoints'] == df_dev['datapoints'].unique()[0]] # d
df_contrast = df_datapoint[df_datapoint[contrast_big] == contrast1]
fr_subtract = df_contrast.fr
# embed()
else:
fr_subtract = df_contrast.EODf / 2
try:
min_df2 = np.abs(np.abs(df_contrast.DeltaF2) - fr_subtract)
except:
print('fr4 problem')
embed()
min_df1 = np.abs(np.abs(df_contrast.DeltaF1) - fr_subtract)
sum_diagonal = np.abs(np.abs(df_contrast.DeltaF1) + np.abs(df_contrast.DeltaF2) - fr_subtract)
difference_diagonal = np.abs(np.abs(df_contrast.DeltaF1) - (np.abs(df_contrast.DeltaF2) - fr_subtract))
lower_diagonal = np.abs(np.abs(df_contrast.DeltaF1) + np.abs(df_contrast.DeltaF2) * 2 - fr_subtract)
if '2diagonal' in autodefine:
sorted_sum = np.sort(difference_diagonal)
sorted_argsum = np.argsort(difference_diagonal)
divergence = sorted_sum[sorted_sum < limit]
indices = sorted_argsum[sorted_sum < limit]
DF1_desired = df_contrast.iloc[indices].m1
DF2_desired = df_contrast.iloc[indices].m2
# embed()
elif 'diagonal' in autodefine:
sorted_sum = np.sort(sum_diagonal)
sorted_argsum = np.argsort(sum_diagonal)
divergence = sorted_sum[sorted_sum < limit]
indices = sorted_argsum[sorted_sum < limit]
DF1_desired = df_contrast.iloc[indices].m1
DF2_desired = df_contrast.iloc[indices].m2
elif 'df1' in autodefine:
sorted_sum = np.sort(min_df1)
sorted_argsum = np.argsort(min_df1)
indices = sorted_argsum[sorted_sum < limit]
DF1_desired = df_contrast.iloc[indices].m1
DF2_desired = df_contrast.iloc[indices].m2
elif 'df2' in autodefine:
sorted_sum = np.sort(min_df2)
sorted_argsum = np.argsort(min_df2)
indices = sorted_argsum[sorted_sum < limit]
DF1_desired = df_contrast.iloc[indices].m1
DF2_desired = df_contrast.iloc[indices].m2
else:
DF1_desired = []
DF2_desired = []
divergence = []
indices = []
arrays = [lower_diagonal, difference_diagonal, min_df1, min_df2, sum_diagonal]
for array in arrays:
DF1_desired, DF2_desired, divergence, indices = append_dfs(indices, divergence, DF1_desired,
DF2_desired, array, limit, df_contrast)
# embed()
fr = np.array(df_contrast.iloc[indices].fr)
test = False
if test:
test_extra()
else:
DF1_desired = df_contrast.m1
DF2_desired = df_contrast.m2
fr = np.array(df_contrast.fr)
max_val = []
max_x = []
max_y = []
min_x = []
min_y = []
min_y = []
min_val = []
mult = []
diff_cut = []
sum_diagonal = np.abs(np.abs(df_contrast.DeltaF1) + np.abs(df_contrast.DeltaF2) - fr)
divergence = np.array(sum_diagonal)
print('working on ')
# embed()
DF1_desired = list(map(float, DF1_desired))
DF2_desired = list(map(float, DF2_desired))
# embed()
# try:
# divergence
# except:
# print('divergence problem')
# embed()
t2 = time.time()
print('first steps ' + str(t2 - t1))
return divergence, fr, pivot_chosen, max_val, max_x, max_y, mult, DF1_desired, DF2_desired, min_y, min_x, min_val, diff_cut
# pivot_chosen, max_val, max_x, max_y, mult, DF1_desired, DF2_de
def append_dfs(indices_all, divergnce, DF1_desired, DF2_desired, second_diagonal, limit, df_contrast):
sorted_sum = np.sort(second_diagonal)
sorted_argsum = np.argsort(second_diagonal)
indices = sorted_argsum[sorted_sum < limit]
divergnce.extend(sorted_sum[sorted_sum < limit])
DF1_desired.extend(df_contrast.iloc[indices].m1)
indices_all.extend(indices)
DF2_desired.extend(df_contrast.iloc[indices].m2)
return DF1_desired, DF2_desired, divergnce, indices_all
def cut_matrix_df_choice(pivot_chosen, c1_ub, c1_lb, c2_lb, c2_ub, c3_lb, c3_ub):
try:
if '1' in pivot_chosen.index.name:
diff_cut = pivot_chosen.loc[(pivot_chosen.index < c1_ub) & (pivot_chosen.index > c1_lb)]
diff_cut = diff_cut[pivot_chosen.columns[
(pivot_chosen.columns < c2_ub) & (pivot_chosen.columns > c2_lb) | (
pivot_chosen.columns < c3_ub) & (pivot_chosen.columns > c3_lb)]]
elif '2' in pivot_chosen.index.name:
diff_cut = pivot_chosen.loc[
(pivot_chosen.index < c2_ub) & (pivot_chosen.index > c2_lb)]
diff_cut = diff_cut[pivot_chosen.columns[
(pivot_chosen.columns < c1_ub) & (pivot_chosen.columns > c1_lb)]]
except:
print('diff cut')
embed()
return diff_cut
def check_dirs(cell):
all_dirs = os.listdir(load_folder_name('threefish') + '/')
name = []
for dir in all_dirs:
if cell in dir:
name.append(dir)
return name
def load_three_fish_savename(mean_type, chirps, mult_type, eodftype, indices, cell, extract, devname='', nfft='',
AUCI='AUCI', sqrt='_sqrt_', points=None):
    ###############
    # NEW VERSION, THE OLD ONE IS WITHOUT THE TWO!
if devname:
if ('original' in devname) & ('05' in devname):
devname_save = ''
else:
devname_save = '_dev_' + str(devname) + '_' # '_'.join(devname)
else:
devname_save = ''
# embed()
if nfft != '':
nfft_name = '_nfft_' + str(nfft)
else:
nfft_name = ''
    # possibly change this to calc_ROC_data..
if points:
points_name = '_points' + str(points)
else:
points_name = ''
save_name = load_folder_name(
'threefish') + '/calc_auc_three' + mean_type + chirps + mult_type + eodftype + indices + '_' + cell + extract + devname_save + nfft_name + 'three_' + AUCI + sqrt + points_name + '.pkl'
save_name_all = load_folder_name(
'threefish') + '/calc_auc_three' + mean_type + chirps + mult_type + eodftype + indices + '_' + extract + devname_save + nfft_name + 'three_' + AUCI + sqrt + points_name + '.pkl'
save_name = save_name.replace(' ', '')
save_name_all = save_name_all.replace(' ', '')
save_name = save_name.replace(',', '_')
save_name_all = save_name_all.replace(',', '_')
save_name_all = save_name_all.replace("'", '_')
save_name = save_name.replace("'", '_')
# if not os.path.exists(save_name):
# dir_names, dir_names_cells,dir_names_all = search_all_files(chirps, extract, indices, mean_type, mult_type)
# print('save name not there')
# #embed()
return save_name, save_name_all
def colors_cocktailparty():
global color_base, color_01, color_02, color_012
color_base = 'magenta'#'blue'
color_01 = 'green'
color_02 = 'red'
color_012 = 'orange' # 'orange'
return color_base, color_01, color_02, color_012
def colors_didactic():
global color
color = ['red', 'purple', 'orange', 'green'] # 'green', 'red'
return color
def get_RAM_stimulus(cell, exponential, model_cells):
model_params = model_cells[model_cells.cell == cell].iloc[0]
# deltat = model_params["deltat"]
# p#arameters = load_models("models.csv")
stimulus_length = 1
noise_strength = model_params.noise_strength # **2/2
a_fr = 1 # ,0]#0,,0,]#,0 ,0 ] # ,0,]#0#1
input_scaling = model_params.input_scaling
stimulus_length_here = stimulus_length
eod_fr = model_params['EODf']
deltat = model_params.pop("deltat")
time_here = np.arange(0, stimulus_length_here * 2, deltat)
eod_fish_r, deltat, eod_fr, time_array = eod_fish_r_generation(time_here, eod_fr, a_fr,
stimulus_length_here * 2)
cut_offs = [eod_fr / 2] # 300,400, 5000,
var_type = ''
noise_added = '' ##'_noiseadded_'#''#'_noiseadded_'
cut_off = cut_offs[0]
white_noise, freq_wn = whitenoise(0, cut_off, deltat, stimulus_length,
rng=np.random)
input_scaling = model_params.input_scaling
noise_strength = model_params.noise_strength # **2/2
d = noise_strength # (noise_strength ** 2) / 2
# var2 = (d * c_sig) * 2 / deltat
white_noise = white_noise[0:-1]
# if 'additiv' in var_type:
# RAM = white_noise * (np.sqrt(np.var(RAM2)))
# else:
a_fe = 0.2
RAM = white_noise * a_fe
# embed()#
# carrier_RAM = (1 + np.concatenate([RAM, RAM])) * eod_fish_r
carrier_RAM = (1 + np.concatenate([RAM, RAM])) * eod_fish_r
# embed()
am_extracted = extract_am(carrier_RAM, np.arange(0, len(carrier_RAM) * deltat, deltat), norm=False)[0]
nfft = 2 ** 14
# embed()
adapt_offset = 'adaptoffset_bisecting'
c_sig = 0.9
c_sig = 0 # 0.45,0.9, 1.8]#0.9,0.1,1.5]#1.5#1.5#0.9#0.9#0.9#0.9
c_noise = 1 # ,0.9]#0.9#np.round(1 - c_sig,1)
adapt_type = '' # ,'_noAdapt_']#,,,]#'_noAdapt_','']
ref_type = '' # ,'_NoRef_']#,'_NoRef_''''_NoRef_''',]#'_NoRef_''',]#''##''##''##''##''##''#
dendrid = '' # ,'Stimulus_wo_dendrid',]#'Stimulus_wo_dendrid''''','Stimulus_wo_dendrid''''',]#'Stimulus_wo_dendrid',#,, ] # ,''''''''','noDendrid',]#''#'noDendrid'
##########################
# model part
model_params = model_cells[model_cells.cell == cell].iloc[0]
cell = model_params.pop('cell')
eod_fr = model_params['EODf']
deltat = model_params.pop("deltat")
offset = model_params.pop("v_offset")
trials_nr = 15
spikes = [[]] * trials_nr
for t in range(trials_nr):
cvs, adapt_output, baseline_after, spike_adapted, rate_adapted, rate_baseline_before, rate_baseline_after, \
spikes[t], stimulus_power, v_dent, offset, v_mem_output, noise_final = simulate(cell, offset, carrier_RAM,
deltat=deltat,
adaptation_variant=adapt_offset,
adaptation_yes_e=0,
adaptation_yes_t=0,
power_variant='sinz',
power_nr=1,
noisesplit_c_noise=c_noise,
noisesplit_noise_added=noise_added,
noisesplit_var_type=var_type,
noisesplit_cut_off=cut_off,
noisesplit_c_sig=c_sig,
LIF_ref_type=ref_type,
LIF_adapt_type=adapt_type,
LIF_dendrid=dendrid,
LIF_exponential=exponential,
**model_params)
v_dent_extracted = extract_am(v_dent, np.arange(0, len(v_dent) * deltat, deltat), norm=False)[0]
# embed()
base_cut, mat_base = find_base_fr(spikes, deltat, stimulus_length * 2, time_array, dev=0.001)
arrays = [
carrier_RAM,
'',
mat_base,
] # ,RAM400, am_extracted400-np.mean(am_extracted400)]RAM,eod_fish_r[len(RAM):-1],
arrays2 = [
RAM + 1,
'',
''] # ,RAM400, am_extracted400-np.mean(am_extracted400)]'','',
# embed()
# titles = ['Noise', 'RAM', 'RAM*Carrier',
# 'RAM*Carrier -> RAM', 'V_dent', 'V_dent -> RAM']
colors = ['grey', 'black', 'black', 'black', 'black']
return arrays, arrays2, colors, deltat, spikes
def find_optimal_vs(spikes_s, eod_fs):
values = []
phases = []
for eod in eod_fs:
vs_here = calc_vectorstrength(spikes_s, 1 / eod)
# vs_here = vectorstrength(spikes_s, 1 / eod)
values.append(vs_here[0])
phases.append(vs_here[1])
optimal_eodf = eod_fs[np.argmax(values)]
# phases[np.argmax(values)]
vs = [np.max(values), phases[np.argmax(values)]]
return optimal_eodf, vs
def calc_vectorstrength(spikes_s, eod_period):
return vectorstrength(spikes_s, eod_period)
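# Hedged sketch (illustration only): scan a range of candidate EOD frequencies and pick the
# one the spikes lock to best with find_optimal_vs(). The spike train below is perfectly
# locked to 750 Hz, so the optimum should be found there; all values are examples.
def _example_optimal_vector_strength():
    eod_fs = np.arange(700.0, 800.0, 1.0)
    spikes_s = np.arange(0.0, 1.0, 1.0 / 750.0)   # one spike per 750 Hz cycle
    optimal_eodf, (strength, phase) = find_optimal_vs(spikes_s, eod_fs)
    return optimal_eodf, strength, phase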
def cut_spike_snippets(spikes_folded, eod=[], zero_crossings=[], time_eod=[], eod_based=False, array_cut2=[],
array_cut=[], norming=True, sampling_rate=40000, skip_factor=1, end=200, period_based=False,
ms_factor=1000, smoothened=[]):
# embed()
    # todo: maybe save this as well
# [a for zero_crossings]
eods_cut = []
times_cut = []
spikes_cut = []
smoothed_cut = []
if not period_based:
if len(zero_crossings) < 1:
eod_period, zero_crossings, zero_crossings_eod2 = find_mean_period(
eod, sampling_rate)
if end:
if int(len(zero_crossings) - 1) > end:
crossings_length = end
else:
crossings_length = int(len(zero_crossings) - 1)
else:
crossings_length = int(len(zero_crossings) - 1)
else:
crossings_length = int(len(array_cut2) - 1)
for sp in range(crossings_length): # int(len(zero_crossings)/2-2
# print('sp')
# embed()
if period_based:
cut = array_cut2[sp]
cut_next = array_cut2[sp + 1]
else:
if eod_based:
cut = array_cut[sp]
cut_next = array_cut[sp + 1]
else:
                # this is now in seconds
                zer = zero_crossings
                cut = zer[int(sp)]  # [0]
                cut_next = zer[int(sp + 1)]  # [0]  # this is in seconds
spikes_s = np.array(spikes_folded) / ms_factor
cut_sp = np.array(spikes_s)[(spikes_s > cut) & (spikes_s < cut_next)] - cut
#
        # the skip factor still needs to go in here
if norming:
norm = 2 * np.pi / (cut_next - cut)
else:
norm = 1
spikes_cut.append(cut_sp * norm)
# embed()
if len(smoothened) > 0:
times_here = time_eod
smoothed_cut.append(smoothened[(times_here > cut) & (times_here < cut_next)])
if len(eod) > 0:
times_here = time_eod
eods_cut.append(eod[(times_here > cut) & (times_here < cut_next)])
time_cut = times_here[(times_here > cut) & (times_here < cut_next)]
if len(time_cut) > 0:
# embed()
times_cut.append(skip_factor * (time_cut - time_cut[0]) * norm)
else:
times_cut.append(time_cut)
else:
times_cut = []
eods_cut = []
return eods_cut, spikes_cut, times_cut, cut_next, smoothed_cut
def get_max_len(spikes_mats):
lengths = []
eod_intep_length = []
for ss, s in enumerate(spikes_mats):
lengths.append(len(s))
# eod_intep_length.append(len(eod_interps[ss]))
try:
max_len = np.max(lengths)
# max_len_e = np.max(eod_intep_length)
except:
print('max len problem')
embed()
return max_len, np.argmax(lengths)
def pad_for_mean(spikes_mats, max_len):
for s in range(len(spikes_mats)):
if len(spikes_mats[s]) != max_len:
# spikes_mats[s] = np.ones(max_len)*float('nan')
spikes_mats[s] = np.pad(spikes_mats[s],
((0, max_len - len(spikes_mats[s]))),
mode='constant', constant_values=(np.nan,))
return spikes_mats
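# Hedged sketch (illustration only): average ragged per-trial arrays by padding them to a
# common length with NaNs (pad_for_mean) and taking a nanmean. The two toy snippets are
# example values.
def _example_pad_for_mean():
    snippets = [np.array([1.0, 2.0, 3.0]), np.array([2.0, 4.0])]
    max_len, longest_idx = get_max_len(snippets)
    padded = pad_for_mean(snippets, max_len)
    return np.nanmean(padded, axis=0)             # -> [1.5, 3.0, 3.0]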
def cut_spikes_to_certain_lenght_in_period(time_eod, axs, eod, eod_based, nrs, spikes_folded, xlim, zero_crossings,
array_cut=[]):
spikes_cut = []
eods_cut = []
times_cut = []
times_here = time_eod * 1000
for sp in range(nrs):
if eod_based:
cut = array_cut[sp]
cut_next = array_cut[sp + 1]
else:
zer = zero_crossings * 1000
try:
cut = zer[zer > xlim[1] * sp][0]
except:
print('cut zer thing')
embed()
cut_next = zer[zer > xlim[1] * (sp + 1)][0]
spikes_cut.append(np.array(spikes_folded)[(spikes_folded > cut) & (spikes_folded < cut_next)] - cut)
eods_cut.append(eod[(times_here > cut) & (times_here < cut_next)])
time_cut = times_here[(times_here > cut) & (times_here < cut_next)]
times_cut.append(time_cut - time_cut[0])
test_eod = False
    if test_eod:  # todo: maybe optimize again, as for the beat, for the most similar cycles
axs.plot(times_cut[-1] - cut, eods_cut[-1], color='grey')
return spikes_cut, eods_cut, times_cut
def find_alpha_val(aa, amps_defined):
alpha_min = 0.5 # 25
alpha = 1 - alpha_min * (len(amps_defined) - 1 - aa)
return alpha
def load_models(file):
""" Load model parameter from csv file.
Parameters
----------
file: string
Name of file with model parameters.
Returns
-------
parameters: list of dict
For each cell a dictionary with model parameters.
"""
parameters = []
with open(file, 'r') as file:
header_line = file.readline()
header_parts = header_line.strip().split(",")
keys = header_parts
for line in file:
line_parts = line.strip().split(",")
parameter = {}
for i in range(len(keys)):
parameter[keys[i]] = float(line_parts[i]) if i > 0 else line_parts[i]
parameters.append(parameter)
return parameters
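# Hedged usage sketch (illustration only): read the model parameter table with load_models()
# and pull out the first cell's dictionary. The file name follows the
# 'models_big_fit_d_right.csv' convention used in this module; that the file exists locally
# and that all columns except the first are numeric are assumptions.
def _example_load_models():
    parameters = load_models(load_folder_name('calc_model_core') + '/models_big_fit_d_right.csv')
    first_cell = parameters[0]                    # dict with keys like 'cell', 'EODf', 'deltat'
    return first_cell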
def define_baseline_for_adaptation(baseline_without_wave, b_with_wave_and_damping, length=7000):
# output
    # b_before_every_thing - so this is actually the real baseline
    # b_threshold - but we use this since the model was fitted to the thresholded version, therefore it is the ground truth
# b_before_power - this is not rectified since all the modifications will happen in the nonlinearity function
# so here we take the baseline_without_wave as the baseline and therefore test the effects of damping, wave and power
# but potentially all three can be disentangled in case of utilizing baseline_with_wave_damping, baseline_without_wave_damping, baseline_with_wave, baseline_without_wave,
baseline_before = baseline_without_wave * 1
b_before_every_thing = baseline_before[0:length]
b_threshold = baseline_before[0:length]
b_threshold[b_threshold < 0.0] = 0.0
# baseline_with_wave_damping is the last modification until now but before power modification
# b_after = baseline_with_wave_damping.copy()
b_before_power = b_with_wave_and_damping[0:length]
b_threshold_before_power = b_with_wave_and_damping[0:length]
b_threshold_before_power[b_threshold_before_power < 0.0] = 0.0
return b_before_every_thing, b_threshold, b_threshold_before_power, b_before_power
def vmem_spikes(stimulus, save=False):
ax = plt.subplot(2, 1, 1)
ax.plot(time[0:len(stimulus)], stimulus)
ax.set_title('stimulus')
ax.set_xlim([0.1, 0.15])
ax = plt.subplot(2, 1, 2)
plt.plot(time[0:len(stimulus)], v_mem_output1)
ax.set_title('voltage')
ax.set_xlim([0.1, 0.15])
plt.axhline(y=1, color='pink')
plt.scatter(spikes_baseline_before, np.ones(len(spikes_baseline_before)) * 1.2, color='black', s=25)
if save:
plt.savefig(r'C:\Users\alexi\OneDrive - bwedu\Präsentations\2021.03.05\voltagetrace')
plt.show()
def stimulate_body_simple(v_dent_output, stimulus, v_dend, deltat, v_base, v_mem, noise, mem_tau, threshold,
spikes='spikes'):
spike_times = []
v_mem_output = np.zeros(len(stimulus))
adapt_output = np.zeros(len(stimulus))
for i in range(len(stimulus)):
####################################################
        # dendritic part
# standard part
v_dent_output[i] = v_dend
####################################################
# exponential part
####################################################
# v -mem part
v_mem += (- v_mem + stimulus[i] + noise[i]) / mem_tau * deltat # +v_base + v_offset
# v_mem = v_base
###########################################################
# threshold crossing:
if spikes == 'spikes':
if v_mem > threshold:
# embed()
v_mem = v_base
spike_times.append(i * deltat)
v_mem_output[i] = v_mem
# embed()
# test = False
# if test:
# embed()
return adapt_output, spike_times, v_dent_output, v_mem_output
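# Minimal sketch of driving stimulate_body_simple with a sinusoidal stimulus and Gaussian
# noise; all parameter values below are made-up illustrations, not fitted model constants,
# and the helper name is hypothetical.
def _example_stimulate_body_simple():
    deltat = 1.0 / 20000.0                       # 20 kHz integration step
    time_ex = np.arange(0, 0.5, deltat)          # 0.5 s of stimulus
    stimulus = 1.2 + 0.3 * np.sin(2 * np.pi * 50 * time_ex)
    noise = np.random.normal(0, 0.05, len(stimulus))
    v_dent_output = np.zeros(len(stimulus))
    adapt_output, spike_times, v_dent_output, v_mem_output = stimulate_body_simple(
        v_dent_output, stimulus, v_dend=0.0, deltat=deltat, v_base=0.0, v_mem=0.0,
        noise=noise, mem_tau=0.005, threshold=1.0)
    return spike_times, v_mem_output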
def find_cell_recording_save_name(cell_recording):
if cell_recording == '':
cell_recording_save_name = ''
else:
cell_recording_save_name = '_cellrecording_' + cell_recording # + max_type
return cell_recording_save_name
def calc_cv_adapt(save_name, v_exp, upper_tol, lower_tol, trials_pop, extract, stim_type_afe, cut_off2, c_noise, c_sig,
cut_off_type, nfft_for_morph, noise_name, var, T, nfft, stim_type_noise, cut_off1, model_cells,
cell_recording='', fishe_receiver='Alepto', n=1, variant='sinz', fish_morph_harmonics_vars='',
trans=1, noise_added='', ref_type='', dendrid='', damping_type='', exp_tau=0, variance=1,
stimulus_length=1, adapt_offset='', a_fe=0, mimick='no', zeros='zeros', adapt_type='',
phaseshift_fr=0, gain=1, exponential='', a_fr=1, us_name='', var_type_scale='',
var_type='additiv_cv_adapt_scaled'):
model = pd.read_csv(load_folder_name('calc_model_core') + '/models_big_fit_d_right.csv')
cells = model.cell.unique()
frame = pd.DataFrame()
for cell in cells:
##################################################
# initial CV estimation
length = var_type_scale.replace('additiv_cv_adapt_factor_scaled', '')
cvs_here = []
trial_nr = 80
for t in range(trial_nr):
height = 0
# print(cell + ' ' + str(a_fe))
model_params = model_cells[model_cells.cell == cell].iloc[0]
cell = model_params.pop('cell')
eod_fr = model_params['EODf']
deltat = model_params.pop("deltat")
offset = model_params.pop("v_offset")
tt = 0
# height, cv_base, cvs,spikes, spike_adapted, carrier_RAM
height, cv_base, cvs, spikes, spike_adapted, carrier_RAM = adapt_cv_stimulus(
model_params, eod_fr, deltat, offset, variance, tt, gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau, v_exp,
upper_tol,
lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph, a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1, cell,
a_fr=a_fr, dendrid=dendrid, trans=trans, noise_added=noise_added, extract=extract,
stim_type_afe=stim_type_afe, cut_off2=cut_off2, ref_type=ref_type, var_type=var_type,
adapt_type=adapt_type,
c_noise=c_noise, c_sig=c_sig, cut_off_type=cut_off_type, height=height, )
# embed()
isi = np.diff(spikes[0][spikes[0] > 1])
mean_isi = np.mean(isi)
cv_isi = np.std(isi) / mean_isi
cvs_here.append(cv_isi)
cv_start = np.mean(cvs_here)
# embed()
if len(length) > 0:
cv_base = float(length)
upper_tol_cv = cv_base + 0.01 # 5#np.std(cvs_here)
lower_tol_cv = cv_base - 0.01 # 5#np.std(cvs_here)
else:
# var_type = 'additiv_cv_adapt_factor_scaled0.2'
upper_tol_cv = cv_base.iloc[0] + 0.01 # 5#np.std(cvs_here)
lower_tol_cv = cv_base.iloc[0] - 0.01 # 5#np.std(cvs_here)
# embed()
# if cv_start < cv_base:
# if 'adaptoffset_bisecting' in adapt_offset:
# embed()
##################################################
        # initial CV bisecting
# embed()
# try:
height_new, heights, cvs_adapt, adaptation_state, cv_base = do_adaptation_cv(upper_tol_cv, lower_tol_cv,
trial_nr, cv_start, model_params,
eod_fr, deltat, offset, variance,
tt,
gain, zeros,
fish_morph_harmonics_vars,
cell_recording, n, variant,
exponential,
adapt_offset, exp_tau,
v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name,
damping_type, mimick,
fishe_receiver, nfft_for_morph,
a_fe,
stimulus_length,
noise_name, var, T, nfft,
stim_type_noise, cut_off1, cell,
a_fr, dendrid, trans,
noise_added, extract,
stim_type_afe, cut_off2, ref_type,
var_type,
adapt_type,
c_noise, c_sig, cut_off_type,
height)
# except:
# print('unpack problem')
# embed()
if heights != []:
# []
noise_strength = model_params.noise_strength # **2/2
input_scaling = model_params.input_scaling
d = noise_strength # (noise_strength ** 2) / 2
var_val = (d * c_sig) * 2 # / deltat
if 'scaled' in var_type:
# height = np.sqrt(var_val * stimulus_length) * factor / input_scaling # * sigma#/deltat#stimulus_length
factor = (height_new * input_scaling) / np.sqrt(var_val * stimulus_length)
else:
factor = (height_new) / np.sqrt(var_val * stimulus_length)
# isi = np.diff(spikes[0])
# cv_spikes= np.std(isi) / np.mean(isi)
# isi_a = np.diff(spike_adapted[0])
# cv_spikes_a = np.std(isi_a) / np.mean(isi_a)
# calc_cv_model()
frame.loc['factor', cell] = factor
frame.loc['height', cell] = height_new
frame.loc['adaptation', cell] = adaptation_state
frame.loc['cv_start', cell] = cvs_adapt[0]
frame.loc['cv_end', cell] = cvs_adapt[-1]
frame.loc['cv_base', cell] = cv_base # .iloc[0]
# embed()
frame.to_csv(save_name + '.csv') # ,index=False
return frame
def do_adaptation_cv(upper_tol_cv, lower_tol_cv, trial_nr, cv_start, model_params, eod_fr, deltat, offset, variance, tt,
gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau, v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph, a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1, cell,
a_fr, dendrid, trans, noise_added, extract,
stim_type_afe, cut_off2, ref_type, var_type,
adapt_type,
c_noise, c_sig, cut_off_type, height):
heights = [height]
cvs_here = [cv_start]
if (cv_start > upper_tol_cv):
height_bisecting = np.arange(height, height / 500, -height * 0.1)
height_bisecting = list(height_bisecting)
height_bisecting.extend([0])
# height_bisecting = np.logspace(height, height / 500, num=25)
elif (cv_start < lower_tol_cv):
height_bisecting = np.arange(height, height * 100, height)
else:
height_bisecting = []
frame = pd.read_csv(load_folder_name('calc_base') + '/cv.csv')
frame = frame.sort_values(by='cv')
fr_base = frame[frame.cell == cell].fr # np.array(model_cells['cell'])
cv_base = frame[frame.cell == cell].cv
return height, heights, cvs_here, 'no', cv_base.iloc[0]
# if cell == '2012-06-27-ah-invivo-1':
# embed()
for o, height_test in enumerate(height_bisecting[1::]):
cvs_adapt = []
for t in range(trial_nr):
# tt = 0
# embed()
height, cv_base, cvs, spikes, spike_adapted, carrier_RAM = adapt_cv_stimulus(
model_params, eod_fr, deltat, offset, variance, tt, gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau, v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph, a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1, cell,
a_fr=a_fr, dendrid=dendrid, trans=trans, noise_added=noise_added, extract=extract,
stim_type_afe=stim_type_afe, cut_off2=cut_off2, ref_type=ref_type, var_type=var_type,
adapt_type=adapt_type,
c_noise=c_noise, c_sig=c_sig, cut_off_type=cut_off_type, height=height_test)
# embed()
isi = np.diff(spikes[0][spikes[0] > 1])
mean_isi = np.mean(isi)
cv_isi = np.std(isi) / mean_isi
cvs_adapt.append(cv_isi)
cvs_mean_adapted = np.mean(cvs_adapt)
heights.append(height)
cvs_here.append(cvs_mean_adapted)
print(' CV ' + str(cvs_mean_adapted) + ' height ' + str(height))
# embed()
if (cvs_mean_adapted < upper_tol_cv) & (cvs_mean_adapted > lower_tol_cv):
print('finished adaptation in function')
# embed()
return height, heights, cvs_here, 'no', cv_base.iloc[0]
        # find the moment when we start bisecting in the previously equally spaced height array
elif ((cv_start > upper_tol_cv) & (cvs_mean_adapted < upper_tol_cv)) | (
(cv_start < lower_tol_cv) & (cvs_mean_adapted > lower_tol_cv)):
print('STARTED BISECTING')
step = np.abs(np.diff(height_bisecting)[0]) / 2
            # the current value minus/plus the step yields the new best guess
if (cvs_mean_adapted > upper_tol_cv):
height_prev = height_bisecting[o + 1] - step
else:
height_prev = height_bisecting[o + 1] + step
            # here the recursive height-adaptation function is called
heights, cvs_here, step, height = recurent_adapt_height(cvs_here, upper_tol_cv, lower_tol_cv, heights, step,
height_prev, trial_nr, model_params, eod_fr, deltat,
offset,
variance, tt,
gain, zeros,
fish_morph_harmonics_vars, cell_recording, n,
variant,
exponential,
adapt_offset,
exp_tau,
v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type,
mimick,
fishe_receiver,
nfft_for_morph,
a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1,
cell, a_fr,
dendrid,
trans, noise_added, extract,
stim_type_afe, cut_off2, ref_type, var_type,
adapt_type,
c_noise, c_sig, cut_off_type)
            print('finished adaptation bisecting')
# embed()
return height, heights, cvs_here, 'yes', cv_base.iloc[0]
print('not converged')
return [], [], [], [], []
def recurent_adapt_height(cvs_here, upper_tol_cv, lower_tol_cv, heights, step, height_prev, trial_nr, model_params,
eod_fr, deltat, offset, variance, tt, gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau,
v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph,
a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1, cell, a_fr, dendrid, trans,
noise_added, extract,
stim_type_afe, cut_off2, ref_type, var_type,
adapt_type,
c_noise, c_sig, cut_off_type):
cvs_adapt = []
for t in range(trial_nr):
height, cv_base, cvs, spikes, spike_adapted, carrier_RAM = adapt_cv_stimulus(
model_params, eod_fr, deltat, offset, variance, tt, gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau,
v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph,
a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1, cell,
a_fr=a_fr, dendrid=dendrid, trans=trans, noise_added=noise_added, extract=extract,
stim_type_afe=stim_type_afe, cut_off2=cut_off2, ref_type=ref_type, var_type=var_type,
adapt_type=adapt_type,
c_noise=c_noise, c_sig=c_sig, cut_off_type=cut_off_type, height=height_prev)
# embed()
isi = np.diff(spikes[0][spikes[0] > 1])
mean_isi = np.mean(isi)
cv_isi = np.std(isi) / mean_isi
cvs_adapt.append(cv_isi)
cvs_mean_adapted = np.mean(cvs_adapt)
heights.append(height)
cvs_here.append(cvs_mean_adapted)
# embed()
if (cvs_mean_adapted > upper_tol_cv) or (cvs_mean_adapted < lower_tol_cv):
# step = np.abs(offsets_bisect[o] - offsets_bisect[o - 1]) / 2
# this is the current minus the step yields the new best guess
# a_fe_new =
if (cvs_mean_adapted < lower_tol_cv): # & (factors[-2]< lower_tol):
            # the new step can be half of the previous one
step = step / 2
height = height_prev + step
# elif (factor_adapted > upper_tol):
# offset = v_offset - step
elif (cvs_mean_adapted > upper_tol_cv): # & (factors[-2]> upper_tol):
step = step / 2
height = height_prev - step
# elif (factor_adapted < lower_tol):
# offset = v_offset - step
# print( ' height ' + str(height)+' CV ' + str(cvs_mean_adapted)+' upper tol'+ str(upper_tol_cv) +' lower tol '+ str(lower_tol_cv))
# embed()
heights, cvs_here, step, height = recurent_adapt_height(cvs_here, upper_tol_cv, lower_tol_cv, heights, step,
height, trial_nr, model_params, eod_fr, deltat, offset,
variance, tt, gain, zeros,
fish_morph_harmonics_vars, cell_recording, n, variant,
exponential, adapt_offset, exp_tau,
v_exp,
upper_tol, lower_tol,
trials_pop, phaseshift_fr, us_name, damping_type,
mimick, fishe_receiver, nfft_for_morph,
a_fe,
stimulus_length,
noise_name, var, T, nfft, stim_type_noise, cut_off1,
cell, a_fr, dendrid, trans,
noise_added, extract,
stim_type_afe, cut_off2, ref_type, var_type,
adapt_type,
c_noise, c_sig, cut_off_type)
else:
height = height_prev
return heights, cvs_here, step, height
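# The two functions above implement a bisection on the noise height: once the measured CV
# crosses the tolerance band [lower_tol_cv, upper_tol_cv], the step is halved and added or
# subtracted until the CV falls inside the band. A self-contained toy sketch of that idea
# (hypothetical helper; a monotone stand-in mapping from height to CV replaces the
# simulated spike trains):
def _example_cv_bisection(cv_target=0.32, tol=0.005, height=1.0, step=1.0):
    toy_cv = lambda h: 0.2 + 0.3 * h / (1.0 + h)   # stand-in for the simulated CV
    for _ in range(60):                            # bounded loop instead of recursion
        cv = toy_cv(height)
        if abs(cv - cv_target) <= tol:
            break
        step /= 2.0
        height = height + step if cv < cv_target else height - step
    return height, toy_cv(height)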
def adapt_cv_stimulus(model_params, eod_fr, deltat, offset, variance, tt, gain,
zeros, fish_morph_harmonics_vars, cell_recording, n, variant, exponential, adapt_offset, exp_tau,
v_exp, upper_tol, lower_tol,
trial_nrs, phaseshift_fr, us_name, damping_type, mimick, fishe_receiver, nfft_for_morph, a_fe,
stimulus_length, noise_name, var, T, nfft, stim_type_noise,
cut_off, cell, reshuffled='reshuffled',
dendrid='', a_fr=1, trans=0,
cut_off2=300, stim_type_afe='',
noise_added='', var_type='additiv', c_noise=0.1, ref_type='', adapt_type='', extract='',
c_sig=0.9,
cut_off_type='', height=0):
# var_type = 'additiv_cv_adapt_factor_scaled0.2'
try:
frame = pd.read_csv(load_folder_name('calc_base') + '/cv.csv')
except:
print('frame thing 22')
embed()
frame = frame.sort_values(by='cv')
fr_base = frame[frame.cell == cell].fr # np.array(model_cells['cell'])
cv_base = frame[frame.cell == cell].cv
f_max = cut_off2
N = int(T / deltat)
RAM = np.random.normal(0, var, size=N)
carrier_RAM = np.random.normal(0, 1, size=N) # *0.2
input_scaling = model_params.input_scaling
if 'StimPreSaved' in stim_type_noise:
RAM, carrier_RAM, eod_interp, sampling, eod_mt, time_wn_cut = load_stim_presaved_model(noise_name,
stimulus_length, deltat,
a_fe)
RAM_afe = RAM
RAM_noise = np.zeros(len(RAM))
else:
# define if RAM only intrinsic, intrinsic + additiv, additiv
stim_type, RAM, height, RAM_afe, RAM_noise = RAM_additiv_not(stim_type_noise, input_scaling, a_fe,
stim_type_afe, model_params,
stimulus_length, deltat, noise_added,
cut_off, variance, cell, c_sig, var_type,
cut_off_type, height=height)
# make the stimulus as a RAM or directly as stimulus
if 'RAM' in stim_type:
try:
carrier_RAM, eod_fish_r, deltat, eod_fr, time_array, stimulus_length_here = stimulus2RAM(nfft, a_fr, zeros,
RAM, trans,
stimulus_length,
deltat, eod_fr,
mimick,
cell_recording,
nfft_for_morph,
fish_morph_harmonics_vars,
sampling_rate=1 / deltat,
fishe_receiver=fishe_receiver)
except:
print('RAM problem ')
embed()
if 'extractedCarrier' in extract:
am_extracted = extract_am(carrier_RAM, np.arange(0, len(carrier_RAM) * deltat, deltat), norm=False)[0]
am_extracted = am_extracted[0:len(RAM)]
RAM = am_extracted
###############################
# spike response
# embed()
time_array = np.arange(0, stimulus_length, deltat)
baseline_with_wave_damping, baseline_without_wave = prepare_baseline_array(time_array, eod_fr, nfft_for_morph,
phaseshift_fr, mimick, zeros,
cell_recording, 1 / deltat,
stimulus_length, fishe_receiver, deltat,
nfft, damping_type, '', us_name, gain,
beat='beat',
fish_morph_harmonics_var='analyzed')
# do the model
spike_adapted = [[]] * trial_nrs
spikes = [[]] * trial_nrs
if a_fr == 0:
variant_here = variant + '_' + zeros
else:
variant_here = variant
# embed()
for t in range(trial_nrs):
if t == 0:
adapt_offset_here = adapt_offset
else:
adapt_offset_here = ''
# embed()
cvs, adapt_output, baseline_after, spike_adapted[
t], rate_adapted, rate_baseline_before, rate_baseline_after, spikes[
t], stimulus_power, v_dent, offset, v_mem_output, noise_final = simulate(cell, offset, carrier_RAM,
cut=False, deltat=deltat,
adaptation_variant=adapt_offset_here,
adaptation_yes_e=0,
adaptation_yes_t=tt,
adaptation_upper_tol=upper_tol,
adaptation_lower_tol=lower_tol,
power_variant=variant_here,
power_nr=n, waveform_damping='',
waveform_damping_baseline=baseline_with_wave_damping,
waveform_mimick=mimick,
reshuffle=reshuffled,
noisesplit_c_noise=c_noise,
noisesplit_noise_added=noise_added,
noisesplit_var_type=var_type,
noisesplit_cut_off=cut_off,
noisesplit_c_sig=c_sig,
LIF_ref_type=ref_type,
LIF_adapt_type=adapt_type,
LIF_dendrid=dendrid,
LIF_exponential=exponential,
LIF_exponential_tau=exp_tau,
LIF_expontential__v=v_exp,
**model_params)
# embed()
return height, cv_base, cvs, spikes, spike_adapted, carrier_RAM
def load_height_d(noise_strength, c_signal, cell, var, stimulus_length, input_scaling):
d = noise_strength # (noise_strength ** 2) / 2
var_val = (d * c_signal) * 2 # / deltat
frame = pd.read_csv(load_folder_name('calc_RAM') + '/calc_RAM_extraction_factor-' + 'RAM_extract_hists.csv',
index_col=0) # "2012-12-21-ai-invivo-1","2012-06-27-an-invivo-1",
factor = np.median(frame['additiv_cutoff_scaled' + '_' + cell])
    # todo: the stimulus-length problem still remains
if 'scaled' in var:
height = np.sqrt(var_val * stimulus_length) * factor / input_scaling # * sigma#/deltat#stimulus_length
else:
height = np.sqrt(var_val * stimulus_length) * factor # / input_scaling # * sigma#/deltat# * stimulus_length
return height, frame, factor, var_val, d
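# load_height_d converts the model's intrinsic noise strength D and the signal fraction
# c_signal into a RAM amplitude: var_val = 2 * D * c_signal and
# height = sqrt(var_val * stimulus_length) * factor, divided by input_scaling for the
# 'scaled' variant. A numeric sketch with made-up values (the real factor is read from the
# RAM-extraction table loaded above; the helper name is hypothetical):
def _example_height_from_noise_split(noise_strength=0.05, c_signal=0.9,
                                     stimulus_length=1.0, factor=0.2, input_scaling=80.0):
    var_val = 2.0 * noise_strength * c_signal
    height_scaled = np.sqrt(var_val * stimulus_length) * factor / input_scaling
    height_unscaled = np.sqrt(var_val * stimulus_length) * factor
    return height_scaled, height_unscaled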
def save_spikes_burst_add(a_fe, a_fr, adapt_type, burst_corr, c_noise, c_sig, cell, cell_recording_save_name, cut_off1,
cut_off2, dendrid, duration_noise, extract, fft_i, fft_o, formula, mimick, nfft, noise_added,
power, ref_type, stim_type_afe, stim_type_noise, stimulus_length, stimulus_type, trial_nrs,
var_type, variant, burst_corrs=[], trial_nrs_base=20):
redo = False
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
save_name = find_load_function() + '_' + cell + '_' + burst_corr + '.csv'
if (version_comp == 'code') | (((version_comp == 'develop') & (not os.path.exists(save_name))) | redo == True):
model_cells = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core')
        # spikes_tr, spikes_tr_bef
        # todo: save these here
model_params = model_cells[model_cells.cell == cell].iloc[0]
cell = model_params.pop('cell')
eod_fr = model_params['EODf']
deltat = model_params.pop("deltat")
offset = model_params.pop("v_offset")
f_max = cut_off2
        T = stimulus_length  # T is the stimulus time window; since deltat is in seconds, T is in seconds
N = int(T / deltat)
var = 1
RAM = np.random.normal(0, var, size=N)
carrier_RAM = np.random.normal(0, 1, size=N) # *0.2
input_scaling = model_params.input_scaling
nfft_for_morph = 4096 * 4
gain = 1
zeros = 'ones'
us_name = ''
adapt_offset = 'adaptoffset_bisecting' # 'adaptoffsetallall2' # ''#'adaptoffsetallall2' #'', 'adaptoffsetallall2''adaptoffsetstableall''adaptoffsetallall2'3: noise is not random for the adaptation,'']#, 'adapt_offset_allall2''']
lower_tol = 0.995
upper_tol = 1.005
cell_recording = '' # '2019-09-23-aj-invivo-1' # ['2019-10-21-am-invivo-1']#['2019-09-23-aj-invivo-1']
fishe_receiver = 'Alepto' # ['Sternopygus']#['Alepto']'Sternarchella',
exponential = ''
if exponential == '':
v_exp = 1
exp_tau = 0.001
phaseshift_fr = 0
fish_morph_harmonics_vars = 'harmonic'
trans = 1 # 50#500#0
cut_off_type = ''
variance = 1
reshuffled = 'reshuffled'
Hz = 'Hz'
mV = 'mV'
save_name_here_cv = save_ram_model(stimulus_length, cut_off1, nfft, a_fe, stim_type_noise, mimick, variant,
trial_nrs_base, power,
cell_recording_save_name, burst_corr=burst_corr, Hz=Hz, mV=mV,
fft_i=fft_i, fft_o=fft_o, stim_type_afe=stim_type_afe,
extract=extract, noise_added=noise_added, adapt_type=adapt_type,
ref_type=ref_type, c_noise=c_noise, c_sig=c_sig,
cut_off2=cut_off2, a_fr=a_fr, var_type=var_type, zeros=zeros,
trials_nr=trial_nrs, dendrid=dendrid, trans=trans)
# load_path = save_name_here
# save_name_here_cv = save_name_here * 1
damping_type = ''
noise_name = ''
n = 1
trial_pop = 1
if 'additiv_cv_adapt_factor_scaled' in var_type:
simple_cv_adapt = True
if simple_cv_adapt:
save_name_here_cv = save_ram_model_cv(a_fe, stim_type_noise, stim_type_afe=stim_type_afe,
noise_added=noise_added, c_noise=c_noise, c_sig=c_sig,
var_type=var_type)
# replace('fft_o_forward_fft_i_forward_Hz_mV','')
# replace('cutoff1_300_cutoff2_300no_sinz_length1__a_fr_1__trans1s__')
# spikes_after_burst_corr(spikes_mt, isi, burst_corr, data_name, eod_fr):
                # ok, we actually want a simple thing here that applies to all cases
# embed()
# if not os.path.exists(save_name_here_cv + '.csv'):
else:
save_name_here_cv = name_for_cv_presaved(save_name_here_cv, burst_corr, trial_nrs_base)
if not os.path.exists(save_name_here_cv + '.csv'):
# resave_small_files(save_name_here_cv + '.csv')
print('cv table somehow not there')
print('load ' + save_name_here_cv + ' from other PC')
embed()
frame_cv_test = pd.read_csv(save_name_here_cv + '.csv')
if cell in frame_cv_test.keys():
cont_cell = True
else:
cont_cell = False
else:
cont_cell = True
# embed()
if cont_cell: # cont_cell
T = stimulus_length
cut_off = 300
spikes_tr = [[]] * trial_nrs_base
spikes_tr_bef = [[]] * trial_nrs_base
corrs_all = []
for b in range(len(burst_corrs)):
corrs_all.append([[]] * trial_nrs_base)
for tr in range(trial_nrs_base):
RAM_afe, RAM_noise, stim_type, RAM, carrier_RAM, deltat, eod_fr, time_array, eod_fish_r, am_extracted, stimulus_length_here = get_stim(
carrier_RAM, a_fr, zeros, eod_fr, mimick, fishe_receiver, cell_recording, trans, nfft,
extract, noise_added, cut_off, stim_type_afe, model_params, variance, cell,
save_name_here_cv, c_sig, var_type, cut_off_type, input_scaling, noise_name,
stimulus_length, deltat, a_fe, stim_type_noise, nfft_for_morph,
fish_morph_harmonics_vars)
###############################
# spike response
# if 'added' in burst_corr:
# embed()
# burst_corr_plt = burst_corr + '2'
noise_final_c, offset, v_mem_output, spikes_bef, rate_adapted, rate_baseline_after, spikes, isi, v_dent = get_spikes(
adapt_type, offset,
adapt_offset, a_fr,
nfft, lower_tol, 0,
carrier_RAM, n, cell,
trial_pop, variant,
stimulus_length, deltat,
exponential, upper_tol,
v_exp, exp_tau, dendrid,
noise_added, cut_off,
c_sig, var_type,
ref_type, c_noise,
model_params,
fishe_receiver,
phaseshift_fr,
nfft_for_morph, eod_fr,
damping_type, mimick,
burst_corr,
cell_recording, us_name,
gain, reshuffled, zeros)
spikes_tr[tr] = spikes[0]
spikes_tr_bef[tr] = spikes_bef[0]
for b, burst_corr in enumerate(burst_corrs):
try:
corrs_all[b][tr] = np.array(
spikes_after_burst_corr(spikes_bef[0], isi, burst_corr, cell, eod_fr,
model_params=model_params))
except:
print('assign thing')
embed()
# spikes[t]
# embed()
if (version_comp == 'develop'):
carrier = pd.DataFrame(carrier_RAM)
fr = pd.DataFrame(spikes_tr)
fr_bef = pd.DataFrame(spikes_tr_bef)
# embed()
# corr = pd.DataFrame(corrs_all)
# np.save(corrs_all, save_name.replace('.csv','_all.npy'))
fr.to_csv(save_name)
carrier.to_csv(save_name.replace('.csv', '_carrier.csv'))
fr_bef.to_csv(save_name.replace('.csv', '_bef.csv'))
# embed()
else:
spikes_tr_df = pd.read_csv(save_name, index_col=0)
carrier = pd.read_csv(save_name.replace('.csv', '_carrier.csv'), index_col=0)
# embed()
spikes_tr = []
for sp in range(len(spikes_tr_df)):
spikes_tr.append(np.array(np.array(spikes_tr_df.iloc[sp].dropna())))
spikes_tr_bef_df = pd.read_csv(save_name.replace('.csv', '_bef.csv'), index_col=0)
spikes_tr_bef = []
for sp in range(len(spikes_tr_bef_df)):
spikes_tr_bef.append(np.array(np.array(spikes_tr_bef_df.iloc[sp].dropna())))
model_cells = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core')
# spikes_tr, spikes_tr_bef
        # todo: save these here
model_params = model_cells[model_cells.cell == cell].iloc[0]
cell = model_params.pop('cell')
eod_fr = model_params['EODf']
corrs_all = []
for b, burst_corr in enumerate(burst_corrs):
isi = calc_isi(spikes_tr_bef[0], eod_fr)
try:
corrs_all.append([np.array(
spikes_after_burst_corr(spikes_tr_bef[0], isi, burst_corr, cell, eod_fr,
model_params=model_params))])
except:
print('assign thing')
embed()
# embed()
return carrier, corrs_all, spikes_tr, spikes_tr_bef
def calc_psd_from_spikes(nfft, sampling_calc, spikes_all_here):
p_array = [[]] * len(spikes_all_here)
spikes_mat = [[]] * len(spikes_all_here)
for s, sp in enumerate(spikes_all_here):
spikes_mat[s] = cr_spikes_mat(np.array(sp) / 1000, sampling_rate=sampling_calc,
length=int(sampling_calc * np.array(sp[-1]) / 1000))
p_array[s], f_array = ml.psd(spikes_mat[s] - np.mean(spikes_mat[s]), Fs=sampling_calc, NFFT=nfft,
noverlap=nfft // 2)
# axp.plot(f_array,p_array[s], color = colors[str(cell_type)], alpha = float(alpha-0.05*s))
return spikes_mat, f_array, p_array
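# Usage sketch for calc_psd_from_spikes (hypothetical helper): spike times are expected in
# milliseconds (they are divided by 1000 above) and one PSD per trial is returned;
# cr_spikes_mat is the spike-train binning helper defined elsewhere in this module.
def _example_calc_psd_from_spikes(sampling_calc=40000, nfft=2 ** 14):
    # two synthetic spike trains of roughly 2 s length, spike times in ms
    spikes_all_here = [np.sort(np.random.uniform(0, 2000, 300)) for _ in range(2)]
    spikes_mat, f_array, p_array = calc_psd_from_spikes(nfft, sampling_calc, spikes_all_here)
    # plt.plot(f_array, p_array[0])  # uncomment to inspect the first trial's PSD
    return f_array, p_array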
def make_log_ticks(axes):
for ax in axes:
# ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
s = 2
if s == 0:
            # does not work
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
elif s == 1:
            # does not work
for a in [ax.xaxis, ax.yaxis]:
formatter = ScalarFormatter()
formatter.set_scientific(False)
                a.set_major_formatter(formatter)  # this does work in principle, but the x labels are still missing
elif s == 2:
            # this works
            # SOLUTION
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.yaxis.set_minor_formatter(NullFormatter())
elif s == 3:
            # this looks ugly, it only formats the minor ticks
ax.yaxis.set_minor_formatter(ScalarFormatter())
ax.ticklabel_format(style='plain', axis='x')
            # this works
print('should have formated')
    # todo: some of these options work, but it is unclear which one and why
    # embed()
    # todo: the exponent/offset label still has to be removed
    # we still need this so that labels read 10 instead of 10.0
    # todo: this works sometimes, but not always yet
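# Usage sketch for make_log_ticks (hypothetical helper): on a log-scaled y axis the
# StrMethodFormatter branch above replaces the default 10^x tick labels by plain integers.
def _example_make_log_ticks():
    fig, ax = plt.subplots()
    ax.plot([1, 10, 100], [3, 30, 300])
    ax.set_xscale('log')
    ax.set_yscale('log')
    make_log_ticks([ax])
    return fig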
def save_ram_model_cv(a_fe, stim_type_noise, stim_type_afe='', c_noise=0.1, c_sig=0.9, noise_added='', var_type='',
save_dir=None):
if 'additiv' in var_type:
var_type = '_' + var_type + '_cNoise_' + str(c_noise) + '_cSig_' + str(c_sig)
a_fe_name = ''
noise_added_name = noise_added
elif var_type != '':
var_type = '_' + var_type + '_'
noise_added_name = ''
else:
a_fe_name = '_afe_' + str(a_fe)
var_type = ''
noise_added_name = ''
# cut_off_name = '_cutoff1_' + str(cut_off1) + '_cutoff2_' + str(cut_off2)
# folder_name('calc_model')+'/noise4_' + duration_noise + '_nfft_' + str(nfft) + '_power_' + str(n) + a_fe_name + str(
# formula) + '_' + stim_type_name + var_type + cut_off_name + cell_recording_save_name + mimick + '_' + variant + '_' + stimulus_type + 'length' + str(
# stimulus_length) + '_TrialsStim_' + str(
# trials_stim) + a_fr_name + dendrid_name + trans_name + trials_nr_name + ref_type + adapt_type #str(formula)
# embed()# stimulus_type
if 'additiv' in var_type:
if 'RAM' in stim_type_noise:
stim_type_name1 = '_' + stim_type_noise # + str(1)
else:
stim_type_name1 = '_' + stim_type_noise
else:
stim_type_name1 = ''
# if a_fe !=
if not save_dir:
save_dir = load_folder_name('calc_model') # '../calc_model/'
return save_dir + '/' + var_type + '_extranoise_' + stim_type_afe + '_internalnoise_' + stim_type_noise + '_cvtable'
# calc_RAM_model
def find_mean_period(eod2, sampling_frequency, norm=True, threshold=0.02, sigma=1, ):
# zero_crossings_eod = np.where(np.diff(np.sign(eod)))[
# 0] # here are the Zerocrossings of the EOD (because you can't find the Zero)
# diff_zero_crossings = np.diff(
# zero_crossings_eod) # this yields the periods, but actually I dont know yet if thats for use
if norm:
eod_norm = zenter_and_normalize(eod2, 1)
else:
eod_norm = eod2
    # don't forget to filter, otherwise there can be problems!
eod_indices, eod_times, filtered = thresh_crossings(eod_norm, np.arange(0, len(eod_norm) / sampling_frequency,
1 / sampling_frequency), threshold,
sigma=sigma, ) # -0.02
    zero_crossings_time_adapted = eod_times  # [0::2] # I think that would take every second crossing!
# zero_crossings_beginning_of_period = [zero_crossings_eod[0],zero_crossings_eod[2]]
# zero_crossings_time_adapted = zero_crossings_beginning_of_period / sampling_frequency
    eod_period = np.mean(np.diff(zero_crossings_time_adapted))  # and this is the mean period
    test = False
    if test:
        # debug plot; the original zero-crossing variables are commented out above,
        # so plot the normalized EOD and the threshold-crossing times instead
        fig, ax = plt.subplots(3, 1)
        ax[0].plot(eod_norm)
        ax[0].scatter(zero_crossings_time_adapted, np.zeros(len(zero_crossings_time_adapted)))
        ax[0].scatter(np.diff(zero_crossings_time_adapted),
                      np.zeros(len(np.diff(zero_crossings_time_adapted))))
return eod_period, zero_crossings_time_adapted, eod_times
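# Sketch of find_mean_period on a synthetic 800 Hz EOD-like sine wave (hypothetical helper).
# Assuming thresh_crossings returns one upward crossing per cycle, the returned mean period
# should be close to 1/800 s (about 1.25 ms).
def _example_find_mean_period(eod_fr=800.0, sampling_frequency=40000.0):
    t = np.arange(0, 0.5, 1.0 / sampling_frequency)
    eod_synth = np.sin(2 * np.pi * eod_fr * t)
    eod_period, crossing_times, eod_times = find_mean_period(eod_synth, sampling_frequency)
    return eod_period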
def file_names_to_exclude(cell_type='p-unit'):
# 'gwn50Hz10s0.3', 'InputArr_400hz_30', 'gwn300Hz10s0.3',
# 'gwn300Hz50s0.3', 'InputArr_250to300hz_30',
# 'InputArr_350to400hz_30', 'InputArr_50hz_30',
# 'InputArr_150to200hz_30', 'InputArr_50to100hz_30',
# 'gwn250Hz50s0.3', 'gwn150Hz50s0.3', 'InputArr_50to100hz_30s',
# 'InputArr_150to200hz_30s', 'InputArr_250to300hz_30s',
# 'InputArr_350to400hz_30s', 'InputArr_50hz_30s',
# 'InputArr_400hz_30s', 'gwn50Hz50s0.3', 'gwn50Hz10s0.3short',
# 'gwn300Hz10s0.3short', 'gwn150Hz10s0.3', 'gwn150Hz10s0.3short',
# 'gwn25Hz10s0.3short', 'gwn100Hz10s0.3', 'gwn25Hz10s0.3',
# 'gwn200Hz10s0.3']
if 'p-unit' in cell_type: # == ['p-unit', ' P-unit']:
file_names_exclude = ['InputArr_350to400hz_30',
'InputArr_250to300hz_30',
'InputArr_150to200hz_30',
'InputArr_50to100hz_30',
'InputArr_50hz_30',
'InputArr_350to400hz_30s',
'InputArr_250to300hz_30s',
'InputArr_150to200hz_30s',
'InputArr_50to100hz_30s',
'InputArr_50hz_30s',
'gwn100Hz10s0.3',
'gwn100Hz10s0.3short',
'gwn50Hz50s0.3',
'gwn50Hz10s0.3',
'gwn50Hz10.3',
'gwn50Hz10s0.3short',
'gwn25Hz10s0.3',
'gwn25Hz10s0.3short',
'FileStimulus-file-gaussian50.0',
'FileStimulus-file-gaussian25.0',
] #
else:
file_names_exclude = ['InputArr_350to400hz_30',
'InputArr_250to300hz_30',
'InputArr_150to200hz_30',
'InputArr_50to100hz_30',
'InputArr_50hz_30',
'InputArr_350to400hz_30s',
'InputArr_250to300hz_30s',
'InputArr_150to200hz_30s',
'InputArr_50to100hz_30s',
'InputArr_50hz_30s',
'gwn50Hz50s0.3',
'gwn50Hz10s0.3',
'gwn50Hz10.3',
'gwn50Hz10s0.3short',
'gwn25Hz10s0.3',
'gwn25Hz10s0.3short',
'FileStimulus-file-gaussian50.0',
'FileStimulus-file-gaussian25.0',
] #
return file_names_exclude
def p_units_to_show(type_here='intro'):
# type_here = #'bursts''ampullary','low_cv','high_cv_punit','strong_low_cv_punit','eigenmania_high_cv', 'eigemania_low_cv'
several = 'several'
# if several == 'single':
    # these are the high points
# 2018-09-06-au
# 2010-07-28-ae
low_cv_punit = ["2010-06-21-ai-invivo-1",
"2010-06-21-ak-invivo-1",
"2010-06-21-av-invivo-1",
"2010-08-25-ah-invivo-1",
"2010-08-31-ad-invivo-1",
"2018-05-08-ac-invivo-1",
"2018-09-06-au-invivo-1",
"2021-08-03-ab-invivo-1",
'2022-02-07-ah-invivo-1'] # '2018-04-24-ai-invivo-1',
strong_low_cv_punit = ["2021-08-03-ab-invivo-1",
"2020-10-27-aa-invivo-1",
"2020-10-27-ae-invivo-1",
"2020-10-27-ag-invivo-1",
"2020-10-27-ah-invivo-1",
"2010-06-15-af-invivo-1",
"2010-07-13-ab-invivo-1",
"2012-07-03-ak-invivo-1",
"2022-02-07-ah-invivo-1",
"2010-08-25-ah-invivo-1"
] #
high_cv_punit = [
"2011-02-18-ad-invivo-1",
"2012-03-08-ah-invivo-1",
"2018-01-17-al-invivo-1",
"2018-01-19-aj-invivo-1",
"2018-05-08-ae-invivo-1",
"2018-08-24-af-invivo-1",
"2018-08-24-ac-invivo-1",
"2018-08-24-aj-invivo-1",
"2018-08-24-al-invivo-1",
"2018-08-24-an-invivo-1",
"2018-08-29-ab-invivo-1",
"2018-08-29-af-invivo-1",
"2018-08-30-ab-invivo-1",
"2018-09-05-al-invivo-1",
"2018-09-06-aj-invivo-1",
"2018-09-06-af-invivo-1",
"2018-09-06-as-invivo-1",
"2020-10-29-ag-invivo-1"] #
ampullary_eigen = [
"2012-01-20-am",
"2012-01-11-ah",
]
ampullary = [
"2011-09-21-ab-invivo-1",
"2012-03-30-af",
"2012-04-26-ae",
"2012-04-30-af",
"2012-05-15-ac-invivo-1",
"2012-05-30-aa-invivo-1",
] # '2017-10-25-am-invivo-1',
# '2019-06-28-ag-invivo-1''2013-02-21-af-invivo-1''2012-12-18-ac-invivo-1',
burst_cells = [
"2010-07-08-ac-invivo-1",
"2010-08-27-ag-invivo-1",
"2010-08-11-ab-invivo-1",
"2010-08-11-ar-invivo-1",
"2010-08-27-ag-invivo-1",
"2011-06-09-aa-invivo-1",
"2011-10-25-aa-invivo-1",
"2012-03-08-aj-invivo-1",
"2012-03-30-aa-invivo-1",
"2017-08-11-ad-invivo-1",
"2018-05-08-ac-invivo-1",
"2018-09-06-ae-invivo-1",
"2020-10-20-ab-invivo-1",
"2020-10-20-ae-invivo-1",
"2020-10-21-aa-invivo-1",
"2020-10-21-ac-invivo-1",
"2020-10-27-af-invivo-1",
"2022-01-05-ac-invivo-1",
"2022-01-05-af-invivo-1",
"2022-01-06-aa-invivo-1",
"2022-01-06-ab-invivo-1",
"2022-01-06-ai-invivo-1",
"2022-01-06-ah-invivo-1",
"2022-01-28-ac-invivo-1",
"2022-01-28-ad-invivo-1",
"2022-01-28-af-invivo-1",
"2022-01-28-ag-invivo-1",
"2012-03-23-ae-invivo-1"] # "2018-08-04-ab-invivo-1","2022-08-03-ae-invivo-1",
eigemania = ["2011-10-20-ab-invivo-1",
"2011-10-10-an-invivo-1",
"2012-02-21-ai-invivo-1",
"2012-02-27-ao-invivo-1",
"2012-02-28-aq-invivo-1",
"2012-02-28-ai-invivo-1",
"2012-10-25-an-invivo-1",
] # "2012-02-27-an-invivo-1","2012-02-23-ac-invivo-1"
eigenmania_high_cv = ["2011-10-17-ag",
"2011-10-17-ac",
"2012-02-16-ab",
"2012-01-27-ah"] #
light_bursters = ['2018-08-14-ac-invivo-1',
'2018-08-14-ad-invivo-1',
'2018-07-26-aa-invivo-1',
'2010-07-13-af-invivo-1',
'2010-11-08-ae-invivo-1',
'2010-08-25-ag',
'2018-07-26-ag-invivo-1',
'2019-09-23-am-invivo-1',
'2010-09-23-aa',
'2019-10-28-ad-invivo-1',
'2022-01-05-ab-invivo-1',
] #
# high_burst_cells = [,
# ]
reclass = ['2012-03-20-ae']
model_cells, remaining = overlap_cells()
#########################################################
# '2012-05-15-ac-invivo-1'
# ['2012-05-07-ac-invivo-1', ,,'2010-06-21-am-invivo-1','2010-06-21-ac-invivo-1' ]
    # great: '2012-04-26-ae',,,'2011-09-21-ab-invivo-1'
    # selection from these cells
    amp = '2012-04-26-ae'  # '2012-03-30-af' # the cell whose firing rate fits several times into the EODf: '2012-03-30-af'; it has several lines, though, so it goes into the contrast figure but not the intro figure # '2011-10-17-ad'#'2012-05-30-aa-invivo-1', '2012-04-26-ea'
    l_cv = '2010-06-21-ai'  # also good: '2010-06-21-ak' # too few spikes: '2010-07-13-ad' # peak too small: '2010-08-31-ad' # good: '2010-06-21-ai' # really low firing rate: '2010-06-21-av' # '2021-06-18-ae-invivo-1' # lines slightly shifted: "2018-09-06-au-invivo-1" # "2010-06-21-ai-invivo-1"
    # alternatives:
# 2010-06-21-ai-invivo-1
# 2010-06-21-ak
# 2010-08-31-ad
# 2010-06-21-av
h_cv = "2018-08-24-af-invivo-1"
#########################################################
l_cv_ii = "2018-09-06-au-invivo-1" # '2010-07-29-ae-invivo-1'#"2018-09-06-au-invivo-1" # "2010-06-21-ai-invivo-1"
h_cv_ii = "2018-08-24-af-invivo-1"
    amp_ii = '2012-12-18-ac'  # the cell whose firing rate fits several times into the EODf: '2012-03-30-af' # '2011-10-17-ad'#'2012-05-30-aa-invivo-1', '2012-04-26-ea'
#########################################################
b1 = '2010-08-11-ab' # '2012-03-08-aj'#'2022-01-06-ab-invivo-1'#
    # leaving this extreme cell out for now, it would be too much
    b2 = '2022-01-06-aa-invivo-1'  # this one is ok with several lines; '2022-01-05-ac-invivo-1' is not bad either, but the ac cell has few spikes; for '2022-01-06-ah-invivo-1' the peak does not go up as high #'2022-01-06-ai-invivo-1'#'2022-01-06-ah-invivo-1'
    # '2022-01-06-aa-invivo-1' is an outlier
b3 = '2022-01-28-ad-invivo-1'
bl = '2019-09-23-am-invivo-1'
burst_dublets = ['2022-01-28-ag-invivo-1', '2019-10-28-aj-invivo-1', '2022-01-06-ab-invivo-1']
#########################################################
    eigen_1 = '2012-02-28-aq'  # '2012-02-27-al'#'2012-02-21-ai'#'2012-02-28-aq'#'2012-02-21-ai'#'2012-02-27-al'#'2012-02-21-ai'#'2012-02-28-aq'# these three are interesting: "2012-02-28-aq"#'2012-02-21-ai'#'2012-02-27-al',
    # the eigen_1 cells are all a bit weird; maybe we really use only two cells, one with the peak at half the EODf and one with the peak not at half
eigen_2 = '2012-02-27-ao' # '2011-10-17-ag'#'2012-02-27-ao'## '2011-10-07-aj','2012-02-27-aj'#
    eigen_3 = '2011-10-20-ab'  # this one is also at half the EODf # that was high CV; now we want one where the peak is at half! "2012-02-16-ab"
    # this is the high-CV one = "2012-02-16-ab"
amp_e = '2012-01-20-am' # '2012-01-11-ah'#"2011-10-17-ad" # "2012-01-11-ah" # '2011-10-17-ad'#"2011-10-17-ad-invivo-1"
# '2011-10-17-aj'
    # Ampullary cells, P-unit_low_cv, P-unit_high_cv, P-unit_burst, Eigemania_low_cv
#
if type_here == 'intro':
cells_plot2 = [l_cv_ii,
h_cv_ii,
bl,
amp_ii, ]
#
# burst_cells[0], ,good_punits[0] '2010-08-27-ag-invivo-1''2010-06-21-av-invivo-1',
elif type_here == 'intro_simple': # '2022-01-28-ad-invivo-1'
cells_plot2 = [l_cv_ii]
#
# burst_cells[0], ,good_punits[0] '2010-08-27-ag-invivo-1''2010-06-21-av-invivo-1',
elif type_here == 'intro_simple2': # '2022-01-28-ad-invivo-1'
cells_plot2 = [l_cv_ii, b3]
#
# burst_cells[0], ,good_punits[0] '2010-08-27-ag-invivo-1''2010-06-21-av-invivo-1',
elif type_here == 'contrasts':
cells_plot2 = [l_cv,
h_cv]
elif type_here == 'amp': #
cells_plot2 = [amp]
elif type_here == 'antrag':
cells_plot2 = [amp,
h_cv,
b3
]
elif type_here == 'ampullary_small':
cells_plot2 = [amp] # ampullary[0]
elif type_here == 'eigen_small':
cells_plot2 = [
eigen_2,
eigen_3
] # amp_e, eigen_1,eigemania[0]"2011-10-20-ab",
elif type_here == 'bursts': #
cells_plot2 = [b3, b1] # b2,
elif type_here == 'bursts_first': #
cells_plot2 = [b3] # b2,
elif type_here == 'burst_didactic':
cells_plot2 = ['2010-06-21-av',
] # '2018-07-26-am-invivo-1'#'2010-08-11-aa'
elif type_here == 'burst_didactic_all':
cells_plot2 = ['2018-07-26-al-invivo-1',
'2018-07-26-am-invivo-1',
'2018-08-29-af-invivo-1',
'2018-08-24-ag-invivo-1',
'2018-08-24-aj-invivo-1',
'2018-09-05-al-invivo-1',
'2010-08-11-aa',
'2010-06-21-av'] #
elif type_here == 'bursts_all':
cells_plot2 = burst_cells # [0:3]#[0:3]
elif type_here == 'light_bursts_all':
cells_plot2 = light_bursters # [0:3]#[0:3]
elif type_here == 'ampullary':
cells_plot2 = ampullary
elif type_here == 'model':
cells_plot2 = model_cells
elif type_here == 'low_cv_punit':
cells_plot2 = low_cv_punit
elif type_here == 'eigemania_low_cv': #
cells_plot2 = eigemania
elif type_here == 'eigenmania_high_cv':
cells_plot2 = eigenmania_high_cv
elif type_here == 'strong_low_cv_punit':
cells_plot2 = strong_low_cv_punit
elif type_here == 'high_cv_punit': # 'high_cv_punit','strong_low_cv_punit'
cells_plot2 = high_cv_punit
elif type_here == 'reclassified':
cells_plot2 = reclass
# burst_cells,ampullary,eigemania, eigenmania_high_cv, low_cv_punit, strong_low_cv_punit, high_cv_punit
elif type_here == 'mascha':
        # Ampullary cells,
# P-unit_low_cv,
# P-unit_high_cv,
# P-unit_burst,
# Eigemania_low_cv#'bursts','contrasts'
cells_plot2 = ['2012-03-30-af',
"2010-06-21-ai-invivo-1",
"2018-08-24-af-invivo-1",
'2022-01-06-aa-invivo-1',
"2010-10-20-an-invivo-1"]
    # todo: also introduce a cells_all correction here
cells_plot2 = update_cell_names(cells_plot2)
# ['2020-10-27-af-invivo-1','2018-09-06-af-invivo-1']
return cells_plot2
def update_cell_names(cells_plot2):
# dir, cells = find_all_dir_cells()
cells_loaded = resave_small_files('all_cells.npy', load_folder='calc_RAM', resave='npy')
# cells_loaded = np.load(load_folder_name('calc_RAM')+'/all_cells.npy')
# cells
for c, cell in enumerate(cells_plot2):
if cell not in cells_loaded:
try:
cell = cell.replace('-invivo-1', '') # not in cells_loaded:
except:
print('cell name thing')
embed()
cells_plot2[c] = cell
if cell not in cells_loaded:
cell = cell + '-invivo-1' # not in cells_loaded:
cells_plot2[c] = cell
if cell not in cells_loaded:
print('cell still not there')
embed()
return cells_plot2
def find_norm_susept(f_idx_sum, isf):
# power_isf_1 = (np.abs(isf) ** 2)
# power_isf_1 = [power_isf_1] * len(f_idx_sum)
# power_isf_2 = np.transpose(power_isf_1)
power_isf_1, power_isf_2 = find_isf_matrices(f_idx_sum, isf)
scale = 1 / (2 * np.array(np.abs(power_isf_1) ** 2 * np.abs(power_isf_2) ** 2))
return scale
def find_isf_matrices(f_idx_sum, isf):
# x varying values
rate_matrix1 = np.conjugate(isf)
rate_matrix1 = [rate_matrix1] * len(f_idx_sum)
# y varying values
rate_matrix2 = np.transpose(rate_matrix1)
return rate_matrix1, rate_matrix2
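# find_norm_susept computes, element-wise, the normalization used for the second-order
# susceptibility,
#     scale[i, j] = 1 / (2 * |S(f_i)|**2 * |S(f_j)|**2),
# where S is the stimulus spectrum isf; find_isf_matrices broadcasts the conjugated spectrum
# into matching row/column matrices. The spectrum passed in must have the same length as
# f_idx_sum so that the matrices are square. A minimal numeric sketch (hypothetical helper,
# random stand-in spectrum):
def _example_find_norm_susept(n_freqs=10):
    isf = np.fft.fft(np.random.randn(64))
    f_idx_sum = np.arange(1, n_freqs + 1)        # indices of the frequencies kept
    scale = find_norm_susept(f_idx_sum, isf[f_idx_sum])
    return scale.shape                           # (n_freqs, n_freqs)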
def restrict_freqs(f_orig, max_f):
rest_nr = 0
restrict = (f_orig > rest_nr)
f = f_orig[(f_orig > rest_nr) & (f_orig < max_f)]
return f, restrict
def synthetise_eod(mt_nr, extract, sampling_rate, sampling, mt_idx, idx, arrays_calc, mt_group):
keys = [k for k in arrays_calc[idx]] #
eods_loc_synch = {}
eods_am_synch = {}
for i in range(len(arrays_calc[idx])):
name_orig = keys[i] #
        # take the respective global EOD and normalize it
eod_fish_r = zenter_and_normalize(arrays_calc[idx][name_orig], 1)
# embed()
f1 = mt_group[1].f1.iloc[mt_idx]
f2 = mt_group[1].f2.iloc[mt_idx]
        # and the remaining ones are synthesized!
time = np.arange(0, len(eod_fish_r) / sampling, 1 / sampling)
if name_orig == 'control_01':
eods_loc_synch[name_orig] = np.array(-eod_fish_r + 0.2 * np.sin(
2 * np.pi * time * f1))
elif name_orig == 'control_02':
eods_loc_synch[name_orig] = np.array(-eod_fish_r + 0.2 * np.sin(
2 * np.pi * time * f2))
elif name_orig == '012':
eods_loc_synch[name_orig] = np.array(-eod_fish_r + 0.2 * np.sin(
2 * np.pi * time * f1) + 0.2 * np.sin(
2 * np.pi * time * f2 + np.pi / 2))
else:
eods_loc_synch[name_orig] = np.array(-eod_fish_r)
time_eod_here = np.arange(0,
len(eods_loc_synch[name_orig]) / sampling,
1 / sampling)
eods_am_synch[name_orig] = extract_am(eods_loc_synch[name_orig], time_eod_here,
sampling=sampling_rate, eodf=mt_group[1].eodf[mt_nr],
emb=False, extract=extract)[0]
return eods_loc_synch, eods_am_synch
def load_overview_susept(save_name, redo=False, redo_class=True):
    # todo: for some reason the result differs depending on whether it is .csv or .pkl
path_csv = load_folder_name('calc_base') + '/calc_base_data-base_frame.pkl' # ../calc_base/
# name =
load_function = find_load_function() + '.csv'
    # todo: also handle this for the public version and update it when the other file has been reloaded
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
if version_comp != 'public':
# embed()
save_name_here = load_folder_name('calc_RAM') + '/' + save_name + '.csv'
cont = check_creation_time(load_function, save_name_here)
# redo_class = False#True # False#True
# redo = False # False#True
if (not os.path.exists(load_function)) | cont | (redo == True):
# embed()
# if : # path_csv if (redo == True) | (not os.path.exists(name1)):
print('redoing')
# frame_base = load_cv_table(path_sascha=path_csv, redo = redo)
frame_load = pd.read_csv(save_name_here, index_col=0)
# embed()
time_base = os.path.getctime(path_csv)
time_ram = os.path.getctime(save_name_here)
do = True
if do:
frame_load = update_overview_class(frame_load, save_name_here, redo_class)
# embed()
test = False
if test:
frame_load[frame_load.cell == '2012-04-20-ae-invivo-1']['cell_type_reclassified']
# frame_base[frame_base.cell == '2012-04-20-ae-invivo-1']['cell_type_reclassified']
# embed()
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
if version_comp == 'develop':
frame_load.to_csv(load_function) # , index_col=0
# else:
# if os.path.exists(load_function):
# frame_load = pd.read_csv(load_function, index_col=0)
# else:
# frame_load = []
    # else: # if we are not on Sascha's computer
else:
if os.path.exists(load_function):
frame_load = pd.read_csv(load_function, index_col=0)
else:
frame_load = []
else:
if os.path.exists(load_function):
frame_load = pd.read_csv(load_function, index_col=0)
else:
frame_load = []
# embed()
return frame_load
def check_creation_time(load_function, save_name_here):
if os.path.exists(load_function):
if (os.path.getmtime(load_function) > os.path.getmtime(save_name_here)):
cont = True
else:
cont = False
else:
cont = False
return cont
def update_overview_class(frame_load, save_name_here, redo_class=False):
if (redo_class == True) | ('cell_type_reclassified' not in frame_load): # )::
frame_load = pd.read_csv(save_name_here, index_col=0)
frame_load = update_RAM_with_CVs_baseline(frame_load, save_name_here, redo=True)
# calc_base_reclassification([save_name_here], cell_type='celltype')
frame_load = pd.read_csv(save_name_here, index_col=0)
return frame_load
def update_RAM_with_CVs_baseline(frame_load, save_name_here, printing=True, redo=False, contras_burst_update=True,
save=True):
    # we do this as an overview table because we do not need any spikes here
    path_csv = load_folder_name('calc_base') + '/calc_base_data-base_frame_overview.csv'  # ../calc_base/
    # note: the .pkl path below supersedes the .csv path assigned just above
    path_csv = load_folder_name('calc_base') + '/calc_base_data-base_frame_overview.pkl'  # ../calc_base/
frame_base = load_cv_table(path_sascha=path_csv, redo=redo)
# frame_overview = resave_small_files('calc_coherence_overview-_calc_coherence_overview.csv','calc_RAM')
# if 'burst_corr' in save_name_here:
#
    # cv and cv_wo_burstcorr refer to the baseline characteristics # frame_load.filter('cv')
    # what we want here is cv_stim, for the case that the others are not available
    # frame_load[frame_load['cell'] == cell].filter(like='cv')
    # therefore we first set it to the original index
cont_total = False
if printing:
print('updating')
##################################
    # similar frame updates
if contras_burst_update:
frame_load = contra_burst_update(cont_total, frame_load, save_name_here)
# print('updating third')
# if cont_total:
    # and as the basis for the reclassified type we start from the celltype column
frame_load['cell_type_reclassified'] = frame_load['celltype']
# embed()
#################################################################
# overview frame update
test = False
if test:
path_csv = load_folder_name(
'calc_RAM') + '/calc_coherence_overview-_calc_coherence_overview.csv' # ../calc_base/
frame_overview = pd.read_csv(path_csv)
# embed()
frame_overviews = pd.read_csv(
load_folder_name('calc_RAM') + '/calc_coherence_overview-_calc_coherence_overview.csv')
key_names = ['ratio_single_freqs', 'contrast_single_freqs', 'contrast_integral',
'contrast_single_freqs_direct', 'contrast_integral_direct',
'ratio_integral_direct', 'ratio_single_freqs_direct',
'contrast_single_freqs_first', 'contrast_integral_first',
'ratio_integral_first', 'ratio_single_freqs_first']
key_names = ['pi_integral', 'pi_single_freqs', 'coherence_diff',
'contrast_single_freqs', 'contrast_integral', 'ratio_integral',
'ratio_single_freqs', 'coherence_diff_direct',
'contrast_single_freqs_direct', 'contrast_integral_direct',
'ratio_integral_direct', 'ratio_single_freqs_direct',
'coherence_diff_first', 'contrast_single_freqs_first',
'contrast_integral_first', 'ratio_integral_first',
'ratio_single_freqs_first', 'coherence_diff_direct_first',
'contrast_single_freqs_direct_first', 'contrast_integral_direct_first',
'ratio_integral_direct_first', 'ratio_single_freqs_direct_first']
# embed()
for key in key_names:
# print(key)
cells_there, frame_load = update_cv_RAMs(frame_load, load_folder_name(
'calc_RAM') + '/calc_coherence_overview-_calc_coherence_overview.csv', stim_name=key,
stim_save_name=key, base_name=key) # 'ratio_integral'
# cells_there, frame_load = update_cv_RAMs(frame_load, load_folder_name('calc_RAM') + '/calc_coherence_overview-_calc_coherence_overview.csv', stim_name='ratio_single_freqs',
# stim_save_name='ratio_single_freqs', base_name='ratio_single_freqs')
####################################################
# base updates
    # and for these cells we reclassify it
cells_there = frame_load['cell'].unique() # np.array(frame_load[frame_load['cell'].isin(frame_base.cell)].cell)
for cell in cells_there:
len_indexed = len(frame_load[frame_load['cell'] == cell])
# frame_base.filter(like = 'cv')#cv_burst_corr_individual
if cell in frame_base.cell.unique():
            # here the reclassified cell type is assigned
try:
frame_load.loc[frame_load['cell'] == cell, 'cell_type_reclassified'] = [np.array(
frame_base[frame_base['cell'] == cell][
'cell_type_reclassified'])] * len_indexed # [np.array(frame_base[frame_base['cell'] == cell]['cell_type_reclassified'].iloc[0])]*len_indexed
except:
print('frame load stuff')
embed()
# if cell == '2010-07-29-ae-invivo-1':
# print('base thing')
# embed()
            # if present, we now fill in the baseline values
update_factors = [
'burst_factor_burst_corr', 'burst_factor_burst_corr_individual',
'sum_corr'] # , 'fr' 'ser', 'ser_first','burst_fraction_burst_corr', 'burst_fraction_burst_corr_individual',
for up_factor in update_factors:
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell][up_factor]))):
frame_load.loc[frame_load['cell'] == cell, up_factor + '_base'] = [np.array(
frame_base[frame_base['cell'] == cell][up_factor])] * len_indexed
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['cvs']))).all():
# embed()
if np.array(frame_base[frame_base['cell'] == cell]['cv_min']) > \
frame_load.loc[frame_load['cell'] == cell, 'cv_stim'].iloc[0]:
print('cv_base>cv_stim')
# if np.array(frame_base[frame_base['cell'] == cell]['cv_min']) < np.array(frame_base[frame_base['cell'] == cell]['cv'])-0.1:
# print('cv_min<cv-0.1')
###################################
                # we only do this for these two cells; otherwise it is probably not a good idea
cv_min_for = ['2021-06-18-ae-invivo-1', '2018-09-06-ag-invivo-1']
if cell in cv_min_for:
frame_load.loc[frame_load['cell'] == cell, 'cv_base'] = [np.nanmin(
np.array(frame_base[frame_base['cell'] == cell]['cvs'].iloc[0][0]))] * len_indexed
names = ['fr', 'ser_first', 'ser', 'burst_fraction_burst_corr',
'burst_fraction_burst_corr_individual', ]
for name in names:
if 'fraction' in name:
frame_load.loc[frame_load['cell'] == cell, name + '_base'] = [np.array(
frame_base[frame_base['cell'] == cell][name.replace('fraction', 'fractions')].iloc[0][
0])[np.nanargmin(
np.array(frame_base[frame_base['cell'] == cell]['cvs'].iloc[0][0]))]] * len_indexed
else:
frame_load.loc[frame_load['cell'] == cell, name + '_base'] = [np.array(
frame_base[frame_base['cell'] == cell][name + 's'].iloc[0][0])[np.nanargmin(
np.array(frame_base[frame_base['cell'] == cell]['cvs'].iloc[0][0]))]] * len_indexed
                # todo: the serial correlations are still missing here
else:
frame_load.loc[frame_load['cell'] == cell, 'cv_base'] = [np.nanmean(
np.array(frame_base[frame_base['cell'] == cell]['cvs'].iloc[0][0]))] * len_indexed
names = ['fr', 'ser_first', 'ser', 'burst_fraction_burst_corr',
'burst_fraction_burst_corr_individual', ]
for name in names:
if 'fraction' in name:
frs = np.nanmean(np.array(
frame_base[frame_base['cell'] == cell][name.replace('fraction', 'fractions')].iloc[0][
0]))
else:
frs = np.nanmean(np.array(frame_base[frame_base['cell'] == cell][name + 's'].iloc[0][0]))
if name == 'fr':
if np.isnan(frs):
frs = np.nanmean(frame_base[frame_base['cell'] == cell]['frs_given'].iloc[0][0])
frame_load.loc[frame_load['cell'] == cell, name + '_base'] = [frs] * len_indexed
# if 'cv' in name:
# if frame_load.loc[frame_load['cell'] == cell, name+'_base'] > 1.5:
# embed()
# print('ser test')
# embed()
burst_corrs = ['burst_corr_individual']
for burst_c in burst_corrs:
lim_here = find_lim_here(cell, burst_c)
frame_load.loc[frame_load['cell'] == cell, 'lim_individual'] = [lim_here] * len_indexed
# embed()
frame_load.loc[frame_load['cell'] == cell, 'cv_min'] = [np.nanmin(
np.array(frame_base[frame_base['cell'] == cell]['cvs'].iloc[0][0]))] * len_indexed
# frame_load.loc[frame_load['cell'] == cell, 'cv_min'] = [np.nanmin(
# np.array(frame_base[frame_overview['cell'] == cell]['cvs'].iloc[0][0]))] * len_indexed
# embed()
if np.isnan(frame_load.loc[frame_load['cell'] == cell, 'fr_base'].iloc[0]) & (
not np.isnan(frame_load.loc[frame_load['cell'] == cell, 'cv_base'].iloc[0])):
print('isnan diff')
embed()
test = False
if test:
test_frame()
# if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['cvs']))):
# frame_load.loc[frame_load['cell'] == cell,'cv_base_min'] = [np.array(np.min(frame_base[frame_base['cell'] == cell]['cvs']))]*len_indexed
# embed()
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['cv_burst_corr_individual']))):
frame_load.loc[frame_load['cell'] == cell, 'cv_base_w_burstcorr'] = [np.array(
frame_base[frame_base['cell'] == cell]['cv_burst_corr_individual'])] * len_indexed
# print('updated base cv')
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['fr_burst_corr_individual']))):
frame_load.loc[frame_load['cell'] == cell, 'fr_base_w_burstcorr'] = [np.array(
frame_base[frame_base['cell'] == cell]['fr_burst_corr_individual'])] * len_indexed
# print('updated base cv')
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['cv_burst_corr']))):
frame_load.loc[frame_load['cell'] == cell, 'cv_base_w_burstcorr1.5'] = [np.array(
frame_base[frame_base['cell'] == cell]['cv_burst_corr'])] * len_indexed
# print('updated base cv')
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['fr_burst_corr']))):
frame_load.loc[frame_load['cell'] == cell, 'fr_base_w_burstcorr1.5'] = [np.array(
frame_base[frame_base['cell'] == cell]['fr_burst_corr'])] * len_indexed
# print('updated base cv')
if not np.isnan(np.sum(np.array(frame_base[frame_base['cell'] == cell]['cv_given']))):
frame_load.loc[frame_load['cell'] == cell, 'cv_given'] = [np.array(
frame_base[frame_base['cell'] == cell]['cv_given'])] * len_indexed
# if np.isnan(np.sum(np.array(frame_load[frame_load['cell'] == cell]['cv_base']))):
# print('cv is nan')
# embed()
# embed()
test = False
    # cv_stim > cv_base affects 5 of the 230 cells; probably not worth a special exception
    # otherwise this can still be discussed
    # we do not change cv_base in case cv_base is larger!
# embed()
if test:
cv_test()
    # there is one cell without baseline, but that is fine, we simply exclude it
if test:
frame_file = setting_overview_score(frame_load, min_amp='min')
null_val = frame_file['cv_base'] - frame_file['fr_base']
frame_file[np.isnan(null_val)].cell
# embed()
# embed()
test = False
if test:
frame_amp = frame_load[frame_load['cell_type_reclassified'] == ' Ampullary'] # ['cv_base']
frame_amp[['cv_base', 'cv_stim']]
if save:
if 'pkl' in save_name_here:
frame_load.to_pickle(save_name_here)
else:
frame_load.to_csv(save_name_here)
return frame_load
def contra_burst_update(cont_total, frame_load, save_name_here, cv_base_update=False):
########################################
    # here the CVs from the base file are added as well!
    # conclusion: the names are correct; this is done per file because cv_stim depends on the file!
if '__burstIndividual_' in save_name_here:
        # this is the one without burst corr
frame_load['cv_stim_w_burstcorr'] = frame_load['cv_stim'] #
        # this is the backup in case there is no baseline
        # for now we use the stimulus CV as the baseline CV
frame_load['cv_base_w_burstcorr'] = frame_load['cv_stim'] # frame_load['cv_wo_burstcorr']
save_name_withouth = save_name_here.replace('__burstIndividual_', '')
if os.path.exists(save_name_withouth):
cont_total = True
# print('updating frist')
if cont_total:
cells_there, frame_load = update_cv_RAMs(frame_load, save_name_withouth, stim_name='cv_stim_wo_burstcorr',
stim_save_name='cv_stim', base_name='cv_base')
vars = ['coherence_', 'mutual_informaiton_']
for var in vars:
frame_load[var + 'w_burstcorr'] = frame_load[var] # frame_load['cv_wo_burstcorr']
# 'coherence_', 'mutual_informaiton_'
cells_there, frame_load = update_cv_RAMs(frame_load, save_name_withouth, stim_name=var + 'wo_burstcorr',
stim_save_name=var, base_name=var + 'wo_burstcorr')
elif '__burstIndividual_' not in save_name_here:
        # this is without burst corr
try:
frame_load['cv_stim_wo_burstcorr'] = frame_load['cv_stim'] #
except:
print('reload problems')
embed()
        # this is the backup in case there is no baseline
        # for now we use the stimulus CV as the baseline CV
        # no, we do not do that
if cv_base_update:
frame_load['cv_base'] = frame_load['cv_stim'] # frame_load['cv_wo_burstcorr']
# print('updating second')
# embed()
        # this is the one with burst corr
save_name_withouth = save_name_here.replace('.csv', '__burstIndividual_.csv')
if os.path.exists(save_name_withouth):
cont_total = True
if cont_total:
cells_there, frame_load = update_cv_RAMs(frame_load, save_name_withouth, stim_name='cv_stim_w_burstcorr',
stim_save_name='cv_stim', base_name='cv_base_w_burstcorr')
            # todo: possibly turn this into its own frame
vars = ['coherence_', 'mutual_informaiton_']
for var in vars:
try:
frame_load[var + 'wo_burstcorr'] = frame_load[var] # frame_load['cv_wo_burstcorr']
except:
                    print('coherence not there')
embed()
# 'coherence_', 'mutual_informaiton_'
cells_there, frame_load = update_cv_RAMs(frame_load, save_name_withouth, stim_name=var + 'w_burstcorr',
stim_save_name=var, base_name=var + 'w_burstcorr')
# frame_amp_load.file_name
# frame_amp_load_with.file_name
test = False
if test:
test_frame2()
# embed()
return frame_load
def update_cv_RAMs(frame_load, save_name_withouth, stim_name='cv_stim_w_burstcorr', stim_save_name='cv_stim',
base_name='cv_base_w_burstcorr'):
frame_load_with = pd.read_csv(save_name_withouth, index_col=0)
cells_there = frame_load['cell'].unique() # np.array(frame_load[frame_load['cell'].isin(frame_base.cell)].cell)
# embed()
try:
if 'index' in frame_load_with.keys():
frame_load_with.pop('index')
frame_load_with = frame_load_with.reset_index()
a_key = list(frame_load_with.keys())
if 'eodf_size' in a_key:
a_key.remove('eodf_size')
frame_load_with = frame_load_with.drop_duplicates(subset=a_key)
if 'index' in frame_load_with.keys():
frame_load_with.pop('index')
# embed()
if 'index' in frame_load.keys():
frame_load.pop('index')
frame_load = frame_load.reset_index()
a_key = list(frame_load.keys())
if 'eodf_size' in a_key:
a_key.remove('eodf_size')
# a = a.dropna(axis=1, how='all').drop_duplicates(subset=a_key)
frame_load = frame_load.drop_duplicates(subset=a_key)
if 'index' in frame_load.keys():
frame_load.pop('index')
except:
print('index something')
embed()
for cell in cells_there:
# print('resaving')
frame_here = frame_load.loc[frame_load['cell'] == cell]
frame_load_with_here = frame_load_with.loc[frame_load_with['cell'] == cell]
amps = frame_here.amp.unique()
file_names = frame_here.file_name.unique()
snippets = frame_here.snippets.unique()
for amp in amps:
for file_name in file_names:
# snippets = np.unique()
for snippet in snippets:
frame_amp_load = frame_here[(frame_here.amp == amp) & (frame_here.file_name == file_name) & (
frame_here.snippets == snippet)]
# embed()
if 'snippets' in frame_load_with_here.keys():
frame_amp_load_with = frame_load_with_here[
(frame_load_with_here.amp == amp) & (frame_load_with_here.file_name == file_name) & (
frame_load_with_here.snippets == snippet)]
else:
frame_amp_load_with = frame_load_with_here[
(frame_load_with_here.amp == amp) & (frame_load_with_here.file_name == file_name)]
if (len(np.array(frame_amp_load.loc[frame_amp_load['cell'] == cell])) > 0):
if (len(np.array(frame_amp_load_with.loc[frame_amp_load_with['cell'] == cell])) > 0):
try:
frame_load.loc[frame_amp_load.index, stim_name] = np.array(
frame_amp_load_with[stim_save_name]) # ]*len(frame_amp_load.index) #
except:
print('there thing')
                                    # ok, I just do it this way now, since something was saved incorrectly there
                                    # todo: this does not remove all duplicates yet
                                    # but since it only occurs when a duplicate shows up during the computation, it can simply be solved this way
                                    # in the long run a better duplicate clean-up would be good, though
# print('something')
try:
frame_load.loc[frame_amp_load.index, stim_name] = [np.array(
frame_amp_load_with[stim_save_name])] * len(frame_amp_load.index)
except:
print('something')
embed()
                                # this is the backup in case there is no base recording
                                # for now we use the stimulus CV as the base CV
# embed()
try:
frame_load.loc[frame_amp_load.index, base_name] = np.array(
frame_amp_load_with[stim_save_name]) # frame_load['cv']
except:
frame_load.loc[frame_amp_load.index, base_name] = [np.array(
frame_amp_load_with[stim_save_name])] * len(frame_amp_load.index)
print('there thing 2')
# embed()
# if np.sum(frame_load.loc[frame_load['cell'] == cell]['cell_type_reclassified'] == ' Ampullary') > 0:
# print('frame thing')
# embed()
return cells_there, frame_load
def rainbow_cmap(frame, nrs=30, cm='plasma_r'):
    # evenly spaced colormap
# embed()
    if isinstance(cm, str):
        cm = plt.get_cmap(cm)
col = [cm(float(i) / (nrs)) for i in range(len(frame))]
return col
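# Usage sketch for rainbow_cmap() above (illustrative only, not part of the analysis
# pipeline): one color per row of a small, made-up DataFrame, evenly spaced along the
# requested colormap.
def _example_rainbow_cmap_sketch():
    frame = pd.DataFrame({'cv_base': np.linspace(0.1, 1.0, 10)})  # hypothetical frame
    colors = rainbow_cmap(frame, nrs=len(frame))  # one RGBA tuple per row
    return colors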
def set_same_ylim(ax_upper=[], ylim_type='ylim', up=''):
if len(ax_upper) < 1:
# embed()
fig = plt.gcf()
ax_upper = fig.get_axes()
if ylim_type == 'ylim':
ylim_min = []
ylim_max = []
for ax in ax_upper:
ylim_min.append(ax.get_ylim()[0])
ylim_max.append(ax.get_ylim()[1])
# embed()
for ax in ax_upper:
if up == 'up':
ax.set_ylim(-np.max(ylim_max), np.max(ylim_max))
else:
ax.set_ylim(np.min(ylim_min), np.max(ylim_max))
else:
ylim_min = []
ylim_max = []
for ax in ax_upper:
ylim_min.append(ax.get_xlim()[0])
ylim_max.append(ax.get_xlim()[1])
# embed()
for ax in ax_upper:
ax.set_xlim(np.min(ylim_min), np.max(ylim_max))
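# Usage sketch for set_same_ylim() above (illustrative only): after plotting into several
# axes, calling it equalizes their y-limits; with no arguments it acts on all axes of the
# current figure, and up='up' makes the limits symmetric around zero.
def _example_set_same_ylim_sketch():
    fig, axs = plt.subplots(1, 2)
    axs[0].plot([0, 1], [0, 1])
    axs[1].plot([0, 1], [0, 5])
    set_same_ylim(axs)  # both axes now share a common y-range
    return fig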
def find_first_second(b, names_mt_gwn, m, mt, data_between_2017_2018='', mm=0, ends_mt=''):
try:
stimulus_length = mt.extents[:][m]
except:
print('stim length thing')
embed()
first = mt.positions[:][m]
if data_between_2017_2018 == 'all':
        # the problem is that here the info in mt.extents is partly not correct
if (mt.positions[:][m] + mt.extents[:][m]) > ends_mt[mm]:
second = ends_mt[mm] - mt.positions[:][m]
else:
second = mt.extents[:][m]
else:
second = mt.extents[:][m]
#######################################
    # with this I restrict the extent
other_tag = find_restricted_tag(first, second, b, names_mt_gwn)
if len(other_tag) > 0:
print('changed end of MT')
second = np.min(other_tag) - first
# second = other_tag
minus = mt.positions[:][m]
return first, minus, second, stimulus_length
def load_spikes_array(b):
try:
sp_data_array = b.data_arrays['Spikes-1']
except:
sp_data_array = b.data_arrays['spikes-1']
return sp_data_array
def link_arrays_spikes(b, first=0, second=0, minus_spikes=0, printing=False, ):
sp_data_array = load_spikes_array(b)
try:
        # DEFAULT! FOR THE SPIKES THIS IS FASTER FOR SOME REASON!
        # both do the same thing, but this one is simply faster
        # and it is faster even for an hour-long recording
t1 = time.time()
spikes_mt = spikes_cut_manual(sp_data_array, first, second, minus_spikes)
# embed()
if printing:
            print('manual slicing ' + str(time.time() - t1))
except:
t1 = time.time()
spikes_mt = sp_data_array.get_slice([first], [second], nix.DataSliceMode.Data)[:] - minus_spikes
if printing:
print('slicing ' + str(time.time() - t1))
    # in case the slice returns spikes beyond the requested window, apply the cut manually
if np.max(spikes_mt) > first + second - minus_spikes:
spikes_mt = spikes_mt[(spikes_mt > first - minus_spikes) & (spikes_mt < first + second - minus_spikes)]
if printing:
print('slicing1 ' + str(time.time() - t1))
test = False
# if test:
# link_arrays_eod()
# embed()
return spikes_mt
def spikes_cut_manual(sp_data_array, first, second, minus_spikes):
# print('link array problem')
# embed()
try:
spikes = sp_data_array[:]
except:
# embed()
# print('spikes inflated, therefore cut them')
try:
spikes = sp_data_array.data[
                0:-688]  # todo: the strange thing is it happens exactly at 0:-688, not one point before or after
except:
# print('here we dont continue')
spikes = []
if len(spikes) > 0:
spikes_mt = spikes[(spikes > first) & (spikes < second + first)] - minus_spikes
else:
spikes_mt = []
# embed()
# except:
return spikes_mt
def open_group_gwn(group, file_name, cut_off, sd, data_between_2017_2018=''):
id_g = group[1].id
c = group[0]
if data_between_2017_2018 != 'all':
files_load = group[1].file
else:
files_load = file_name
repro = group[1].repro
if len(id_g.unique()) != len(repro):
print('len problem1')
embed()
# embed()
if len(np.unique(np.array(files_load))) > 1:
print('files to long figure out what this is about')
embed()
# embed()
# file_name = file_names.iloc[0]
# file_name_save = file_name_saves.iloc[0]
c_orig = group[1].c_orig.iloc[0]
c_unit = group[1].c_unit.iloc[0]
c_len = group[1].c_len.iloc[0]
if data_between_2017_2018 != 'all':
cut_off = group[1].cut_off.iloc[0]
sd = group[1].sd.iloc[0]
start = group[1].start.iloc[0]
end = group[1].end.iloc[0]
rep = group[1].repro.iloc[0]
amplsel = group[1].amplsel.iloc[0]
return sd, start, end, rep, cut_off, c_len, c_unit, c_orig, c_len, files_load, c, id_g, amplsel
def find_indices_to_match_contrats(grouped, group, mt, id_g, mt_ids, data_between_2017_2018=''):
ends_mt = []
    # in case there are no ids, I just sort them based on their rank
    # for these data we cannot get the contrast from the tag; we really have to sort this by time,
    # otherwise it does not work, except maybe if the nix files are recreated
    # because these tags do not overlap, we go through them and check into which part each one fits
if data_between_2017_2018 == 'all':
indices = []
for gr in range(len(grouped)):
            # now there can be several file stimuli here
print(gr)
try:
starts = group[1].start
ends = group[1].end
indices = []
ends_mt = []
for p in range(len(starts)):
ind = np.arange(0, len(mt.positions[:]), 1)[(mt.positions[:] > starts.iloc[p]) & (
mt.positions[:] < ends.iloc[p])] # np.arange(0, len(mt.positions[:]), 1)
indices.extend(ind)
ends_mt.extend([ends.iloc[p]] * len(ind))
except:
print('start problem')
embed()
else:
indices = find_indices_to_match_contrasts_new(id_g, mt_ids)
return indices, ends_mt
def find_indices_to_match_contrasts_new(id_g, mt_ids):
indices = []
for repro_id in id_g:
indices_all = np.where(mt_ids == repro_id)[0]
indices.extend(indices_all)
return indices
def find_feature_gwn_id(mt):
features = []
id = []
for ff, f in enumerate(mt.features):
if 'id' in f.data.name:
id = f.data.name
else:
features.append(f.data.name)
#############################################
    # 'FileStimulus-file-gaussian noise-white noise psd(f) ~ 1 f<fc-1' is special!
    # ok, with this one we have a problem: 'FileStimulus-file-gaussian noise-white noise psd(f) ~ 1 f<fc-1'
    # it has several noise parts in one tag; before, I had one noise per tag and simply found all mts belonging to a tag via
    # the id, but here there is neither an ID nor only a single noise part. Maybe I will exclude this
    # for this odd stimulus the mts seem to contain the correct info,
    # the only question is where to get the contrasts from now
    # the issue persists up to 2020-03-11-ac-invivo-1
    # this 'all' case is a thing of its own: it covers the case that there is no id,
    # which happens for the data between 2017 and 2018, where the metadata contain quite a lot of garbage
    # and the problem is that the contrast is in the metadata but the file name is in the mt, so we use the 'all' variant
if len(id) > 0:
mt_ids = mt.features[id].data[:]
data_between_2017_2018 = ''
else:
# mt.positions[:]
mt_ids = [mt.id] * len(mt.positions[:])
data_between_2017_2018 = 'all'
return features, id, data_between_2017_2018, mt_ids
def exclude_files(file_stimulus): # StimTInsMissedF
return ('chirptrace' not in file_stimulus.lower()) & ('zap.dat' not in file_stimulus) & (
'contrast_sweep.dat' not in file_stimulus) & ('phase_sweep.dat' not in file_stimulus) & (
'StimTMissed' not in file_stimulus) & ('StimTInsMissed' not in file_stimulus)
def find_files_in_repros(dataset, clean_file=False):
repros = dataset.repros
cut_offs = []
files = []
sds = []
    # here I first find all gwns that exist
for r in repros:
if 'FileStimulus' in r:
file_stimulus = dataset.repro_runs(r)[0]
# embed()
file = file_stimulus.metadata['RePro-Info']['settings']['file'][0][0].split('/')[-1]
            # we exclude these here because we do not need them for now
if exclude_files(file):
try:
cut_off = file_stimulus.metadata['RePro-Info']['settings']['uppercutoff'][0][0]
except:
if 'gwn' in file:
cut_off = int(file.split('gwn')[1].split('Hz')[0])
elif 'blwn' in file:
cut_off = int(file.split('blwn')[1].split('Hz')[0])
else:
cut_off = float('nan')
# print('cut off upper problem')
# embed()
sd = file_stimulus.metadata['RePro-Info']['settings']['sigstdev'][0][0]
if clean_file:
if 'zap' in file:
print('clean_file zap')
embed()
file_name, file_name_save, cut_off = file_name_clean_up(file, cut_off)
try:
cut_offs.append(cut_off)
except:
print('cut off append problem2')
embed()
files.append(file)
sds.append(sd)
return files, sds, cut_offs
def find_file_names(nix_name, mt, names_mt_gwn):
try:
##########################################
        # the mt has a cut-off, a file and a sd, then everything is clear
if names_mt_gwn in mt.metadata.sections:
cut_off = mt.metadata.sections[names_mt_gwn].props['fc'].values[
0] # sections[0]
file = mt.metadata.sections[names_mt_gwn].props['File'].values[
0] # sections[0]
sd = mt.metadata.sections[names_mt_gwn].props['sd'].values[0] # sections[0]
# new_v = 'new'
else:
# try:
cut_off = mt.metadata['fc']
file = mt.metadata['File']
sd = mt.metadata['sd']
# new_v = 'old'
# except:
# cut_off = 300
# file = '_baseline_'
# sd = 0.3
# new_v = 'old'
gwn_save, file_name, file_name_save = adapt_file_name(file, names_mt_gwn, sd)
# print('section problem')
except:
try:
dataset = rlx.Dataset(nix_name)
except:
print('dataset thing')
embed()
# embed()
contrasts = pd.DataFrame()
contrasts2 = pd.DataFrame()
idx_c = 0
# embed()
        # if we have a file here, everything is fine as well
files, sds, cut_offs = find_files_in_repros(dataset, clean_file=False)
dataset.close()
#################################
        # it gets more complex when we have several files per run
        # then we extract all of those files here
if len(np.unique(files)) > 1:
print('ambiguity problem')
file_name = []
file_name_save = []
cut_off = []
for f in range(len(files)):
#############
                # attention: here we have to make sure the same files are not included twice!
# if names_mt_gwn in files[f]:
sd = sds[f]
file = files[f]
cut_off0 = cut_offs[f]
if 'zap' in file:
print('clean_file2 zap')
embed()
file_name0, file_name_save0, cut_off0 = file_name_clean_up(file, cut_off0)
if file_name0 not in file_name:
file_name.append(file_name0)
file_name_save.append(file_name_save0)
cut_off.append(cut_off0)
file = files
sd = sds
else:
file = np.unique(files)[0]
if exclude_files(file):
if 'zap' in np.unique(files)[0]:
print('clean_file3 zap')
embed()
file_name, file_name_save, cut_off = file_name_clean_up(np.unique(files)[0], np.unique(cut_offs)[0])
sd = np.unique(sds)[0]
else:
file_name = float('nan')
file_name_save = float('nan')
sd = float('nan')
cut_off = float('nan')
# embed()
try:
file_name, file_name_save, cut_off, file, sd
except:
print('file name problem')
embed()
return file_name, file_name_save, cut_off, file, sd
def adapt_file_name(file, names_mt_gwn, sd):
if 'gwn' in str(names_mt_gwn):
file_name = str(names_mt_gwn)[0:-3] + str(sd)
file_name_save = str(names_mt_gwn)[0:-3] + str(sd) # [0:-3]
elif 'FileStimulus-file-gaussian' in str(names_mt_gwn):
file_name = str(file)[0:-1] + str(sd)
file_name_save = str(file)[0:-1] + str(sd) # [0:-3]
# file_name = 'FileStimulus-file-gaussian' + str(cut_off)
# file_name_save = 'FileStimulus-file-gaussian' + str(names_mt_gwn)
elif 'noise' in str(names_mt_gwn):
file_name = str(file)[0:-1] + str(sd)
file_name_save = str(file)[0:-1] + str(sd) # [0:-3]
# file_name = 'FileStimulus-file-gaussian' + str(cut_off)
# file_name_save = 'FileStimulus-file-gaussian' + str(names_mt_gwn)
elif 'FileStimulus-file-' in str(names_mt_gwn):
file_name = str(file) # [0:-1] #+ str(sd)
file_name_save = str(file) # [0:-1]
# elif names_mt_gwn == 'InputArr_400hz_30s-1':
# file_name = str(names_mt_gwn)[0:-2]
# file_name_save = str(names_mt_gwn)[0:-2]
elif 'InputArr' in str(names_mt_gwn):
file_name = str(names_mt_gwn)[0:-3]
file_name_save = str(names_mt_gwn)[0:-2]
else:
file_name = '_Baseline_'
file_name_save = '_Baseline_'
gwn_save = names_mt_gwn.replace(" ", "")
gwn_save = gwn_save.replace("%", "_")
gwn_save = gwn_save.replace("<", "_")
gwn_save = gwn_save.replace("~", "_")
return gwn_save, file_name, file_name_save
def load_rlxnix(nix_name):
# warnings.filterwarnings("ignore", message="WARNING:root:MultiTag type relacs.stimulus.segment")
try:
        dataset = rlx.Dataset(nix_name)  # todo: I want to get rid of this warning
rlx_problem = True
except:
rlx_problem = False
dataset = []
print('rlx problem')
return dataset, rlx_problem
def get_contrasts_over_rlx_calc_RAM(dataset, propertie_frame=[], idx_c=0):
# propertie_frame = []
ids = []
# embed()
if len(propertie_frame) < 1:
propertie_frame = pd.DataFrame()
propertie_frame2 = pd.DataFrame()
# embed()
idx_c2 = 0
repros = dataset.repros
file_extras = []
contrasts = []
files = []
for r in repros:
if 'FileStimulus' in r:
file_extras, propertie_frame, contrasts, files, idx_c = find_contrasts_file_gwn(file_extras, dataset, r,
contrasts, files,
propertie_frame, idx_c)
else:
file_stimulus = dataset.repro_runs(r)[0]
propertie_frame2.loc[idx_c, 'id'] = file_stimulus.id
idx_c2 += 1
# embed()
return file_extras, idx_c, propertie_frame, propertie_frame2
def find_contrasts_file_gwn(file_extras, dataset, r, contrasts, files, propertie_frame, idx_c):
file_stimulus = dataset.repro_runs(r)[0]
test = False
try:
# key = [k for k in file_stimulus]
key = str(list(map(np.array, file_stimulus.metadata.keys()))[-1])
key2 = str(list(map(np.array, file_stimulus.metadata[key].keys()))[-1])
# file_stimulus.metadata[key][key2]
if test:
contrasts.append(file_stimulus.metadata[key][key2]['contrast'][0][0])
files.append(file_stimulus.metadata[key][key2]['file'][0])
print(file_stimulus)
except:
key = 'RePro-Info'
key2 = 'settings'
# file_stimulus.metadata[key][key2]
if test:
contrasts.append(file_stimulus.metadata[key][key2]['contrast'][0][0])
files.append(file_stimulus.metadata[key][key2]['file'][0])
print(file_stimulus)
# embed()
if exclude_files(file_stimulus.metadata[key][key2]['file'][0][0]):
# if all == 'all':
# if len(file_stimulus.metadata[key][key2]['file'][0]) != 1:
# embed()
# if (len(file_stimulus.metadata[key][key2]['file'][0]) == 1) & (file_stimulus.metadata[key][key2]['file'][0][0] == ''):
# print('empty file problem')
# embed()
# embed()
if file_stimulus.metadata[key][key2]['contrast'][1] == '%':
propertie_frame.loc[idx_c, 'c'] = file_stimulus.metadata[key][key2]['contrast'][0][0]
elif file_stimulus.metadata[key][key2]['contrast'][1] == '':
propertie_frame.loc[idx_c, 'c'] = file_stimulus.metadata[key][key2]['contrast'][0][0] * 100
else:
try:
propertie_frame.loc[idx_c, 'c'] = file_stimulus.metadata[key][key2]['contrast'][0][0]
except:
print('frame orig problem')
# if np.isinf(propertie_frame.loc[idx_c, 'c']):
# print('isinf')
# embed()
propertie_frame.loc[idx_c, 'c_orig'] = file_stimulus.metadata[key][key2]['contrast'][0][0]
try:
propertie_frame.loc[idx_c, 'c_unit'] = file_stimulus.metadata[key][key2]['contrast'][1] # [1]
except:
print('unit problem')
embed()
propertie_frame.loc[idx_c, 'c_len'] = len(file_stimulus.metadata[key][key2]['contrast'])
# embed()
        # ok, here there is always a filestimulus with a start_time and a duration
# if all == 'all':
propertie_frame.loc[idx_c, 'id'] = file_stimulus.start_time
propertie_frame.loc[idx_c, 'start'] = file_stimulus.start_time
propertie_frame.loc[idx_c, 'end'] = file_stimulus.start_time + file_stimulus.duration
        # we always assume that a repro run has only one noise, but is that actually true?
propertie_frame.loc[idx_c, 'id'] = file_stimulus.id
if (type(propertie_frame.loc[idx_c, 'c']) == str) | (
type(propertie_frame.loc[idx_c, 'c']) == numpy.str_):
if 'inf' in propertie_frame.loc[idx_c, 'c']:
propertie_frame.loc[idx_c, 'c'] = float('inf')
elif 'nan' in propertie_frame.loc[idx_c, 'c']:
propertie_frame.loc[idx_c, 'c'] = float('nan')
else:
propertie_frame.loc[idx_c, 'c'] = 13
propertie_frame.loc[idx_c, 'repro'] = r
file_extra = []
for f in range(len(file_stimulus.metadata[key][key2]['file'][0])):
file_extra.append(file_stimulus.metadata[key][key2]['file'][0][f].split('/')[-1])
# file_extra = file_stimulus.metadata[key][key2]['file']
file_extras.append(file_extra)
# file_extras = list(file_extras)
        # ok, I think the second entry is always the unit: the first value in the brackets is the value and the second is the unit!
if (len(file_stimulus.metadata[key][key2]['file'][0]) == 1):
# file_extras.pop('')
# embed()
file_all, propertie_frame, idx_c = files_in_repros(propertie_frame, idx_c, file_stimulus)
# if (len(file_stimulus.metadata[key][key2]['file'][0]) == 2) & (
# file_stimulus.metadata[key][key2]['file'][0][1] == ''):
# file_extra = file_stimulus.metadata[key][key2]['file'][0][0].split('/')[-1]
# else:
            # # for the case that more than one noise is assigned to a multitag, which is a problem I guess
            # apparently more than one file can be assigned to a single tag
save_structure_to_frame(idx_c, propertie_frame, file_all, name='file_all')
propertie_frame.loc[idx_c, 'file_all_len'] = len(file_all)
# print('file_all' + str(file_extra))
else:
several_files = True
# print('several files')
            # there is simply this case where there are several files
try:
if 'amplsel' in file_stimulus.metadata[key][key2].keys():
propertie_frame.loc[idx_c, 'amplsel'] = file_stimulus.metadata[key][key2]['amplsel'][0][0]
else:
propertie_frame.loc[
                idx_c, 'amplsel'] = 'unknown'  # this is just for the old cells; there I assume it was converted in the same way
except:
print('ampslthing')
embed()
# propertie_frame
# if len(file_all)>0:
# print('file_all too long')
# embed()
# embed()
idx_c += 1
return file_extras, propertie_frame, contrasts, files, idx_c
def files_in_repros(propertie_frame, idx_c, file_stimulus):
# file_stimulus = dataset.repro_runs(r)[0]
# embed()
try:
key = 'RePro-Info'
key2 = 'settings'
# file = file_stimulus.metadata[key][key2]['file'][0][0].split('/')[-1]
file = file_stimulus.metadata[key][key2]['file'][0][0].split('/')[-1]
except:
# try:
# key = [k for k in file_stimulus]
key = str(list(map(np.array, file_stimulus.metadata.keys()))[-1])
key2 = str(list(map(np.array, file_stimulus.metadata[key].keys()))[-1])
# file = file_stimulus.metadata['RePro-Info']['settings']['file'][0][0].split('/')[-1]
file = file_stimulus.metadata[key][key2]['file'][0][0].split('/')[-1]
# if (len(file_stimulus.metadata[key][key2]['file'][0]) == 2) & (file_stimulus.metadata[key][key2]['file'][0][1] == ''):
file_all = file_stimulus.metadata[key][key2]['file']
file_holder = file_stimulus.metadata[key][key2]['file'][0][0]
sd = file_stimulus.metadata[key][key2]['sigstdev'][0][0]
upper_holder = file_stimulus.metadata[key][key2]
propertie_frame = get_file_also_for_dat(propertie_frame, idx_c, file_holder)
propertie_frame.loc[idx_c, 'sd'] = sd
return file_all, propertie_frame, idx_c
def get_file_also_for_dat(propertie_frame, idx_c, file_holder):
file = file_holder.split('/')[-1]
# embed()
cut_off_test = float('nan')
# if 'gwn' in file:
# cut_off_test = int(file.split('gwn')[1].split('Hz')[0])
# elif 'blwn' in file:
# cut_off_test = int(file.split('blwn')[1].split('Hz')[0])
cut_off = 'nocutoff'
if 'gwn' in file:
cut_off = int(file.split('gwn')[1].split('Hz')[0])
elif 'blwn' in file:
cut_off = int(file.split('blwn')[1].split('Hz')[0])
# try:
    # cut_off = upper_holder['uppercutoff'][0][0]  # ok, this stuff is simply wrong, isn't it
# except:
# if 'gwn' in file:
# cut_off = int(file.split('gwn')[1].split('Hz')[0])
# elif 'blwn' in file:
# cut_off = int(file.split('blwn')[1].split('Hz')[0])
# # print('cut off upper problem')
# # embed()
    # ok, from 2012 on this upper cut-off is not always correct, hence this extra clause here
# if not np.isnan(cut_off_test):
# if cut_off != cut_off_test:
# embed()
if 'zap' in file:
print('zap file 5')
embed()
# if file == '':
# print('file klammer problem')
# embed()
# exclude_files()
if exclude_files(file):
try:
file_name, file_name_save, cut_off = file_name_clean_up(file, cut_off)
except:
print('cut off not found')
embed()
cut_off_lower, cut_off_diff = calc_cut_off_diffs(file, cut_off)
if cut_off == 0:
print('cut off = 0')
embed()
propertie_frame.loc[idx_c, 'cut_off'] = cut_off
propertie_frame.loc[idx_c, 'cut_off_diff'] = cut_off_diff
propertie_frame.loc[idx_c, 'cut_off_lower'] = cut_off_lower
propertie_frame.loc[idx_c, 'file'] = file
propertie_frame.loc[idx_c, 'file_name'] = file_name
propertie_frame.loc[idx_c, 'file_name_save'] = file_name_save
return propertie_frame
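# Illustrative sketch only: how the upper cut-off frequency is parsed from noise file names
# such as 'gwn300Hz50s0.3' or 'blwn150Hz10s0.3' in get_file_also_for_dat() above (the file
# names used here are made up for the example).
def _example_cutoff_from_filename_sketch():
    cut_offs = {}
    for name in ['gwn300Hz50s0.3', 'blwn150Hz10s0.3']:
        if 'gwn' in name:
            cut_offs[name] = int(name.split('gwn')[1].split('Hz')[0])
        elif 'blwn' in name:
            cut_offs[name] = int(name.split('blwn')[1].split('Hz')[0])
    return cut_offs  # {'gwn300Hz50s0.3': 300, 'blwn150Hz10s0.3': 150}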
def get_data_array_names(b):
names = []
for array in b.data_arrays:
names.append(array.name)
return names
def find_array_names(b):
names_blocks = []
for stims in b.data_arrays:
names_blocks.append(stims.name)
return names_blocks
def get_sampling(b, load_eod_array='EOD'):
eod_data_array = b.data_arrays[load_eod_array]
return int(len(eod_data_array.get_slice([0], [0.1], nix.DataSliceMode.Data)[:]) * 10)
def link_arrays_eod(b, first=0, second=0, array_name='LocalEOD-1', printing=False, ):
names = find_array_names(b)
eod_data_array = b.data_arrays[array_name]
test = False
if test:
print('local eod problem')
embed()
test = False
if test:
test_eod2()
sampling = get_sampling(b, load_eod_array=array_name)
# sampling = len(eod_data_array)/b.metadata['Recording']['Recording duration']
# v1_data_array = b.data_arrays['V-1']
try:
        # THIS IS THE DEFAULT, IT IS MUCH MORE EFFICIENT
t1 = time.time()
# eod_mt = eod_data_array.get_slice([first/2], [second], nix.DataSliceMode.Data)[:]
eod_mt = eod_data_array.get_slice([first], [second], nix.DataSliceMode.Data)[:]
if printing:
print('eodslice' + str(time.time() - t1))
except:
# time_whole = np.arange(0, len(eod_data_array[:]) / 40000, 1 / 40000)
t1 = time.time()
time_whole = np.arange(0, len(eod_data_array[:]) / sampling, 1 / sampling)
eod_mt = eod_data_array[(time_whole >= first) & (time_whole < first + second)]
if printing:
print('eodmanuell' + str(time.time() - t1))
if len(eod_mt) < 1:
# sampling = len(eod_data_array.get_slice([0], [1], nix.DataSliceMode.Data)[:])
try:
time_whole = np.arange(0, len(eod_data_array[:]) / sampling, 1 / sampling)
eod_mt = eod_data_array[(time_whole >= first) & (time_whole < first + second)]
except:
eod_mt = []
test = False
if test:
test_v1()
# embed()
return eod_mt, sampling
def load_presaved(b, c, file_name_save, first, m, mt, sampling, second, duration_given=None, sd=None):
# embed()
# os.path.exists(load_folder_name('calc_RAM')+'/'+file_name_save+'.dat')
if duration_given:
time_array = np.arange(0, duration_given, 1 / sampling)
else:
time_array = np.arange(0, mt.extents[:][m], 1 / sampling)
# try:
eod_interp, time_wn_cut, _ = load_noise(file_name_save, mt, m, duration_given=duration_given)
# except:
# print('eod interp something')
# embed()
try:
eod_interp = interpolate(time_wn_cut, eod_interp,
time_array,
kind='cubic')
except:
print('interpolate problem')
embed()
# except:
# print('load noise file problem')
# # embed()
# eod_interp = eod_interp * c #/ 100
    # todo: the problem with the presaved stimulus is of course that it does not contain the stimulus artifacts
    # then again, maybe that is actually an advantage
eod_mt_global, sampling = link_arrays_eod(b, first,
second,
array_name='EOD') # 'EOD','LocalEOD-1'
deltat = 1 / sampling
try:
eodf_size = np.max(eod_mt_global) - np.min(eod_mt_global)
except:
print('eodf size not working')
embed()
####################################
    # the whole thing should be normalized to the contrast
    # #todo: still implement the contrast handling here
# embed()
eodf_size_scale = False
if not sd:
sd = np.std(eod_interp)
if eodf_size_scale:
eod_interp = eod_interp * eodf_size * c / 100
else:
# 'DEFAULT'
contrast_skale = c / 100
eod_interp = eod_interp * (contrast_skale / sd)
print(' c=' + str(c) + ' std lost ' + str(np.round(np.std(eod_interp) * 100, 1)))
if np.abs(c - np.std(eod_interp) * 100) > 2:
print('contrast adjust thing!')
eod_interp, time_wn_cut, _ = load_noise(file_name_save, mt, m, duration_given=duration_given)
try:
eod_interp = interpolate(time_wn_cut, eod_interp,
time_array,
kind='cubic')
except:
print('interpolate problem')
embed()
sd = np.std(eod_interp)
eod_interp = eod_interp * (contrast_skale / sd)
print(' c=' + str(c) + ' std lost ' + str(np.round(np.std(eod_interp) * 100, 1)))
if np.abs(c - np.std(eod_interp) * 100) > 2:
print('contrast adjust thing2!')
embed()
# print('contrast skale '+str( contrast_skale))
# embed()
test = False
if test:
###################
        # here I test the contrast by extracting the local EOD
eod_mt_local, sampling = link_arrays_eod(b, first,
second,
array_name='LocalEOD-1')
extracted = extract_am(eod_mt_local, np.arange(0, len(eod_mt_local) * deltat, deltat), norm=False)[0]
if ((np.max(eod_mt_local) > 0) & (np.min(eod_mt_local) > 0)) | (
(np.max(eod_mt_local) < 0) & (np.min(eod_mt_local) < 0)):
range_l = (np.abs(np.max(eod_mt_local)) - np.abs(np.min(eod_mt_local))) * 0.5
range_g = (np.abs(np.max(eod_mt_global)) - np.abs(np.min(eod_mt_global))) * 0.5
else:
range_l = (np.abs(np.max(eod_mt_local)) + np.abs(np.min(eod_mt_local))) * 0.5
range_g = (np.abs(np.max(eod_mt_global)) + np.abs(np.min(eod_mt_global))) * 0.5
#########
        # here I first compute the amplitude of the local EOD without the modulation
reduce_am = (np.max(extracted) - np.min(extracted))
range_l_reduced = range_l - reduce_am
# amp_provided_prev = std_here / range_l
        #### then I look at the AM std of the local EOD
std_here = np.std(extracted)
amp_provided = np.std(extracted / range_l_reduced)
# amp_provided = std_here/range_l_reduced
#### std of the signal
eod_interp_orig, time_wn_cut, _ = load_noise(file_name_save, mt, m, duration_given=duration_given)
eod_interp_orig = interpolate(time_wn_cut, eod_interp_orig,
time_array,
kind='cubic')
std_orig_loaded = np.std(eod_interp_orig)
#### scaled by the signal
contrast_skale = c / 100
eod_interp0 = eod_interp_orig * contrast_skale
std_loaded = np.std(eod_interp0)
#####
        # note: so this is the correct one
eod_interp1 = (eod_interp_orig / std_orig_loaded) * contrast_skale # (contrast_skale / std_orig_loaded )
std_loaded_potential = np.std(eod_interp1)
print(' c=' + str(c) + ' sd purified=' + str(np.round(std_loaded_potential * 100, 1)) + ' LocalEODamp=' + str(
np.round(amp_provided * 100, 1)) + ' contrast multiplied=' + str(
np.round(std_loaded * 100, 1)) + ' sd loaded=' + str(np.round(std_orig_loaded * 100, 1)))
# embed()
testtest = False
if testtest:
dirs = os.listdir('calc_RAM/')
for file_name_here in dirs:
if '.dat' in file_name_here:
eod_interp_orig, time_wn_cut, _ = load_noise(file_name_here.split('.dat')[0], mt, m,
duration_given=duration_given)
print(file_name_here + ' std ' + str(np.std(eod_interp_orig)))
# eodf_size = np.max(eod_mt_global) - np.min(eod_mt_global)
# sstim_presaved = sstim_presaved * eodf_size * c / 100
# embed()
# time_wn = np.arange(0,time_wn_cut[-1],1/sampling)
return deltat, eod_interp, eodf_size, sampling, time_array
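# Illustrative sketch only (not the pipeline itself): the contrast scaling used in
# load_presaved() above, which normalizes the loaded noise to unit standard deviation and
# then scales it so that its std equals contrast/100.
def _example_contrast_scaling_sketch(stimulus, contrast_percent):
    stimulus = np.asarray(stimulus, dtype=float)
    return stimulus * ((contrast_percent / 100.0) / np.std(stimulus))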
def load_noise(file_name, mt=None, m=None, duration_given=None, ):
# tag_data = b.tags[noise_names[0]]
# try:
# noise_path = tag_data.metadata.sections[0].sections[1].props['file'].values[0] # .value
# except:
# noise_path = tag_data.metadata.sections[0].sections[1].props['file'].values[0]
# print('noise load problem')
# embed()
# noise_data_name = noise_path.rsplit('/', 1)[1]
# file_name = 'InputArr_400hz_30s'
noise_data_name = file_name # 'gwn300Hz50s0.3'
stimulus_wn = []
time_wn = []
# try:
# if not os.path.exists(load_folder_name('calc_RAM') + '/' + noise_data_name + '.dat'):
# print('open problems: '+load_folder_name('calc_RAM') + '/' + noise_data_name + '.dat')
# embed()
# start_vals = resave_small_files(noise_data_name + '.dat', load_folder='calc_RAM') # load_folder_name(
version_comp, subfolder, mod_name_slash, mod_name, subfolder_path = find_code_vs_not()
if version_comp == 'public':
start = ''
else:
start = load_folder_name('calc_RAM') + '/'
for line in open(start + noise_data_name + '.dat'): # +
if not line.startswith("#") and line != "\n":
stimulus_wn.append(float(line.rstrip("\n").split()[1]))
time_wn.append(float(line.rstrip("\n").split()[0]))
# except:
# print('not working')
# embed()
if duration_given:
duration = duration_given
else:
if not mt:
try:
if 'Hz' in file_name:
duration = float(file_name.split('Hz')[1].split('s')[0])
else:
duration = float(file_name.split('hz')[1].split('s')[0].replace('_', ''))
except:
print('duration thing')
embed()
else:
duration = mt.extents[:][m]
# embed()
try:
time_wn_cut = np.array(time_wn)[np.array(time_wn) < duration]
eod_mt = np.array(stimulus_wn)[np.array(time_wn) < duration]
except:
print('time eod')
embed()
sampling = 1 / np.diff(time_wn_cut)[0]
return eod_mt, time_wn_cut, sampling
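# Illustrative sketch only: the plain-text format load_noise() above expects, i.e. comment
# lines starting with '#' and two whitespace-separated columns (time in seconds, stimulus
# value). The file name used here is hypothetical.
def _example_write_noise_dat_sketch(path='example_noise.dat'):
    times = np.arange(0, 0.01, 0.001)
    values = np.random.randn(len(times))
    with open(path, 'w') as f:
        f.write('# time stimulus\n')
        for t, v in zip(times, values):
            f.write(str(t) + ' ' + str(v) + '\n')
    return path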
def link_arrays(b, first=0, second=0, minus_spikes=0, load_eod_array='LocalEOD-1'):
try:
sp_data_array = b.data_arrays['Spikes-1']
except:
sp_data_array = b.data_arrays['spikes-1']
try:
spikes_mt = sp_data_array.get_slice([first], [second], nix.DataSliceMode.Data)[:] - minus_spikes
except:
print('link array problem')
try:
spikes = sp_data_array[:]
except:
# embed()
# print('spikes inflated, therefore cut them')
spikes = sp_data_array.data[
0:-688] # todo: das komische es passiert genau hier: 0:-688, keinen Punkt davor oder danach
# try:
spikes_mt = spikes[(spikes > first) & (spikes < second + first)] - minus_spikes
# embed()
# except:
# embed()
try:
eod_data_array = b.data_arrays[load_eod_array]
except:
embed()
print('local eod problem')
test = False
if test:
test_eod3()
sampling = len(eod_data_array.get_slice([0], [1], nix.DataSliceMode.Data)[:])
# sampling = len(eod_data_array)/b.metadata['Recording']['Recording duration']
# v1_data_array = b.data_arrays['V-1']
try:
# eod_mt = eod_data_array.get_slice([first/2], [second], nix.DataSliceMode.Data)[:]
eod_mt = eod_data_array.get_slice([first], [second], nix.DataSliceMode.Data)[:]
except:
# time_whole = np.arange(0, len(eod_data_array[:]) / 40000, 1 / 40000)
time_whole = np.arange(0, len(eod_data_array[:]) / sampling, 1 / sampling)
eod_mt = eod_data_array[(time_whole >= first) & (time_whole < first + second)]
if len(eod_mt) < 1:
# sampling = len(eod_data_array.get_slice([0], [1], nix.DataSliceMode.Data)[:])
time_whole = np.arange(0, len(eod_data_array[:]) / sampling, 1 / sampling)
eod_mt = eod_data_array[(time_whole >= first) & (time_whole < first + second)]
test = False
if test:
test_eod4()
# embed()
return eod_mt, spikes_mt, sampling
def find_mt_all(b):
all_mt_names = []
for trials in b.multi_tags:
all_mt_names.append(trials.name)
return all_mt_names
def setting_overview_score(frame, cell_type_here=' P-unit', burst_lim=-99, snippets='snippets',
min_amp='range', only_base_data=True, f_exclude=True, snippet=20, bigger='bigger',
species=' Apteronotus leptorhynchus'):
if f_exclude:
fn_exlude = file_names_to_exclude()
# frame = frame_load
if cell_type_here != '':
file_cell0 = frame[(frame.cell_type_reclassified == cell_type_here)]
else:
file_cell0 = frame
    # here we take only cells that also have a baseline recording (2 cells do not fall under this)
if only_base_data:
if 'cv_base' in file_cell0.keys():
file_cell1 = file_cell0[~np.isnan(file_cell0.cv_base)]
else:
file_cell1 = file_cell0
# print(str(len(file_cell1))+str(len(file_cell0)))
else:
file_cell1 = file_cell0
    # these are the default settings
###################################################
if f_exclude:
file_cell = file_cell1[(~file_cell1.file_name.isin(fn_exlude))]
else:
file_cell = file_cell1
# burst restriction choice
if burst_lim != -99:
if bigger == 'bigger': # ( ~file_cell.file_name.isin(fn_exlude)) &
file_here = file_cell[(file_cell.burst_factor_burst_corr_individual > burst_lim)]
else:
file_here = file_cell[(file_cell.burst_factor_burst_corr_individual <= burst_lim)]
else: # (~file_cell.file_name.isin(fn_exlude)) &
try:
file_here = file_cell # [(~file_cell.file_name.isin(fn_exlude))]
except:
print('length thing2')
embed()
###########################################
# species choice
if species == ' Apteronotus leptorhynchus': # np.isnan(species):
frame_load = file_here[~file_here['species'].isin(
[' Eigenmannia virescens', ' Apteronotus albifrons'])] # ' Apteronotus leptorhynchus',
elif species == '':
frame_load = file_here
else:
frame_load = file_here[file_here.species == species]
if snippet:
frame_loaded = frame_load[(frame_load[snippets] == snippet)]
else:
frame_loaded = frame_load
# grouped_cell = frame_loaded.groupby(['cell'])
# grouped = frame_loaded.groupby(['cell', 'file_name'])
# cells_several = grouped_cell.cell[np.array(grouped_cell.count().file_name > 1)]
# grouped.count('cell')
# embed()
    # we always take the one with the higher cut_off
if f_exclude:
frame_loaded = fname_with_highest_cutoff(frame_loaded)
# embed()
if min_amp == 'min':
frame_loaded = fname_with_smalles_amp(frame_loaded)
elif min_amp == 'range':
frame_loaded = fname_with_range(frame_loaded)
# embed()
test = False
if test:
frame_test = file_cell0[file_cell0.cell == '2018-08-24-af-invivo-1']
frame_test0 = frame[frame.cell == '2018-08-24-af-invivo-1']
# if min_amp:
# frame_grouped = file_here.groupby('cell')
# frame_grouped.amp
return frame_loaded
def fname_amp_diff(cv_base, frame_loaded, score):
file_name1 = True
cut_offs_all = []
drops = 0
try:
frame_loaded = frame_loaded.reset_index()
except:
print('no reset necessary')
fil = []
fil_true = []
cvs = []
resp_diffs = []
score_diffs = []
cells = []
for c, cell in enumerate(np.array(np.unique(frame_loaded.cell))):
# embed()
resp = frame_loaded[frame_loaded.cell == cell].response_modulation
if len(resp) > 1:
resp_min = np.min(frame_loaded[frame_loaded.cell == cell].response_modulation)
resp_max = np.max(frame_loaded[frame_loaded.cell == cell].response_modulation)
try:
min_i = resp.index[np.argmin(resp)]
max_i = resp.index[np.argmax(resp)]
except:
print('min something')
embed()
score_diff = frame_loaded.loc[max_i, score] - frame_loaded.loc[min_i, score]
score_diffs.append(score_diff)
resp_diff = resp_max - resp_min
resp_diffs.append(resp_diff)
cv = frame_loaded.loc[max_i, cv_base]
cvs.append(cv)
cells.append(cell)
return cvs, resp_diffs, score_diffs, cells
def fname_with_range(frame_loaded):
file_name1 = True
cut_offs_all = []
drops = 0
try:
frame_loaded = frame_loaded.reset_index()
except:
print('no reset necessary')
# embed()#frame_loaded['max(diag5Hz)/med_diagonal_proj_fr']
if 'level_0' in frame_loaded.keys():
frame_loaded = frame_loaded.dropna(axis=1, how='all')
frame_loaded = frame_loaded.drop('level_0', axis=1)
frame_loaded = frame_loaded.drop('index', axis=1)
frame_loaded = frame_loaded.drop_duplicates()
    # duplicates
fil = []
fil_true = []
for c, cell in enumerate(np.array(np.unique(frame_loaded.cell))):
fil.append(np.unique(frame_loaded[frame_loaded.cell == cell].amp))
if len(fil[-1]) > 1:
fil_true.append(True)
cut_offs = []
argmin = np.argmin(fil[-1])
argmax = np.argmax(fil[-1])
file_to_cick = fil[-1][argmin]
file_to_cickmax = fil[-1][argmax]
file_cell = frame_loaded[frame_loaded.cell == cell]
# if len(file_cell) > 2:
# print('double cells')
# embed()
drop_idx = file_cell[(file_cell.amp != file_to_cick) & (file_cell.amp != file_to_cickmax)].index # [arg]
drops += len(drop_idx)
# embed()
frame_loaded = frame_loaded.drop(drop_idx)
else:
fil_true.append(False)
# (frame_file.amp < 9)
return frame_loaded
def fname_with_smalles_amp(frame_loaded):
file_name1 = True
cut_offs_all = []
drops = 0
try:
frame_loaded = frame_loaded.reset_index()
except:
print('no reset necessary')
fil = []
fil_true = []
for c, cell in enumerate(np.array(np.unique(frame_loaded.cell))):
fil.append(np.unique(frame_loaded[frame_loaded.cell == cell].amp))
if len(fil[-1]) > 1:
fil_true.append(True)
cut_offs = []
arg = np.argmin(fil[-1])
file_to_cick = fil[-1][arg]
file_cell = frame_loaded[frame_loaded.cell == cell]
drop_idx = file_cell[file_cell.amp != file_to_cick].index # [arg]
drops += len(drop_idx)
frame_loaded = frame_loaded.drop(drop_idx)
else:
fil_true.append(False)
# (frame_file.amp < 9)
return frame_loaded
def fname_with_highest_cutoff(frame_loaded):
# ATTENTION: Only valid if two fnames left, sort out BEFORE
file_name1 = True
cut_offs_all = []
drops = 0
# embed()
    # todo: still exclude file_name here
try:
frame_loaded = frame_loaded.reset_index()
except:
print('no reset necessary')
fil = []
fil_true = []
for c, cell in enumerate(np.array(np.unique(frame_loaded.cell))):
# cut_off_calc(frame_loaded.file_name)
fil.append(np.unique(frame_loaded[frame_loaded.cell == cell].file_name))
if len(fil[-1]) > 1:
fil_true.append(True)
cut_offs = []
for f in fil[-1]:
cut_off = calc_cut_offs(f)
cut_offs.append(cut_off)
if len(cut_offs) > 2:
print('to many cutoffs')
embed()
if len(np.unique(cut_offs)) == 1:
print('same cut off')
                embed()  # ok, this just has to be debugged later if it ever happens
cut_offs_all.append(cut_offs)
arg = np.argmin(cut_offs)
# arg = np.argmax(cut_offs)
file_to_cick = fil[-1][arg]
# print('row')
# embed()
file_cell = frame_loaded[frame_loaded.cell == cell]
drop_idx = file_cell[file_cell.file_name == file_to_cick].index # [arg]
drops += len(drop_idx)
# if len(drop_idx)>1:
# print('drop thing')
# embed()
frame_loaded = frame_loaded.drop(drop_idx)
# frame_loaded.loc[drop_idx]
else:
fil_true.append(False)
# embed()
# (frame_file.amp < 9)
return frame_loaded
def find_restricted_tag(first, second, b, names_mt_gwn):
mts_all = find_mt_all(b)
all_positions = []
all_extends = []
names = []
for mt_name in mts_all:
# print(mt_name)
if (mt_name != names_mt_gwn) & ('init' not in mt_name):
mt_here = b.multi_tags[mt_name]
all_positions.extend(mt_here.positions[:])
all_extends.extend(mt_here.extents[:])
names.extend([mt_name] * len(mt_here.extents[:]))
try:
if len(np.shape(all_positions)) > 1:
all_positions = np.concatenate(all_positions)
all_extends = np.concatenate(all_extends)
except:
print('tags concat thing')
embed()
# first = mt.positions[:][-1]
# second = mt.extents[:][-1]
# other_tag = all_positions[(all_positions > first) & (all_positions < first + second)]
try:
other_tag = np.array(all_extends)[(all_positions > first) & (all_positions < first + second)]
except:
print('Tag stuff')
embed()
positions_chosen = np.array(all_positions)[(all_positions > first) & (all_positions < first + second)]
names_chosen = np.array(names)[(all_positions > first) & (all_positions < first + second)]
return positions_chosen
def crossSpectrum_response(isfs, osfs, append_stacks=True, restrict=None):
# embed()
time1 = time.time()
cross = np.zeros(len(isfs[0]), dtype=np.complex_)
count = 0
if restrict:
max_nr = restrict
else:
max_nr = len(isfs) * len(osfs) * 2
all_ri_spectra = []
all_rj_spectra = []
for i, isf in enumerate(isfs):
for j, osf in enumerate(osfs):
if count < max_nr:
if j < i:
if (not np.isnan(np.sum(isf))) & (not np.isnan(np.sum(osf))):
if append_stacks:
if count == 0:
all_ri_spectra = isf
all_rj_spectra = osf
else:
all_ri_spectra = np.vstack((all_ri_spectra, isf))
all_rj_spectra = np.vstack((all_rj_spectra, osf))
count += 1
else:
cross += isf.conj() * osf
count += 1
    # print('append stacks ' + str(time.time() - time1))  # append stacks is faster (about twice as fast)
return cross, count, all_ri_spectra, all_rj_spectra
def crossSpectrum(isfs, osfs, matrix=True):
if matrix:
cross = np.conj(isfs) * osfs
sum_here = np.sum(cross, axis=1)
cross = np.nansum(cross, axis=0)
count_final = np.sum(~np.isnan(sum_here))
else:
cross = np.zeros(len(isfs[0]), dtype=np.complex_)
count_final = 0
for i, isf in enumerate(isfs):
if (not np.isnan(np.sum(isf))) & (not np.isnan(np.sum(osfs[i]))):
cross += isf.conj() * osfs[i]
count_final += 1
return cross, count_final
def calc_coh(a_mi, p11s, p12s, p22s):
    # this is correct as it is (validated)
coh = np.abs(p12s / a_mi) ** 2 / (p11s.real / a_mi) / (
p22s.real / a_mi)
return coh
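# Illustrative sketch only: how crossSpectrum() and calc_coh() above combine into a
# stimulus-response coherence estimate. The segment spectra are faked with random numbers
# here; in the pipeline they come from Fourier transforms of stimulus and response segments.
def _example_stimulus_response_coherence_sketch(n_trials=20, n_freq=256):
    isfs = np.random.randn(n_trials, n_freq) + 1j * np.random.randn(n_trials, n_freq)
    osfs = isfs + 0.5 * (np.random.randn(n_trials, n_freq) + 1j * np.random.randn(n_trials, n_freq))
    p12, count = crossSpectrum(isfs, osfs)  # cross spectrum, summed over trials
    p11, _ = crossSpectrum(isfs, isfs)      # stimulus auto spectrum
    p22, _ = crossSpectrum(osfs, osfs)      # response auto spectrum
    return calc_coh(count, p11, p12, p22)   # coherence between 0 and 1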
def calc_r_r_coherences(cut_vals, length_array_isf, length_array_osf, nfft, simplified='simplified', mean=''):
#######################################
# response response coherence values
p22_rrs = np.zeros(int(nfft / 2 - 1), dtype=np.complex_)
p12_rrs = np.zeros(int(nfft / 2 - 1), dtype=np.complex_)
p12_rrs_mean = np.zeros(int(nfft / 2 - 1), dtype=np.complex_)
p22_rrs_mean = np.zeros(int(nfft / 2 - 1), dtype=np.complex_)
p12_rrs_restrict = np.zeros(int(nfft / 2 - 1), dtype=np.complex_)
##############################################
# response response
range_here = range(np.shape(length_array_osf)[0])
coh_resp_directs = np.zeros(int(nfft / 2 - 1))
mutual_information_resp_directs = np.zeros(int(nfft / 2 - 1))[cut_vals]
coh_resp_directs_restrict = np.zeros(int(nfft / 2 - 1))
mutual_information_resp_directs_restrict = np.zeros(int(nfft / 2 - 1))[cut_vals]
coh_s_directs = np.zeros(int(nfft / 2 - 1))
mutual_information_s_directs = np.zeros(int(nfft / 2 - 1))[cut_vals]
coh_resp_mean = np.zeros(int(nfft / 2 - 1))
mutual_information_resp_mean = np.zeros(int(nfft / 2 - 1))[cut_vals]
coh_resp_restrict = []
coh_resp_first = []
mutual_information_resp_first = []
mutual_information_resp_restrict = []
mean_numbers = 0
a_mir = 0
a_mir2 = 0
a_mir_restrict = 0
cont_direct = 0
cont_direct_r = 0
###############################
# all
######################################
# the all trials condition
if simplified == '':
# if calc_all:
        # ok, we do not actually want that
concat_vals = np.concatenate(length_array_osf, axis=0)
p12_rr_all, count, all_ri_spectra, all_rj_spectra = crossSpectrum_response(concat_vals, concat_vals)
p22_rr_all, final_count = crossSpectrum(concat_vals, concat_vals)
# p22,count_final = crossSpectrum(osfs, osfs)
coh_resp_all = calc_resp_direct(count, final_count, p12_rr_all, p22_rr_all)
coh_resp_all = []
test = False
if test:
# all_ri_spectra, all_rj_spectra, f = get_ri_spectra_stacks(dt, noverlap, nperseg, rates)
_, rr_gamma = rr_coherence(rates, nfft, nfft // 2, dt)
# embed()
# print(range_here)
for a in range_here:
##################################################
# same number
if 'direct' in mean:
#################################################
            # direct in the sense of the response-response coherence
p22, final_count = crossSpectrum(length_array_osf[a, :, :], length_array_osf[a, :, :])
p12, final_count = crossSpectrum(length_array_isf[a, :, :], length_array_osf[a, :, :])
p11, final_count = crossSpectrum(length_array_isf[a, :, :], length_array_isf[a, :, :])
coh_s_direct, cut_vals, _, mutual_information_s_direct = calc_coherence_and_mutual(final_count, [],
[], p11,
p12, p22, cut_vals)
#####################################
# p12_rr, count = crossSpectrum_response(osfs, osfs) expected
count, p12_rr_mean, p22_rr_mean = mean_spectra_calculations(length_array_osf[a, :, :])
mean_numbers = count
p22_rr, final_count = crossSpectrum(length_array_osf[a, :, :], length_array_osf[a, :, :])
coh_resp_mean_here = calc_coh(mean_numbers, p22_rr, p12_rr_mean, p22_rr_mean)
mutual_information_resp_mean_here = - np.log2(1 - coh_resp_mean_here[cut_vals])
count, p12_rr = get_p12(length_array_osf[a, :, :])
cont_coh = True
if len(np.unique(p22_rr.real)) == 1:
if np.unique(p22_rr.real) == 0:
cont_coh = False
if cont_coh:
mutual_information_s_directs += mutual_information_s_direct
coh_s_directs += coh_s_direct
coh_resp_mean += coh_resp_mean_here
mutual_information_resp_mean += mutual_information_resp_mean_here
coh_resp_direct = calc_resp_direct(count, final_count, p12_rr, p22_rr)
coh_resp_directs += coh_resp_direct
mutual_information_resp_directs += - np.log2(
1 - np.sqrt(coh_resp_direct[cut_vals])) # np.sum(* np.diff(freq)[0]
cont_direct += 1
# embed()
else:
# print(str(a)+' in RR')
######################################
# the all trials condition
count, p12_rr_mean, p22_rr_mean = mean_spectra_calculations(length_array_osf[a, :, :])
p22_rrs_mean += p22_rr_mean
mean_numbers += count
count, p12_rr = get_p12(length_array_osf[a, :, :])
print(count)
p12_rrs += p12_rr
p12_rrs_mean += p12_rr_mean
p22_rr, final_count = crossSpectrum(length_array_osf[a, :, :], length_array_osf[a, :, :])
p22_rrs += p22_rr
# if a == 0:
            # this should be correct: count should be len(length_array_osf[:, a, :])*len(length_array_osf[:, a, :])/2 - len(length_array_osf[:, a, :])/2, and that holds here
a_mir += count
a_mir2 += final_count # len(length_array_osf[a, :, :])
if simplified == '':
p12_rr_restrict, count_r, all_ri_spectra, all_rj_spectra = crossSpectrum_response(length_array_osf[a, :, :],
length_array_osf[a, :, :],
restrict=len(
length_array_osf[a, :,
:]))
p12_rrs_restrict += p12_rr_restrict
a_mir_restrict += count_r
test = False
if test:
test_input()
# embed()
#
#################################################
# the first condition
if a == 0:
p12_rrs_first = p12_rr
p22_rrs_first = p22_rr
a_mir2_first = len(length_array_osf[a, :, :])
a_mir_first = count
            # maybe I have to average once here or something?
#################################
# direct mean
# embed()
cont_coh = True
if len(np.unique(p22_rr.real)) == 1:
if np.unique(p22_rr.real) == 0:
cont_coh = False
if cont_coh:
coh_resp_direct_restrict = np.abs(p12_rr_restrict / count_r) ** 2 / (
p22_rr.real / len(length_array_osf[a, :, :])) ** 2
coh_resp_directs_restrict += coh_resp_direct_restrict
mutual_information_resp_directs_restrict += - np.log2(
1 - np.sqrt(coh_resp_direct_restrict[cut_vals])) # np.sum(* np.diff(freq)[0]
cont_direct_r += 1
# cont_direct += 1
# if a == 0:
# embed()
if simplified == '':
coh_resp_directs_restrict = coh_resp_directs_restrict / cont_direct_r
mutual_information_resp_directs_restrict = mutual_information_resp_directs_restrict / cont_direct_r
else:
coh_resp_directs_restrict = []
mutual_information_resp_directs_restrict = []
if 'direct' in mean:
coh_resp_directs = coh_resp_directs / cont_direct
mutual_information_resp_directs = mutual_information_resp_directs / cont_direct
coh_s_directs = coh_s_directs / cont_direct
mutual_information_s_directs = mutual_information_s_directs / cont_direct
else:
coh_resp_directs = []
mutual_information_resp_directs = []
coh_s_directs = []
mutual_information_s_directs = []
# embed()
if 'direct' not in mean:
#####################################
# p12_rr, count = crossSpectrum_response(osfs, osfs)
coh_resp, mutual_information_resp = coherence_and_mutual_response(a_mir, a_mir2,
cut_vals,
p12_rrs, p22_rrs)
#####################################
# p12_rr, count = crossSpectrum_response(osfs, osfs) expected
coh_resp_mean = calc_coh(mean_numbers, p22_rrs, p12_rrs_mean, p22_rrs_mean)
mutual_information_resp_mean = - np.log2(1 - coh_resp_mean[cut_vals])
else:
coh_resp = []
mutual_information_resp = []
# coh_resp_mean = []
# mutual_information_resp_mean = []
if simplified == '':
######################################
        # always taking only the first noise
coh_resp_first, mutual_information_resp_first = coherence_and_mutual_response(
a_mir_first,
a_mir2_first,
cut_vals,
p12_rrs_first,
p22_rrs_first) #
####################################
        # same number of snippets for CR and RR
coh_resp_restrict, mutual_information_resp_restrict = coherence_and_mutual_response(
a_mir_restrict, a_mir2, cut_vals, p12_rrs_restrict, p22_rrs)
return coh_resp_mean, mutual_information_resp_mean, p12_rrs, coh_resp_all, coh_s_directs, mutual_information_s_directs, coh_resp_directs_restrict, mutual_information_resp_directs_restrict, a_mir, a_mir2, coh_resp, coh_resp_directs, coh_resp_first, coh_resp_restrict, mutual_information_resp, mutual_information_resp_directs, mutual_information_resp_first, mutual_information_resp_restrict, range_here
def get_p12(length_array_osf):
append_stacks = False
try:
p12_rr, count, all_ri_spectra, all_rj_spectra = crossSpectrum_response(length_array_osf,
length_array_osf,
append_stacks=append_stacks)
except:
print('length somethings3')
        embed()  # but here append_stacks=False is now faster, for whatever reason
if append_stacks:
try:
p12_rr, count = crossSpectrum(all_ri_spectra, all_rj_spectra)
except:
print('not working')
embed()
return count, p12_rr
def mean_spectra_calculations(length_array_osf):
mean_spectrum = np.array([np.nanmean(length_array_osf, axis=0)] * len(length_array_osf))
# embed()
try:
p12_rr_mean, count = crossSpectrum(length_array_osf, mean_spectrum)
except:
print('length somethings2')
embed()
    # this could possibly be made faster as well
# p22_rr_mean,_ = crossSpectrum(mean_spectrum, mean_spectrum)
p22_rr_mean, _ = crossSpectrum(np.array([mean_spectrum[0]]),
np.array([mean_spectrum[0]])) # *len(mean_spectrum)
p22_rr_mean = p22_rr_mean * len(mean_spectrum)
return count, p12_rr_mean, p22_rr_mean
def calc_resp_direct(count, final_count, p12_rr, p22_rr):
coh_resp_direct = np.abs(p12_rr / count) ** 2 / (p22_rr.real / final_count) ** 2
return coh_resp_direct
def coherence_and_mutual_response(a_mir, a_mir2, cut_vals, p12_rrs, p22_rrs):
coh_resp = np.abs(p12_rrs / a_mir) ** 2 / (p22_rrs.real / a_mir2) ** 2
mutual_information_resp = - np.log2(1 - np.sqrt(coh_resp[cut_vals])) # np.sum(* np.diff(freq)[0]
return coh_resp, mutual_information_resp
def calc_coherence_and_mutual(a_mi, cut_off, f_same, p11s, p12s, p22s, cut_vals=[]):
coh = calc_coh(a_mi, p11s, p12s, p22s)
if len(cut_vals) < 1:
f_new = f_same[f_same < cut_off]
cut_vals = f_same < cut_off
else:
f_new = f_same
mutual_information = - np.log2(1 - coh[cut_vals]) # np.sum(* np.diff(freq)[0]
return coh, cut_vals, f_new, mutual_information
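# Illustrative sketch only: the lower-bound mutual information rate implied by a coherence
# spectrum, as computed in calc_coherence_and_mutual() above (density -log2(1 - coherence),
# summed over frequency bins up to the cut-off and multiplied by the bin width).
def _example_mutual_information_rate_sketch(coh, freqs, cut_off=300.0):
    cut_vals = freqs < cut_off
    mi_density = -np.log2(1.0 - coh[cut_vals])     # bits per frequency bin
    return np.sum(mi_density) * np.diff(freqs)[0]  # bits per second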
def get_ri_spectra_stacks(dt, noverlap, nperseg, rates):
count = 0
for i in range(rates.shape[0]):
ri_segments = get_segments(rates[i, :], nperseg, noverlap)
f, ri_spectra = spectra(ri_segments, dt)
for j in range(rates.shape[0]):
if j >= i:
break
rj_segments = get_segments(rates[j, :], nperseg, noverlap)
_, rj_spectra = spectra(rj_segments, dt)
if count == 0:
all_ri_spectra = ri_spectra
all_rj_spectra = rj_spectra
else:
all_ri_spectra = np.vstack((all_ri_spectra, ri_spectra))
all_rj_spectra = np.vstack((all_rj_spectra, rj_spectra))
count += 1
return all_ri_spectra, all_rj_spectra, f
def get_segments(x, nperseg, noverlap):
    # the two commented variants below were superseded; adding noverlap to the step was a mistake
    # indices = np.arange(nperseg, len(x), nperseg)
    # overlap_indices = np.arange(nperseg + noverlap, len(x), nperseg + noverlap)
    indices = np.arange(0, len(x), nperseg)
    overlap_indices = np.arange(0 + noverlap, len(x), nperseg)
hann = np.hanning(nperseg)
segments = np.array_split(x, indices)
overlap_segments = np.array_split(x, overlap_indices)
valid_segments = [(s - np.mean(s)) * hann for s in segments if len(s) == nperseg]
test = False
if test:
test_valid()
# valid_segments = [s for s in segments if len(s) == nperseg]
    # todo: so the overlap does not make a difference
valid_overlaps = [(s - np.mean(s)) * hann for s in overlap_segments if len(s) == nperseg]
# valid_overlaps = [(s - np.mean(s)) for s in overlap_segments if len(s) == nperseg]
# valid_overlaps = [s for s in overlap_segments if len(s) == nperseg]
# embed()
valid_segments.extend(valid_overlaps)
all_segs = np.array(valid_segments)
return all_segs
def spectra(segments, dt, norm='forward'):
    # ok, using the 'forward' normalization is correct
f = np.fft.fftfreq(segments[0].size, d=dt)
specs = []
for s in segments:
specs.append(np.fft.fft(s, norm=norm))
return f, np.array(specs)
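# Illustrative sketch only: how get_segments() and spectra() above combine into a
# Welch-style power-spectrum estimate of a single trace (Hann-windowed, 50% overlapping
# segments, 'forward'-normalized FFTs averaged over segments). Parameter values are examples.
def _example_segment_spectrum_sketch(trace, dt=1.0 / 40000.0, nperseg=2 ** 12):
    segments = get_segments(trace, nperseg, nperseg // 2)
    f, specs = spectra(segments, dt)
    power = np.mean(np.abs(specs) ** 2, axis=0)
    return f[f >= 0], power[f >= 0]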
def cross_spectrum(stims, rates):
scrs = np.zeros_like(stims)
srcs = np.zeros_like(rates)
for i, (s, r) in enumerate(zip(stims, rates)):
if (not np.isnan(np.sum(s))) & (not np.isnan(np.sum(r))):
scrs[i, :] = np.conjugate(s) * r
srcs[i, :] = s * np.conjugate(r)
else:
scrs = np.delete(scrs, (i), axis=0)
srcs = np.delete(srcs, (i), axis=0)
    # ok, these two are the same
csd = np.abs(np.mean(srcs, axis=0) * np.mean(scrs, axis=0))
test = False
if test:
csd = np.abs(np.mean(srcs, axis=0) * np.mean(scrs, axis=0))
csd2 = np.abs(np.mean(srcs, axis=0) ** 2)
return csd
def auto_spectrum(spectra):
    scs = np.zeros_like(spectra, dtype=np.complex_)  # ok, the complex dtype makes no difference here
for i, s in enumerate(spectra):
if (not np.isnan(np.sum(s))):
scs[i, :] = np.conjugate(s) * s
else:
scs = np.delete(scs, (i), axis=0)
    # well, this should actually be fine
asd = np.abs(np.mean(scs, axis=0))
return asd
def rr_coherence(rates, nperseg, noverlap, dt):
all_ri_spectra, all_rj_spectra, f = get_ri_spectra_stacks(dt, noverlap, nperseg, rates)
csd = cross_spectrum(all_ri_spectra, all_rj_spectra)
all_ri_spectra, f = get_ri_auto_spectra_stack(all_ri_spectra, dt, f, noverlap, nperseg, rates)
asd = auto_spectrum(all_ri_spectra) ** 2
coh = csd / asd
return f[f >= 0], np.sqrt(coh[f >= 0])
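# Illustrative sketch only: response-response coherence across repeated presentations of the
# same frozen stimulus via rr_coherence() above. The trial "rates" are faked as a shared
# signal plus independent noise per trial; all numbers are example values.
def _example_rr_coherence_sketch(n_trials=8, duration=10.0, dt=1.0 / 2000.0):
    t = np.arange(0, duration, dt)
    shared = np.sin(2 * np.pi * 10.0 * t)
    rates = np.array([shared + np.random.randn(len(t)) for _ in range(n_trials)])
    f, rr_gamma = rr_coherence(rates, nperseg=2 ** 11, noverlap=2 ** 10, dt=dt)
    return f, rr_gamma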
def get_ri_auto_spectra_stack(all_ri_spectra, dt, f, noverlap, nperseg, rates):
for i in range(rates.shape[0]):
ri_segments = get_segments(rates[i, :], nperseg, noverlap)
f, ri_spectra = spectra(ri_segments, dt)
if i == 0:
all_ri_spectra = ri_spectra
else:
all_ri_spectra = np.vstack((all_ri_spectra, ri_spectra))
return all_ri_spectra, f
def tag2(fig=None, axes=None, xoffs=None, yoffs=None,
labels=None, minor_label=None, major_index=None,
minor_index=None, **kwargs):
"""Tag each axes with a label.
Labels are left/top aligned.
Parameters
----------
fig: matplotlib figure
If None take figure from first element in `axes`.
axes: None or matplotlib axes or int or list of matplotlib axes or int
If None label all axes of the figure.
Integers in the list are indices to the axes of the figure.
        For axes in the outer list, `labels` is used for tagging,
for axes in (optional) inner lists, `minor_label` is used.
xoffs: float, 'auto', or None
X-coordinate of label relative to origin of axis in multiples of the width
of a character (simply 60% of the current font size).
If 'auto' and this is the first call of this function on the figure,
set it to the distance of the right-most axis to the left figure border,
otherwise use the value computed by the first call.
If None take value from `mpl.rcParams['figure.tags.xoffs']`.
yoffs: float, 'auto', or None
Y-coordinate of label relative to top end of left yaxis in multiples
of the height of a character (the current font size).
If 'auto' and this is the first call of this function on the figure,
set it to the distance of the top-most axis to the top figure border,
otherwise use the value computed by the first call.
If None take value from `mpl.rcParams['figure.tags.yoffs']`.
labels: string or list of strings
If string, then replace formatting substrings
'%A', '%a', '%1', '%i', and '%I' to generate labels for each axes in the outer list.
- '%A': A B C ...
- '%a': a b c ...
- '%1': 1 2 3 ...
- '%i': i ii iii iv ...
- '%I': I II III IV ...
Subsequent calls to `tag()` keep incrementing the label.
        With a list arbitrary labels can be specified.
If None, set to `mpl.rcParams['figure.tags.label']`.
minor_label: string
If `axes` is a nested list of axes, then for the inner lists
`minor_label` is used for formatting the axes label.
Formatting substrings '%A', '%a', '%1', '%i', and '%I' are replaced
by the corresponding tags for the outer list, '%mA', '%ma', '%m1', '%mi',
and '%mI' are replaced by the equivalently formatted tags for the inner list.
See `labels` for meaning of the formatting substrings.
If None, set to `mpl.rcParams['figure.tags.minorlabel']`.
major_index: int or None
Start labeling major axes with this index (0 = 'A').
If None, use last index from previous call to `tag()`.
minor_index: int or None
Start labeling minor axes with this index (0 = 'A').
If None, start with 0.
kwargs: dict
        Keyword arguments are passed on to ax.text() for formatting the tag label.
Overrides settings in `mpl.rcParams['figure.tags.font']`.
"""
if fig is None:
fig = axes[0].get_figure()
if axes is None:
axes = fig.get_axes()
if not isinstance(axes, (list, tuple, np.ndarray)):
axes = [axes]
if labels is None:
labels = mpl.rcParams['figure.tags.label']
if minor_label is None:
minor_label = mpl.rcParams['figure.tags.minorlabel']
if not isinstance(labels, (list, tuple, np.ndarray)):
# generate labels:
romans_lower = ['i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x',
'xi', 'xii', 'xiii']
romans_upper = [r.upper() for r in romans_lower]
if major_index is None:
if hasattr(fig, 'tags_major_index'):
major_index = fig.tags_major_index
else:
major_index = 0
if minor_index is None:
minor_index = 0
label_list = []
for k, axs in enumerate(axes):
if isinstance(axs, (list, tuple, np.ndarray)):
for j in range(len(axs)):
                    mlabel = str(minor_label) if minor_label else str(labels)
mlabel = mlabel.replace('%a', chr(ord('a') + major_index + k))
mlabel = mlabel.replace('%A', chr(ord('A') + major_index + k))
mlabel = mlabel.replace('%1', chr(ord('1') + major_index + k))
mlabel = mlabel.replace('%i', romans_lower[major_index + k])
mlabel = mlabel.replace('%I', romans_upper[major_index + k])
mlabel = mlabel.replace('%ma', chr(ord('a') + minor_index + j))
mlabel = mlabel.replace('%mA', chr(ord('A') + minor_index + j))
mlabel = mlabel.replace('%m1', chr(ord('1') + minor_index + j))
mlabel = mlabel.replace('%mi', romans_lower[minor_index + j])
mlabel = mlabel.replace('%mI', romans_upper[minor_index + j])
label_list.append(mlabel)
minor_index = 0
else:
label = labels.replace('%a', chr(ord('a') + major_index + k))
label = label.replace('%A', chr(ord('A') + major_index + k))
label = label.replace('%1', chr(ord('1') + major_index + k))
label = label.replace('%i', romans_lower[major_index + k])
label = label.replace('%I', romans_upper[major_index + k])
label_list.append(label)
fig.tags_major_index = major_index + len(axes)
else:
label_list = labels
# flatten axes:
axes_list = []
for axs in axes:
if isinstance(axs, (list, tuple, np.ndarray)):
axes_list.extend(axs)
else:
axes_list.append(axs)
# font settings:
fkwargs = dict(**mpl.rcParams['figure.tags.font'])
fkwargs.update(**kwargs)
# get axes offsets:
xo = -1.0
yo = 1.0
for ax, l in zip(axes_list, label_list):
if isinstance(ax, int):
ax = fig.get_axes()[ax]
x0, y0, width, height = ax.get_position(original=True).bounds
if x0 <= -xo:
xo = -x0
if 1.0 - y0 - height < yo:
yo = 1.0 - y0 - height
# get figure size in pixel:
w, h = fig.get_window_extent().bounds[2:]
ppi = 72.0 # points per inch:
fs = mpl.rcParams['font.size'] * fig.dpi / ppi
# compute offsets:
if xoffs is None:
xoffs = mpl.rcParams['figure.tags.xoffs']
if yoffs is None:
yoffs = mpl.rcParams['figure.tags.yoffs']
if xoffs == 'auto':
if hasattr(fig, 'tags_xoffs'):
xoffs = fig.tags_xoffs
else:
xoffs = xo
xoffs = [xoffs] * len(axes_list)
if len(axes_list) != len(xoffs):
xoffs = xo
xoffs = [xoffs] * len(axes_list)
elif (type(xoffs) != float) & (type(xoffs) != int):
xoffs = np.array(xoffs)
try:
xoffs *= 0.6 * fs / w
except:
xoffs = xoffs * (0.6 * fs / w)
missing = len(axes_list) / len(xoffs)
xoffs = list(xoffs)
for r in range(int(missing) - 1):
xoffs.extend(xoffs)
else:
xoffs *= 0.6 * fs / w
xoffs = [xoffs] * len(axes_list)
if yoffs == 'auto':
if hasattr(fig, 'tags_yoffs'):
yoffs = fig.tags_yoffs
else:
yoffs = yo - 1.0 / h # minus one pixel
yoffs = [yoffs] * len(axes_list)
if len(axes_list) != len(yoffs):
yoffs = yo - 1.0 / h # minus one pixel
yoffs = [yoffs] * len(axes_list)
#embed()
elif (type(yoffs) != float) & (type(yoffs) != int):
yoffs = np.array(yoffs)
        try:
            yoffs *= fs / h
        except:
            yoffs = yoffs * (fs / h)
            print('same kind something')
# embed()
missing = len(axes_list) / len(yoffs)
yoffs = list(yoffs)
for r in range(int(missing) - 1):
yoffs.extend(yoffs)
else:
yoffs *= fs / h
yoffs = [yoffs] * len(axes_list)
# embed()
fig.tags_xoffs = xoffs
fig.tags_yoffs = yoffs
# put labels onto axes:
count = 0
for ax, l in zip(axes_list, label_list):
if isinstance(ax, int):
ax = fig.get_axes()[ax]
x0, y0, width, height = ax.get_position(original=True).bounds
x = x0 + xoffs[count]
if x <= 0.0:
x = 0.0
try:
y = y0 + height + yoffs[count]
except:
print('y something')
embed()
#try:
if y >= 1.0:
y = 1.0
#except:
# print('y something')
# embed()
ax.text(x, y, l, transform=fig.transFigure, ha='left', va='top', **fkwargs)
count += 1
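# Hedged usage sketch for tag2 (illustrative; assumes the custom rcParams keys
# 'figure.tags.label', 'figure.tags.minorlabel', 'figure.tags.xoffs',
# 'figure.tags.yoffs' and 'figure.tags.font' are registered elsewhere, since the
# None defaults fall back to them):
# fig_ex, axs_ex = plt.subplots(2, 2)
# tag2(fig_ex, list(axs_ex.flat), xoffs=-3, yoffs=1, labels='%A')  # tags A, B, C, D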
def update_ssh_file(save_names, creation_time_update=False, size_update=True):
hostname, password, root, username = credencials0()
save_name_here = save_names
ssh = paramiko.SSHClient()
# username = 'rudnaya'
# ssh.connect(server, 22, hostname = server)#username=username, password=password)
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# embed()
try:
ssh.connect(hostname=hostname, port=22, username=username, password=password)
# sftp.lstat(self, path)
sftp = ssh.open_sftp()
if 'cells' in save_name_here:
root = root.replace('code', '')
root_file = root + save_name_here.replace('..', '')
try:
info = sftp.stat(root + save_name_here.replace('..', ''))
cont = True
except:
cont = False
            print('does not exist on the remote pc')
# embed()
if cont:
cont2 = False
# check which size is bigger
if size_update:
if os.path.exists(save_name_here):
size_here, size_there = check_sizes(root, save_name_here, sftp)
if size_there > size_here:
cont2 = True
else:
cont2 = False
else:
cont2 = True
###############################
if creation_time_update:
                # check the creation time
file1_last_modified = os.path.getmtime(save_name_here)
# last_modified_date = datetime.datetime.fromtimestamp(mtime)
file2_stat = sftp.stat(root + save_name_here.replace('..', ''))
file2_last_modified = file2_stat.st_mtime # datetime.datetime.fromtimestamp(file2_stat.st_mtime)
if file2_last_modified > file1_last_modified:
cont2 = True
# except:
# print('size something')
# embed()
dated_up = 'no'
if cont2:
# sftp.put(save_name_here, root)
if 'cells' in save_name_here:
new_dir_required = save_name_here.replace('/' + save_name_here.split('/')[-1], '')
if not os.path.exists(new_dir_required):
os.makedirs(new_dir_required, exist_ok=True)
sftp.get(root_file, save_name_here)
size_here, size_there = check_sizes(root, save_name_here, sftp)
dated_up = 'yes'
print('dated up')
sftp.close()
ssh.close()
else:
dated_up = 'recalc'
except:
print('connection not possible')
dated_up = 'recalc'
return dated_up
def check_sizes(root, save_name_here, sftp):
info = sftp.stat(root + save_name_here.replace('..', ''))
size_there = info.st_size
file_stats = os.stat(save_name_here)
size_here = file_stats.st_size
return size_here, size_there
def f_pi_core():
return '$A(f_{' + vary_val() + '})$ in ' + onebeat_cond() + ' $ \Delta f_{' + vary_val() + '}$'
def f_eod_pi_core():
return '$A(f_{EOD})$ in ' + onebeat_cond() + ' $ \Delta f_{' + vary_val() + '}$'
def DF_pi_core():
return '$A(\Delta f_{' + vary_val() + '})$ in ' + onebeat_cond() + ' $ \Delta f_{' + vary_val() + '}$'
# label helpers for the one-beat condition
def onebeat_cond(big=False, double=False, cond=True):
if cond == True:
if big:
val = 'One-beat condition'
else:
val = 'one-beat condition'
if double:
val += ':'
else:
if big:
val = 'One beat'
else:
val = 'one beat'
if double:
val += ':'
return val
def c_stable_name():
return 'c_{2}'
def stable_val():
return '2'
def vary_val():
return '1'
def set_xlabel_arrow_core(ax, val, xpos=1.05, ypos=-0.35, color='black'):
ax.text(xpos, ypos, val, ha='center', va='center',
transform=ax.transAxes, color=color)
def noise_name():
return 'Noise split'
def cv_base_name():
return 'CV$_{Base}$'
def load_spikes(spikes, eod_fr, ms_factor=1000, original=False, eodfs=None, eodfs_orig=None):
    # took these out of the description now, hoping it still works:
    # spikes_all = [], hists = [], frs_calc = []
spikes_all = []
isi = []
# hists2 = []
frs_calc = []
cont = True
length_sp = np.shape(spikes)
try:
if (len(length_sp) > 0):
if (np.min(length_sp) != 0):
# if len(length_sp)> 0:
                # we do this step by step; so far we know that the length is not 0
if (len(length_sp) == 1):
for sp in range(len(spikes)):
if len(spikes[sp]) > 2:
spikes_all.append(spikes[sp])
if original:
isi.append(calc_isi_base(np.array(eodfs), np.array(eodfs_orig), sp, spikes))
else:
isi.append((np.diff(spikes[sp]) / ms_factor) / (1 / eod_fr))
frs_calc.append(len(spikes[sp]) / (spikes[sp][-1] / ms_factor))
                # now we know that the length is greater than 1
elif ((len(length_sp) == 2) & ((type(spikes[0][0]) == float)) | (type(spikes[0][0]) == np.float64) | (
type(spikes[0][0]) == np.float32)):
for sp in range(len(spikes)):
if len(spikes[sp]) > 2:
spikes_all.append(spikes[sp])
if original:
isi.append(calc_isi_base(np.array(eodfs), np.array(eodfs_orig), sp, spikes))
else:
isi.append((np.diff(spikes[sp]) / ms_factor) / (1 / eod_fr))
frs_calc.append(len(spikes[sp]) / (spikes[sp][-1] / ms_factor))
                # now we know that the length is greater than 1 and that this is not directly a float
elif ((len(length_sp) == 2) & (type(spikes[0][0]) != float)):
# try:
for sp in range(len(spikes[0])):
if len(spikes[0][sp]) > 2:
spikes_all.append(spikes[0][sp])
if original:
isi.append(calc_isi_base(np.array(eodfs)[0], np.array(eodfs_orig)[0], sp, spikes[0]))
else:
try:
isi.append((np.diff(spikes[0][sp]) / ms_factor) / (1 / eod_fr))
except:
print('hist problem')
embed()
frs_calc.append(len(spikes[0][sp]) / (spikes[0][sp][-1] / ms_factor))
                # now we know that the length is greater than 2
elif ((len(length_sp) == 3) & (
(type(spikes[0][0][0]) == float) | (type(spikes[0][0][0]) == np.float64) | (
type(spikes[0][0][0]) == np.float32))):
for sp in range(len(spikes[0])):
if len(spikes[0][sp]) > 2:
spikes_all.append(spikes[0][sp])
if original:
isi.append(calc_isi_base(np.array(eodfs)[0], np.array(eodfs_orig)[0], sp, spikes[0]))
else:
try:
isi.append((np.diff(spikes[0][sp]) / ms_factor) / (1 / eod_fr))
except:
print('hist problem')
embed()
frs_calc.append(len(spikes[0][sp]) / (spikes[0][sp][-1] / ms_factor))
elif len(length_sp) == 3:
try:
for sp in range(len(spikes[0][0])):
spikes_all.append(spikes[0][0][sp])
if original:
isi.append(
calc_isi_base(np.array(eodfs)[0][0], np.array(eodfs_orig)[0][0], sp, spikes[0][0]))
else:
try:
isi.append((np.diff(spikes[0][0][sp]) / ms_factor) / (1 / eod_fr))
except:
print('hist problem')
embed()
frs_calc.append(len(spikes[0][0][sp]) / (spikes[0][0][sp][-1] / ms_factor))
except:
for sp in range(len(spikes[0])):
spikes_all.append(spikes[0][sp])
if original:
isi.append(calc_isi_base(np.array(eodfs)[0], np.array(eodfs_orig)[0], sp, spikes[0]))
else:
try:
isi.append((np.diff(spikes[0][sp]) / ms_factor) / (1 / eod_fr))
except:
print('hist problem')
embed()
frs_calc.append(len(spikes[0][sp]) / (spikes[0][sp][-1] / ms_factor))
else:
if np.isnan(spikes):
cont = False
else:
print('sp problem')
embed()
# embed()
else:
spikes_all = []
isi = []
frs_calc = []
cont = False
else:
spikes_all = []
isi = []
frs_calc = []
cont = False
except:
print('tuple index problems')
embed()
# if len(spikes_all)< 1:
# print('spike len small')
# embed()
return spikes_all, isi, frs_calc, cont
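# Hedged usage sketch for load_spikes (illustrative spike times in ms, as the
# function assumes):
# spikes_ex = [np.array([3.0, 11.5, 20.1, 31.7]), np.array([2.2, 9.8, 18.4, 27.0])]
# sp_all_ex, isi_ex, frs_ex, cont_ex = load_spikes(spikes_ex, eod_fr=750)
# isi_ex holds the interspike intervals in multiples of the EOD period,
# frs_ex the firing rates in Hz estimated from the last spike time of each trial.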
def calc_isi_base(eod_fr_new, eod_fr_orig, s, spikes):
try:
isi = np.diff((np.array(spikes[s]) / 1000) / (1 / eod_fr_new[s])) # (frs[s], cvs[s])
except:
if eod_fr_orig[s] == 0:
div = np.max(eod_fr_orig)
else:
div = eod_fr_orig[s]
try:
isi = np.diff((np.array(spikes[s]) / 1000) / (1 / div))
except:
print('isi problem')
embed()
return isi
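# Hedged note: the EOD-normalized ISIs from calc_isi_base (and from load_spikes
# above) are typically summarized by the coefficient of variation, e.g.
# cv_ex = np.std(isi_ex) / np.mean(isi_ex)   # isi_ex as in the sketch above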
def dict_default(dict_new=None, original_dict={'key': 1}):
    # dictionary that I use as a default dictionary to sometimes override my
    # local variables; copy first so the mutable default argument (and the
    # caller's dict) is not modified in place
    original_dict = dict(original_dict)
    if dict_new:
        original_dict.update(dict_new)
    return original_dict
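# Hedged usage sketch: override selected entries of a default dictionary.
# dict_default({'key': 3}, original_dict={'key': 1, 'other': 2})
# -> {'key': 3, 'other': 2}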
def default_ticks_talks():
plt.rcParams['figure.facecolor'] = 'none'
plt.rcParams['font.size'] = 22
plt.rcParams['axes.titlesize'] = 24
plt.rcParams['axes.labelsize'] = 22
#plt.rcParams['axes.labelsize'] = 22
plt.rcParams['figure.dpi'] = 300
def cm_to_inch(val):
return val / 2.54
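# Hedged usage sketch: matplotlib figsize expects inches, so centimeter-based
# layouts can be converted on the fly, e.g.
# fig_ex, ax_ex = plt.subplots(figsize=(cm_to_inch(12), cm_to_inch(8)))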