diff --git a/ampullary.py b/ampullary.py
index 55a6060..15444f2 100644
--- a/ampullary.py
+++ b/ampullary.py
@@ -1,5 +1,4 @@
-
-from threefish.utils0 import p_units_to_show, update_cell_names
+from threefish.core_cell_choice import p_units_to_show, update_cell_names
 from threefish.plot_suscept import ampullary_punit

 if __name__ == '__main__':
diff --git a/cells_suscept.pdf b/cells_suscept.pdf
index 5379007..a61c288 100644
Binary files a/cells_suscept.pdf and b/cells_suscept.pdf differ
diff --git a/cells_suscept.png b/cells_suscept.png
index 927e12b..1767ac6 100644
Binary files a/cells_suscept.png and b/cells_suscept.png differ
diff --git a/cells_suscept.py b/cells_suscept.py
index 823466a..d0bbfd4 100644
--- a/cells_suscept.py
+++ b/cells_suscept.py
@@ -1,5 +1,5 @@
-from threefish.utils0 import p_units_to_show
-from threefish.utils1_suscept import fr_name_rm
+from threefish.core_cell_choice import p_units_to_show
+from threefish.core_plot_labels import label_fr_name_rm
 from threefish.plot_suscept import ampullary_punit

 if __name__ == '__main__':
@@ -7,4 +7,4 @@ if __name__ == '__main__':
     #stack_file = pd.read_csv('..\calc_RAM\calc_nix_RAM-eod_2022-01-28-ag-invivo-1_all__amp_20.0_filename_InputArr_400hz_30_P-unitApteronotusleptorhynchus.csv')
     cells_plot2 = p_units_to_show(type_here = 'contrasts')#permuted = True,
-    ampullary_punit(eod_metrice = False, color_same = False,base_extra = True, fr_name = fr_name_rm(), cells_plot2=[cells_plot2[0]], isi_delta = 5, titles=[''],tags_individual = True, xlim_p = [0,1.15])#Low-CV P-unit,
\ No newline at end of file
+    ampullary_punit(eod_metrice = False, color_same = False, base_extra = True, fr_name = label_fr_name_rm(), cells_plot2=[cells_plot2[0]], isi_delta = 5, titles=[''], tags_individual = True, xlim_p = [0, 1.15])#Low-CV P-unit,
\ No newline at end of file
diff --git a/cells_suscept_high_CV.pdf b/cells_suscept_high_CV.pdf
index 459b216..e0bc6b7 100644
Binary files a/cells_suscept_high_CV.pdf and b/cells_suscept_high_CV.pdf differ
diff --git a/cells_suscept_high_CV.py b/cells_suscept_high_CV.py
index 09c9127..249a3a8 100644
--- a/cells_suscept_high_CV.py
+++ b/cells_suscept_high_CV.py
@@ -1,9 +1,9 @@
-from threefish.utils0 import p_units_to_show
-from threefish.utils1_suscept import fr_name_rm
+from threefish.core_cell_choice import p_units_to_show
+from threefish.core_plot_labels import label_fr_name_rm
 from threefish.plot_suscept import ampullary_punit

 if __name__ == '__main__':
     cells_plot2 = p_units_to_show(type_here='contrasts')#permuted = True,
-    ampullary_punit(base_extra = True,eod_metrice = False, color_same = False, fr_name = fr_name_rm(), tags_individual = True, isi_delta = 5, cells_plot2=[cells_plot2[1]], titles=['', 'Ampullary cell,'],)#High-CV P-unit,
\ No newline at end of file
+    ampullary_punit(base_extra = True, eod_metrice = False, color_same = False, fr_name = label_fr_name_rm(), tags_individual = True, isi_delta = 5, cells_plot2=[cells_plot2[1]], titles=['', 'Ampullary cell,'], )#High-CV P-unit,
\ No newline at end of file
diff --git a/data_overview_mod.pdf b/data_overview_mod.pdf
index 8872e26..e9baa5c 100644
Binary files a/data_overview_mod.pdf and b/data_overview_mod.pdf differ
diff --git a/data_overview_mod.py b/data_overview_mod.py
index 91971fa..2dcc426 100644
--- a/data_overview_mod.py
+++ b/data_overview_mod.py
@@ -2,12 +2,17 @@
 from IPython import embed
 from matplotlib import gridspec as gridspec, pyplot as plt
 import numpy as np
-from threefish.utils0 import basename_small, colors_overview, default_figsize, 
 exclude_nans_for_corr, get_grid_4, \
-    scatter_with_marginals_colorcoded, update_cell_names, \
-    load_overview_susept, \
-    make_log_ticks, save_visualization, setting_overview_score, version_final
-from threefish.core_plot_labels import pearson_label
-from threefish.utils1_suscept import plt_specific_cells, stimname_small, NLI_scorename2_small, start_name
+from threefish.core_cell_choice import update_cell_names
+from threefish.core_filenames import version_final
+from threefish.core_plot_suscept import get_grid_4, scatter_with_marginals_colorcoded
+from threefish.defaults import default_figsize
+from threefish.core_plot_colors import colors_overview
+from threefish.core_reformat import exclude_nans_for_corr, load_overview_susept
+from threefish.core_load import save_visualization
+from threefish.core_bursts import setting_overview_score
+from threefish.core_plot_labels import basename_small, label_NLI_scorename2_small, label_pearson, label_stimname_small, \
+    make_log_ticks
+from threefish.utils1_suscept import plt_specific_cells, start_name
 from scipy import stats
 try:
     from plotstyle import plot_style, spines_params
@@ -126,14 +131,14 @@ def data_overview3():
         var_item_names = [var_it,var_it,var_it2]#,var_it2]#['Response Modulation [Hz]',]
         var_types = ['response_modulation','response_modulation','']#,'']#'response_modulation'
         max_x = max_xs[c]
-        x_axis_names = ['CV$'+basename_small()+'$','CV$'+stimname_small()+'$','Response Modulation [Hz]']#$'+basename()+'$,'Fr$'+basename()+'$',]
+        x_axis_names = ['CV$' + basename_small() +'$','CV$' + label_stimname_small() + '$', 'Response Modulation [Hz]']#$'+basename()+'$,'Fr$'+basename()+'$',]
         #score = scores[0]
         score_n = ['Perc99/Med', 'Perc99/Med', 'Perc99/Med']
         score = scores[c]
         scores_here = [score,score,score]#,score]
         score_name = ['max(diag5Hz)/med_diagonal_proj_fr','max(diag5Hz)/med_diagonal_proj_fr']#,'max(diag5Hz)/med_diagonal_proj_fr']#'Perc99/Med'
         score_name = ['Fr/Med', 'Fr/Med']#'Fr/Med'] # 'Perc99/Med'
-        score_name = [NLI_scorename2_small(), NLI_scorename2_small(), NLI_scorename2_small()]#NLI_scorename()] # 'Fr/Med''Perc99/Med'
+        score_name = [label_NLI_scorename2_small(), label_NLI_scorename2_small(), label_NLI_scorename2_small()]#NLI_scorename()] # 'Fr/Med''Perc99/Med'
         ax_j = []
         axls = []
         axss = []
@@ -246,7 +251,7 @@ def data_overview3():
                    print('frame file lost')
                    embed()
                corr, p_value = stats.pearsonr(x_axis, y_axis)
-               pears_l = pearson_label(corr, p_value, y_axis, n=True)
+               pears_l = label_pearson(corr, p_value, y_axis, n=True)
               print(start_name(cell_type_here, species) + ' ' + corr_var + ' to '+str(score) + pears_l)
               #if corr_var == 'ser_first_base':#
              # embed()
@@ -256,7 +261,7 @@ def data_overview3():
               c_axis, x_axis, y_axis, exclude_here = exclude_nans_for_corr(frame_file, 'fr_base', cv_name='fr_base', score='cv_base')
               corr, p_value = stats.pearsonr(x_axis, y_axis)
-              pears_l = pearson_label(corr, p_value, y_axis, n=True)
+              pears_l = label_pearson(corr, p_value, y_axis, n=True)
               print(start_name(cell_type_here, species) + ' ' + 'fr_base' + ' to ' + 'cv_base' + pears_l)
@@ -266,7 +271,7 @@ def data_overview3():
               c_axis, x_axis, y_axis, exclude_here = exclude_nans_for_corr(frame_file, 'burst_fraction_burst_corr_individual_base', cv_name='burst_fraction_burst_corr_individual_base', score='cv_base')
               corr, p_value = stats.pearsonr(x_axis, y_axis)
-              pears_l = pearson_label(corr, p_value, y_axis, n=True)
+              pears_l = label_pearson(corr, p_value, y_axis, n=True)
               print(start_name(cell_type_here, species) + ' amprange: ' + 
                    'burst_fraction_individual_base' + ' to ' + 'cv_base' + pears_l)
               ###############################
               # fr to nonline but for both modulations
@@ -274,7 +279,7 @@ def data_overview3():
               c_axis, x_axis, y_axis, exclude_here = exclude_nans_for_corr(frame_file, 'fr_base', cv_name='fr_base', score=score)
               corr, p_value = stats.pearsonr(x_axis, y_axis)
-              pears_l = pearson_label(corr, p_value, y_axis, n=True)
+              pears_l = label_pearson(corr, p_value, y_axis, n=True)
               print(start_name(cell_type_here, species) + ' amprange: ' + ' ' + 'fr_base' + ' to score' + pears_l)
               ##################################
               print('\n')
diff --git a/flowchart.pdf b/flowchart.pdf
index b974a31..8c6b4fc 100644
Binary files a/flowchart.pdf and b/flowchart.pdf differ
diff --git a/flowchart.py b/flowchart.py
index 044719f..68e419d 100644
--- a/flowchart.py
+++ b/flowchart.py
@@ -1,18 +1,21 @@
 #from utils0import default_settings
 #from plt_RAM import model_and_data_isi, model_cells
-from plotstyle import plot_style
-from threefish.utils0 import default_figsize, save_visualization
-from threefish.plot_suscept import model_sheme_split2
 #from threefish.utils0 import default_settings, resave_small_files
 #from plt_RAM import model_and_data, model_and_data_sheme, model_and_data_vertical2
-import matplotlib.gridspec as gridspec
+# from threefish.utils0 import default_settings, resave_small_files
+# from plt_RAM import model_and_data, model_and_data_sheme, model_and_data_vertical2
+
+from threefish.plot_suscept import model_sheme_split2
+# from threefish.utils0 import default_settings, resave_small_files
+# from plt_RAM import model_and_data, model_and_data_sheme, model_and_data_vertical2
 # from threefish.utils0 import default_settings, resave_small_files
 # from plt_RAM import model_and_data, model_and_data_sheme, model_and_data_vertical2
 import matplotlib.gridspec as gridspec
 from plotstyle import plot_style
+from threefish.core_load import save_visualization
 from threefish.plot_suscept import model_sheme_split2
-from threefish.utils0 import default_figsize, save_visualization
+from threefish.defaults import default_figsize


 def flowchart():
diff --git a/model_and_data.pdf b/model_and_data.pdf
index 37dec81..fabd1e8 100644
Binary files a/model_and_data.pdf and b/model_and_data.pdf differ
diff --git a/model_and_data.png b/model_and_data.png
index bdfd6e0..e7cbbb2 100644
Binary files a/model_and_data.png and b/model_and_data.png differ
diff --git a/model_and_data.py b/model_and_data.py
index 3709933..6bc62bd 100644
--- a/model_and_data.py
+++ b/model_and_data.py
@@ -7,18 +7,18 @@
 from IPython import embed
 from matplotlib import gridspec, pyplot as plt
 from plotstyle import plot_style
-from threefish.utils0 import default_settings, find_cell_add, get_flowchart_params, load_model_susept, \
-    nonlin_title, overlap_cells, \
-    perc_model_full, plot_lowpass2, plt_data_susept, plt_single_square_modl, plt_time_arrays, \
-    remove_xticks, \
-    remove_yticks, \
-    save_visualization, set_same_ylim
-from threefish.core_calc_model import load_folder_name, resave_small_files
-from threefish.core_plot_labels import noise_name
+from threefish.core_plot_subplots import plot_lowpass2, plt_single_square_modl, plt_time_arrays
+from threefish.core_plot_suscept import perc_model_full, plt_data_susept
+from threefish.core_filenames import overlap_cells
+from threefish.core_load import save_visualization
+from threefish.core_reformat import get_flowchart_params, load_model_susept
+from threefish.core_calc_model import resave_small_files
+from threefish.core import 
find_folder_name +from threefish.core_plot_labels import label_noise_name, nonlin_title, remove_xticks, remove_yticks, set_xlabel_arrow, \ + set_ylabel_arrow, title_find_cell_add, xlabel_xpos_y_modelanddata import itertools as it -from threefish.utils0 import default_figsize -from threefish.utils1_suscept import xpos_y_modelanddata -from threefish.utils0 import join_x, join_y, set_clim_same, set_xlabel_arrow, set_ylabel_arrow +from threefish.defaults import default_figsize, default_settings +from threefish.core_plot_lims import join_x, join_y, set_clim_same, set_same_ylim #from plt_RAM import model_and_data, model_and_data_sheme, model_and_data_vertical2 @@ -35,22 +35,10 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], contrasts=[0], noises_added=[''], D_extraction_method=['additiv_cv_adapt_factor_scaled'], internal_noise=['RAM'], external_noise=['RAM'], level_extraction=[''], receiver_contrast=[1], dendrids=[''], ref_types=[''], adapt_types=[''], c_noises=[0.1], c_signal=[0.9], cut_offs1=[300]): # ['eRAM'] - # plot_style()#['_RAMscaled']'_RAMscaled' - duration_noise = '_short', - formula = 'code' ##'formula' - # ,int(2 ** 16) int(2 ** 16), int(2 ** 15), stimulus_length = 1 # 20#550 # 30 # 15#45#0.5#1.5 15 45 100 trials_nrs = [1] # [100, 500, 1000, 3000, 10000, 100000, 1000000] # 500 - stimulus_type = '_StimulusOrig_' # ,# - # ,3]#, 3, 1, 1.5, 0.5, ] # ,1,1.5, 0.5] #[1,1.5, 0.5] # 1.5,0.5]3, 1, - variant = 'sinz' - mimick = 'no' - cell_recording_save_name = '' - trans = 1 # 5 rep = 1000000 # 500000#0 - repeats = [20, rep] # 250000 - aa = 0 good_data, remaining = overlap_cells() cells_all = [good_data[0]] @@ -64,8 +52,7 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], mins = [] mats = [] ims = [] - perc05 = [] - perc95 = [] + iternames = [D_extraction_method, external_noise, internal_noise, powers, nffts, dendrids, cut_offs1, trials_nrs, c_signal, c_noises, @@ -95,28 +82,16 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], grid_data = gridspec.GridSpecFromSubplotSpec(1, 1, grid[0, 0], hspace=hs) - - fr_print = False #True - #ypos_x_modelanddata() + fr_print = False nr = 1 ax_data, stack_spikes_all, eod_frs = plt_data_susept(fig, grid_data, cells_all, cell_type='p-unit', width=width, cbar_label=True, fr_print=fr_print, eod_metrice=eod_metrice, nr=nr, amp_given=1, xlabel=False, lp=lp, title=True) for ax_external in ax_data: - # ax.set_ylabel(F2_xlabel()) - # remove_xticks(ax) ax_external.set_xticks_delta(100) - set_ylabel_arrow(ax_external, xpos=xpos_y_modelanddata(), ypos=0.87) - - #embed() + set_ylabel_arrow(ax_external, xpos=xlabel_xpos_y_modelanddata(), ypos=0.87) set_xlabel_arrow(ax_external, ypos=ypos_x_modelanddata()) - # ax.text(-0.42, 0.87, F2_xlabel(), ha='center', va='center', - # transform=ax.transAxes, rotation = 90) - # ax.text(1.66, 0.5, nonlin_title(), rotation=90, ha='center', va='center', - # transform=ax.transAxes) - - #ax_external.arrow_spines('lb') #embed() #plt.show() @@ -128,7 +103,7 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], cell = '2012-07-03-ak-invivo-1' print('cell' + str(cell)) cells_given = [cell] - save_name_rev = load_folder_name( + save_name_rev = find_folder_name( 'calc_model') + '/' + 'calc_RAM_model-2__nfft_whole_power_1_RAM_additiv_cv_adapt_factor_scaled_cNoise_0.1_cSig_0.9_cutoff1_300_cutoff2_300no_sinz_length1_TrialsStim_' + str( trial_nr) + 
'_a_fr_1__trans1s__TrialsNr_1_fft_o_forward_fft_i_forward_Hz_mV_revQuadrant_' # for trial in trials:#.009 @@ -164,7 +139,10 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], # Hier werde ich nun mit dem Fehler von 0.36 verfahren # das bedeutet aber das sich den Stimulus zwar mit 0.009 ins Modell reintue später für die # Susceptiblitätsberechnung sollte ich ihn aber um den Faktor 0.36 teilen - bias_factor = 0.36 + + # oben habe ich einen bias factor weil die Zelle zu sensitiv gefittet ist, also passe ich das an dass die den + # gleichen CV und feurrate hat, wie die Zelle in der Stimulation, deswegen ist dieser Bias faktor nur oben! + bias_factors = [0.36, 0.36, 1, 1]#0.36 save_names = [ 'calc_RAM_model-2__nfft_whole_power_1_afe_0.009_RAM_cutoff1_300_cutoff2_300no_sinz_length1_TrialsStim_11_a_fr_1__trans1s__TrialsNr_1_fft_o_forward_fft_i_forward_Hz_mV', 'calc_RAM_model-2__nfft_whole_power_1_afe_0.009_RAM_cutoff1_300_cutoff2_300no_sinz_length1_TrialsStim_' + str( @@ -187,8 +165,8 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], c = 2.5 cs = ['$c=%.1f$' % c + '$\,\%$', '$c=0\,\%$'] titles = ['Model\n$N=11$', 'Model\n' + '$N=10^6$', - 'Model\,(' + noise_name().lower() + ')' + '\n' + '$N=11$', - 'Model\,(' + noise_name().lower() + ')' + '\n' + '$N=10^6$' + 'Model\,(' + label_noise_name().lower() + ')' + '\n' + '$N=11$', + 'Model\,(' + label_noise_name().lower() + ')' + '\n' + '$N=10^6$' ] #%#%s$' % (tr_name) + '\,million' #'Model\,('+noise_name().lower()+')' + '\n' + '$N=11$\n $c=1\,\%$',$N=%s $' % (tr_name) +'\,million' # 'Model\,('+noise_name().lower()+')' + '\n' + '$N=%s$' % (tr_name) + '\,million\n $c=1\,\%$ ' @@ -202,28 +180,28 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], embed() ax_model.append(ax_external) - save_name = load_folder_name('calc_model') + '/' + sav_name + save_name = find_folder_name('calc_model') + '/' + sav_name - cell_add, cells_save = find_cell_add(cells_given) + cell_add, cells_save = title_find_cell_add(cells_given) perc = 'perc' path = save_name + '.pkl' # '../'+ - # stack = get_stack_one_quadrant(cell, cell_add, cells_save, path, save_name) + # model = get_stack_one_quadrant(cell, cell_add, cells_save, path, save_name) - # full_matrix = create_full_matrix2(np.array(stack), np.array(stack_rev)) - # stack_final = get_axis_on_full_matrix(full_matrix, stack) - # im = plt_RAM_perc(ax, perc, np.abs(stack)) + # full_matrix = create_full_matrix2(np.array(model), np.array(stack_rev)) + # stack_final = get_axis_on_full_matrix(full_matrix, model) + # im = plt_RAM_perc(ax, perc, np.abs(model)) - stack = load_model_susept(path, cells_save, save_name.split(r'/')[-1] + cell_add) + model = load_model_susept(path, cells_save, save_name.split(r'/')[-1] + cell_add) #embed() - if len(stack) > 0: - add_nonlin_title, cbar, fig, stack_plot, im = plt_single_square_modl(ax_external, cell, stack, perc, + if len(model) > 0: + add_nonlin_title, cbar, fig, stack_plot, im = plt_single_square_modl(ax_external, cell, model, perc, titles[s], width, eod_metrice=eod_metrice, titles_plot=True, resize=True, - bias_factor=bias_factor, + bias_factor=bias_factors[s], fr_print=fr_print, nr=nr) # if s in [1,3,5]: @@ -242,7 +220,7 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], if s in np.arange(col - 1, 100, col): # | (s == 0) remove_yticks(ax_external) else: - set_ylabel_arrow(ax_external, xpos=xpos_y_modelanddata(), ypos=0.87) + set_ylabel_arrow(ax_external, 
xpos=xlabel_xpos_y_modelanddata(), ypos=0.87) if s >= row * col - col: set_xlabel_arrow(ax_external, ypos=ypos_x_modelanddata()) @@ -258,141 +236,8 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], ################################################# # Flowcharts - var_types = ['', 'additiv_cv_adapt_factor_scaled'] #'additiv_cv_adapt_factor_scaled', - ##additiv_cv_adapt_factor_scaled - a_fes = [c / 100, 0] #, 0.009 - eod_fe = [750, 750] #, 750 - ylim = [-0.5, 0.5] - c_sigs = [0, 0.9] #, 0.9 - grid_left = [[], grid[1, 0]] #, grid[2, 0] - ax_ams = [] - for g, grid_here in enumerate([grid[0, 2], grid[1, 2]]): #, grid[2, 0] - grid_lowpass = gridspec.GridSpecFromSubplotSpec(4, 1, - subplot_spec=grid_here, hspace=0.2, - height_ratios=[1, 1, 1, 0.1]) - - models = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core') - model_params = models[models['cell'] == '2012-07-03-ak-invivo-1'].iloc[0] - cell = model_params.pop('cell') # .iloc[0]# Werte für das Paper nachschauen - eod_fr = model_params['EODf'] # .iloc[0] - deltat = model_params.pop("deltat") # .iloc[0] - v_offset = model_params.pop("v_offset") # .iloc[0] - eod_fr = stack.eod_fr.iloc[0] - print(var_types[g] + ' a_fe ' + str(a_fes[g])) - noise_final_c, spike_times, stimulus, stimulus_here, time, v_dent_output, v_mem_output, frame = get_flowchart_params( - a_fes, a_fr, g, c_sigs[g], cell, deltat, eod_fr, model_params, stimulus_length, v_offset, var_types, - eod_fe=eod_fe) - - if (len(np.unique(frame.RAM_afe)) > 1) & (len(np.unique(frame.RAM_noise)) > 1): - grid_lowpass2 = gridspec.GridSpecFromSubplotSpec(4, 1, - subplot_spec=grid_here, height_ratios=[1, 1, 1, 0.1], - hspace=0.05) - - # if (np.unique(frame.RAM_afe) != 0):grid_left[g] - - ax_external = plt_time_arrays('red', grid_lowpass2, 1, frame.RAM_afe * 100, time=time, nr=0) - # if (np.unique(frame.RAM_noise) != 0): - remove_xticks(ax_external) - ax_intrinsic = plt_time_arrays('purple', grid_lowpass2, 1, frame.RAM_noise * 100, time=time, nr=1) - ax_intrinsic.text(-0.6, 0.5, '$\%$', rotation=90, va='center', transform=ax_intrinsic.transAxes) - ax_intrinsic.show_spines('l') - ax_external.show_spines('l') - # ax_ams.append(axt_p2) - #color_timeseries = 'black' - #axt_p2.set_xlabel('Time [ms]') - #axt_p2.text(-0.6, 0.5, '$\%$', rotation=90, va='center', transform=axt_p2.transAxes) - #ax_ams.append(axt_p2) - vers = 'all' - elif len(np.unique(frame.RAM_afe)) > 1: - color_timeseries = 'red' - nr_plot = 0 - print(str(g) + ' afevar ' + str(np.var(frame.RAM_afe)) + ' afenoise ' + str(np.var(frame.RAM_noise))) - try: - ax_external, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[nr_plot]], time, - (frame.RAM_afe + frame.RAM_noise) * 100, - deltat, eod_fr, - color1=color_timeseries, lw=1, extract=False) - except: - print('add up thing') - embed() - ax_external.show_spines('l') - - ax_intrinsic = plt.subplot(grid_lowpass[1]) - ax_intrinsic.show_spines('l') - ax_intrinsic.axhline(0, color='black', lw=0.5) - ax_intrinsic.axhline(0, color='purple', lw=0.5) - - remove_xticks(ax_external) - remove_xticks(ax_intrinsic) - join_x([ax_intrinsic, ax_external]) - join_y([ax_intrinsic, ax_external]) - vers = 'first' - elif len(np.unique(frame.RAM_noise)) > 1: - color_timeseries = 'purple' - nr_plot = 1 - print(str(g) + ' afevar ' + str(np.var(frame.RAM_afe)) + ' afenoise ' + str(np.var(frame.RAM_noise))) - try: - ax_intrinsic, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[nr_plot]], time, - (frame.RAM_afe + frame.RAM_noise) * 100, - deltat, eod_fr, - 
color1=color_timeseries, lw=1, extract=False) - except: - print('add up thing') - - embed() - - ax_external = plt.subplot(grid_lowpass[0]) - ax_external.show_spines('l') - ax_intrinsic.show_spines('l') - - ax_external.axhline(0, color='black', lw=0.5) - ax_external.axhline(0, color='red', lw=0.5) - join_x([ax_intrinsic, ax_external]) - join_y([ax_intrinsic, ax_external]) - vers = 'second' - ax_intrinsic.set_yticks_delta(6) - ax_external.set_yticks_delta(6) - ax_external.text(-0.6, 0.5, '$\%$', va='center', rotation=90, transform=ax_external.transAxes) - ax_intrinsic.text(-0.6, 0.5, '$\%$', va='center', rotation=90, transform=ax_intrinsic.transAxes) - remove_xticks(ax_intrinsic) - # if (len(np.unique(frame.RAM_afe)) > 1) & (len(np.unique(frame.RAM_noise)) > 1): - ax_external.set_xlabel('') - # remove_yticks(ax) + ax_ams, ax_external = plt_model_flowcharts(a_fr, ax_external, c, cs, grid, model, stimulus_length) - ax_ams.append(ax_external) - remove_xticks(ax_external) - - ax_n, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[2]], time, noise_final_c, deltat, eod_fr, - extract=False, color1='grey', lw=1) - remove_yticks(ax_n) - if g == 1: - #ax_n.set_xlabel('Time [ms]', labelpad=-0.5) - ax_n.text(0.5, xpos_y_modelanddata()*3, 'Time [ms]', transform = ax_n.transAxes, ha = 'center') - else: - remove_xticks(ax_n) - ax_n.set_ylim(ylim) - - if vers == 'first': - ax_external.text(1, 1, 'RAM(' + cs[0] + ')', ha='right', color='red', transform=ax_external.transAxes) - ax_n.text(start_pos_modeldata(), 1.1, noise_component_name(), ha='right', color='gray', - transform=ax_n.transAxes) - elif vers == 'second': - ax_external.text(1, 1, 'RAM(' + cs[1] + ')', ha='right', color='red', transform=ax_external.transAxes) - ax_intrinsic.text(start_pos_modeldata(), 1.1, signal_component_name(), ha='right', color='purple', - transform=ax_intrinsic.transAxes) - ax_n.text(start_pos_modeldata(), 0.9, noise_component_name(), ha='right', color='gray', - transform=ax_n.transAxes) - else: - ax_n.text(start_pos_modeldata(), 0.9, noise_component_name(), ha='right', color='gray', - transform=ax_n.transAxes) - ax_external.text(1, 1, 'RAM', ha='right', color='red', transform=ax_external.transAxes) - ax_intrinsic.text(start_pos_modeldata(), 1.1, signal_component_name(), ha='right', color='purple', - transform=ax_intrinsic.transAxes) - ax_external.tick_params(axis='y', which='major', labelsize=8.4) - ax_intrinsic.tick_params(axis='y', which='major', labelsize=8.4) - ax_n.tick_params(axis='y', which='major', labelsize=8.4) - #xtick_labelsize = - #embed() set_same_ylim(ax_ams, up='up') axes = np.concatenate([ax_data, ax_model]) @@ -419,6 +264,145 @@ def model_and_data2(eod_metrice=False, width=0.005, nffts=['whole'], powers=[1], save_visualization(pdf=True) +def plt_model_flowcharts(a_fr, ax_external, c, cs, grid, stack, stimulus_length): + var_types = ['', 'additiv_cv_adapt_factor_scaled'] # 'additiv_cv_adapt_factor_scaled', + ##additiv_cv_adapt_factor_scaled + a_fes = [c / 100, 0] # , 0.009 + eod_fe = [750, 750] # , 750 + ylim = [-0.5, 0.5] + c_sigs = [0, 0.9] # , 0.9 + grid_left = [[], grid[1, 0]] # , grid[2, 0] + ax_ams = [] + for g, grid_here in enumerate([grid[0, 2], grid[1, 2]]): # , grid[2, 0] + grid_lowpass = gridspec.GridSpecFromSubplotSpec(4, 1, + subplot_spec=grid_here, hspace=0.2, + height_ratios=[1, 1, 1, 0.1]) + + models = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core') + model_params = models[models['cell'] == '2012-07-03-ak-invivo-1'].iloc[0] + cell = model_params.pop('cell') # 
.iloc[0]# Werte für das Paper nachschauen + eod_fr = model_params['EODf'] # .iloc[0] + deltat = model_params.pop("deltat") # .iloc[0] + v_offset = model_params.pop("v_offset") # .iloc[0] + eod_fr = stack.eod_fr.iloc[0] + print(var_types[g] + ' a_fe ' + str(a_fes[g])) + noise_final_c, spike_times, stimulus, stimulus_here, time, v_dent_output, v_mem_output, frame = get_flowchart_params( + a_fes, a_fr, g, c_sigs[g], cell, deltat, eod_fr, model_params, stimulus_length, v_offset, var_types, + eod_fe=eod_fe) + + if (len(np.unique(frame.RAM_afe)) > 1) & (len(np.unique(frame.RAM_noise)) > 1): + grid_lowpass2 = gridspec.GridSpecFromSubplotSpec(4, 1, + subplot_spec=grid_here, height_ratios=[1, 1, 1, 0.1], + hspace=0.05) + + # if (np.unique(frame.RAM_afe) != 0):grid_left[g] + + ax_external = plt_time_arrays('red', grid_lowpass2, 1, frame.RAM_afe * 100, time=time, nr=0) + # if (np.unique(frame.RAM_noise) != 0): + remove_xticks(ax_external) + ax_intrinsic = plt_time_arrays('purple', grid_lowpass2, 1, frame.RAM_noise * 100, time=time, nr=1) + ax_intrinsic.text(-0.6, 0.5, '$\%$', rotation=90, va='center', transform=ax_intrinsic.transAxes) + ax_intrinsic.show_spines('l') + ax_external.show_spines('l') + # ax_ams.append(axt_p2) + # color_timeseries = 'black' + # axt_p2.set_xlabel('Time [ms]') + # axt_p2.text(-0.6, 0.5, '$\%$', rotation=90, va='center', transform=axt_p2.transAxes) + # ax_ams.append(axt_p2) + vers = 'all' + elif len(np.unique(frame.RAM_afe)) > 1: + color_timeseries = 'red' + nr_plot = 0 + print(str(g) + ' afevar ' + str(np.var(frame.RAM_afe)) + ' afenoise ' + str(np.var(frame.RAM_noise))) + try: + ax_external, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[nr_plot]], time, + (frame.RAM_afe + frame.RAM_noise) * 100, + deltat, eod_fr, + color1=color_timeseries, lw=1, extract=False) + except: + print('add up thing') + embed() + ax_external.show_spines('l') + + ax_intrinsic = plt.subplot(grid_lowpass[1]) + ax_intrinsic.show_spines('l') + ax_intrinsic.axhline(0, color='black', lw=0.5) + ax_intrinsic.axhline(0, color='purple', lw=0.5) + + remove_xticks(ax_external) + remove_xticks(ax_intrinsic) + join_x([ax_intrinsic, ax_external]) + join_y([ax_intrinsic, ax_external]) + vers = 'first' + elif len(np.unique(frame.RAM_noise)) > 1: + color_timeseries = 'purple' + nr_plot = 1 + print(str(g) + ' afevar ' + str(np.var(frame.RAM_afe)) + ' afenoise ' + str(np.var(frame.RAM_noise))) + try: + ax_intrinsic, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[nr_plot]], time, + (frame.RAM_afe + frame.RAM_noise) * 100, + deltat, eod_fr, + color1=color_timeseries, lw=1, extract=False) + except: + print('add up thing') + + embed() + + ax_external = plt.subplot(grid_lowpass[0]) + ax_external.show_spines('l') + ax_intrinsic.show_spines('l') + + ax_external.axhline(0, color='black', lw=0.5) + ax_external.axhline(0, color='red', lw=0.5) + join_x([ax_intrinsic, ax_external]) + join_y([ax_intrinsic, ax_external]) + vers = 'second' + ax_intrinsic.set_yticks_delta(6) + ax_external.set_yticks_delta(6) + ax_external.text(-0.6, 0.5, '$\%$', va='center', rotation=90, transform=ax_external.transAxes) + ax_intrinsic.text(-0.6, 0.5, '$\%$', va='center', rotation=90, transform=ax_intrinsic.transAxes) + remove_xticks(ax_intrinsic) + # if (len(np.unique(frame.RAM_afe)) > 1) & (len(np.unique(frame.RAM_noise)) > 1): + ax_external.set_xlabel('') + # remove_yticks(ax) + + ax_ams.append(ax_external) + remove_xticks(ax_external) + + ax_n, ff, pp, ff_am, pp_am = plot_lowpass2([grid_lowpass[2]], time, noise_final_c, deltat, eod_fr, + 
extract=False, color1='grey', lw=1) + remove_yticks(ax_n) + if g == 1: + # ax_n.set_xlabel('Time [ms]', labelpad=-0.5) + ax_n.text(0.5, xlabel_xpos_y_modelanddata() * 3, 'Time [ms]', transform=ax_n.transAxes, ha='center') + else: + remove_xticks(ax_n) + ax_n.set_ylim(ylim) + + if vers == 'first': + ax_external.text(1, 1, 'RAM(' + cs[0] + ')', ha='right', color='red', transform=ax_external.transAxes) + ax_n.text(start_pos_modeldata(), 1.1, noise_component_name(), ha='right', color='gray', + transform=ax_n.transAxes) + elif vers == 'second': + ax_external.text(1, 1, 'RAM(' + cs[1] + ')', ha='right', color='red', transform=ax_external.transAxes) + ax_intrinsic.text(start_pos_modeldata(), 1.1, signal_component_name(), ha='right', color='purple', + transform=ax_intrinsic.transAxes) + ax_n.text(start_pos_modeldata(), 0.9, noise_component_name(), ha='right', color='gray', + transform=ax_n.transAxes) + else: + ax_n.text(start_pos_modeldata(), 0.9, noise_component_name(), ha='right', color='gray', + transform=ax_n.transAxes) + ax_external.text(1, 1, 'RAM', ha='right', color='red', transform=ax_external.transAxes) + ax_intrinsic.text(start_pos_modeldata(), 1.1, signal_component_name(), ha='right', color='purple', + transform=ax_intrinsic.transAxes) + ax_external.tick_params(axis='y', which='major', labelsize=8.4) + ax_intrinsic.tick_params(axis='y', which='major', labelsize=8.4) + ax_n.tick_params(axis='y', which='major', labelsize=8.4) + # xtick_labelsize = + # embed() + return ax_ams, ax_external + + def start_pos_modeldata(): return 1.03 diff --git a/model_full.pdf b/model_full.pdf index 84bab6d..4416b23 100644 Binary files a/model_full.pdf and b/model_full.pdf differ diff --git a/model_full.png b/model_full.png index 3364760..e4c793c 100644 Binary files a/model_full.png and b/model_full.png differ diff --git a/model_full.py b/model_full.py index 93739a1..7c1e9e3 100644 --- a/model_full.py +++ b/model_full.py @@ -1,33 +1,31 @@ ##from update_project import ** import numpy as np -import pandas as pd -from IPython import embed from matplotlib import gridspec, pyplot as plt -import matplotlib.mlab as ml -from quantities import sum -from scipy.ndimage import gaussian_filter - -from threefish.utils0 import all_damping_variants, calc_ps, convert_csv_str_to_float, create_full_matrix2, \ - default_model0, deltat_choice, \ - diagonal_points, \ - exclude_file_name_short, find_all_dir_cells, get_arrays_for_three, get_axis_on_full_matrix, get_cutoffs_nr, get_file_names_exclude, get_mat_susept, \ - get_phaseshifts, \ - get_psds_ROC, log_calc_psd, nonlin_title, perc_model_full, plt_model_big, plt_peaks_several, plt_psds_ROC, \ - rescale_colorbar_and_values, version_final -from threefish.core_calc_model import find_code_vs_not, load_folder_name, resave_small_files, simulate + +from threefish.core_calc_fft import create_full_matrix2 +from threefish.core_reformat import chose_mat_max_value, convert_csv_str_to_float, get_axis_on_full_matrix, get_psds_ROC, get_transfer_from_model, \ + load_b_public, load_stack_data_susept +from threefish.core_plot_subplots import colors_suscept_paper_dots, plt_model_full_model2, plt_psds_ROC, plt_RAM_perc +from threefish.core_load import save_visualization +from threefish.core_values import vals_model_full +from threefish.core_cell_choice import find_all_dir_cells +from threefish.core_filenames import version_final +from threefish.core_plot_suscept import perc_model_full, plt_model_big +from threefish.core import find_code_vs_not from plotstyle import plot_style -from 
threefish.utils0 import ISI_frequency, colorbar_outside, cr_spikes_mat, create_stimulus_SAM, default_figsize, \ - eod_fish_r_generation, extract_am, \ - find_base_fr, find_base_fr2, plt_RAM_perc, \ - remove_xticks, remove_yticks, save_visualization, time +import time +from threefish.defaults import default_diagonal_points, default_figsize +from threefish.core_plot_colorbar import colorbar_outside, rescale_colorbar_and_values from threefish.utils1_suscept import check_var_substract_method, chose_certain_group, \ - colors_suscept_paper_dots, deltaf1_label, \ - deltaf2_label, diff_label, extract_waves, fbasename_small, gaussian_intro, labels_didactic2, \ - load_cells_three, outputmodel, predefine_grouping_frame, restrict_cell_type, \ - save_arrays_susept, sum_label, title_motivation, \ - two_deltaf1_label, two_deltaf2_label -from threefish.utils0 import eod_fish_e_generation, join_y, load_b_public, set_clim_same, set_xlabel_arrow, set_ylabel_arrow, chose_mat_max_value -from threefish.utils1_suscept import load_data_susept, restrict_punits #test_spikes_clusters, + extract_waves, load_cells_three, predefine_grouping_frame, restrict_cell_type, \ + save_arrays_susept +from threefish.core_plot_labels import label_deltaf1, label_deltaf2, label_diff, label_sum, \ + nonlin_title, remove_xticks, set_xlabel_arrow, set_ylabel_arrow, title_motivation, label_two_deltaf1, \ + label_two_deltaf2, \ + xlabel_transfer_hz +from threefish.core_plot_lims import set_clim_same + + #from utils_test import test_spikes_clusters @@ -35,80 +33,48 @@ def model_full(c1=10, mult_type='_multsorted2_', devs=['05'], end='all', chose_s detections=['MeanTrialsIndexPhaseSort'], sorted_on='LocalReconst0.2NormAm', dfs = ['m1', 'm2']): plot_style() - default_figsize(column=2, length=2.7) - grid = gridspec.GridSpec(1, 4, wspace=0.15, bottom = 0.2, width_ratios = [3,1, 1.5,1.5], hspace=0.15, top=0.85, left=0.075, right=0.98) + default_figsize(column=2, length=3.5)#2.7 + grid = gridspec.GridSpec(2, 4, wspace=0.15, bottom = 0.2, width_ratios = [3,1, 1.5,1.5], height_ratios = [1, 5], hspace=0.55, top=0.85, left=0.095, right=0.98)#hspace=0.25, + + + axes = [] + + + + ################################## # model part ls = '--' lw = 0.5 - ax = plt.subplot(grid[0]) + axm = plt.subplot(grid[1,0]) - axes.append(ax) + axes.append(axm) cell = '2012-07-03-ak-invivo-1' - perc, im, stack_final, stack_saved = plt_model_big(ax, ls = ls, pos_rel=-0.12, lw = 0.75, cell = cell, lines = True) + perc, im, stack_final, stack_saved = plt_model_big(axm, ls = ls, pos_rel=-0.12, lw = 0.75, cell = cell, lines = True) + #embed() fr_waves = 139 - fr_noise = 120 - f1 = 33 - f2 = 139 color01, color012, color01_2, color02, color0_burst, color0 = colors_suscept_paper_dots() + + ############################################# + # plot coherence + cross = get_transfer_from_model(stack_saved) + axc = plt.subplot(grid[0, 0]) + axc.plot(stack_saved.index, cross, color = 'black') + axc.set_xlabel('Frequency [Hz]') + axc.set_ylabel(xlabel_transfer_hz()) + + ############################### # data part data_square_extra = False if data_square_extra: - ax = plt.subplot(grid[0]) - axes.append(ax) - cell = '2012-07-03-ak-invivo-1' - mat_rev,stack_final_rev = load_stack_data_susept(cell, redo = True, save_name = version_final(), end = '_revQuadrant_') - mat, stack = load_stack_data_susept(cell, save_name=version_final(), end = '') - - new_keys, stack_plot = convert_csv_str_to_float(stack) - #mat = RAM_norm_data(stack['isf'].iloc[0], stack_plot, - # stack['snippets'].unique()[0], 
stack_here=stack) # - - new_keys, stack_plot = convert_csv_str_to_float(stack_final_rev) - #mat_rev = RAM_norm_data(stack_final_rev['isf'].iloc[0], stack_plot, - # stack_final_rev['snippets'].unique()[0], stack_here=stack_final_rev) # - - mat, add_nonlin_title, resize_val = rescale_colorbar_and_values(mat) - mat_rev, add_nonlin_title, resize_val = rescale_colorbar_and_values(mat_rev, resize_val = resize_val) - - #try: - full_matrix = create_full_matrix2(np.array(mat),np.array(mat_rev)) - #except: - # print('full matrix something') - # embed() - - stack_final = get_axis_on_full_matrix(full_matrix, mat) - - abs_matrix = np.abs(stack_final) - abs_matrix, add_nonlin_title, resize_val = rescale_colorbar_and_values(abs_matrix,add_nonlin_title = 'k') - - - ax.axhline(0, color = 'white', linestyle = ls, linewidth = lw) - ax.axvline(0, color='white', linestyle = ls, linewidth = lw) - im = plt_RAM_perc(ax, perc, abs_matrix) - cbar, left, bottom, width, height = colorbar_outside(ax, im, add=5, width=0.01) - - set_clim_same([im], mats=[abs_matrix], lim_type='up', nr_clim='perc', clims='', percnr=perc_model_full()) - - #clim = im.get_clim() - #if clim[1]> 1000: - #todo: change clim values with different Hz values - #embed() - cbar.set_label(nonlin_title(add_nonlin_title = ' ['+add_nonlin_title), rotation=90, labelpad=8) - - offset = -0.35 - set_ylabel_arrow(ax, xpos = offset, ypos = 0.97) - set_xlabel_arrow(ax, xpos=1, ypos=offset) - ''' eod_fr, stack_spikes = plt_data_suscept_single(ax, cbar_label, cell, cells, f, fig, file_names_exclude, lp, title, - width)''' - cbar, left, bottom, width, height = colorbar_outside(ax, im, add=5, width=0.01) + ax, cell, stack_final = plt_data_matrix(ax, axes, cell, grid, ls, lw, perc, stack_final) ################# # power spectra data @@ -127,819 +93,162 @@ def model_full(c1=10, mult_type='_multsorted2_', devs=['05'], end='all', chose_s ms = 14 data_extra = False if data_extra: - DF1_desired, DF2_desired, fr, eod_fr, arrays_len = plt_data_full_model(c1, chose_score, detections, devs, dfs, end, grid[3], mult_type, sorted_on, ms = ms, markers = ['s', 'o'],clip_on = True, DF2_desired = DF2_desired_orig, DF1_desired = DF1_desired_orig, alpha = [1,1], log = log, ylim_log = ylim_log, nfft = nfft, xlim_psd = xlim_psd) - fr_mult = fr / eod_fr - - multwise = False - if multwise: - DF1_frmult = np.abs((np.array(DF1_desired)-1)/fr_mult) - DF2_frmult = np.abs((np.array(DF2_desired) - 1) / fr_mult) - else: - DF1_frmult = np.array(DF1_desired_orig)/fr - DF2_frmult = np.array(DF2_desired_orig) / fr - #embed() - DF1_frmult[0] = 1 - DF2_frmult[1] = 0.32339256#1.06949369 - print(DF1_frmult) - print(DF2_frmult) - - grid0 = gridspec.GridSpecFromSubplotSpec(len(DF1_desired), 1, wspace=0.15, hspace=0.35, - subplot_spec=grid[2]) + DF1_desired, DF1_frmult, DF2_desired, DF2_frmult, eod_fr, grid0 = plt_data_peaks(DF1_desired_orig, + DF2_desired_orig, c1, + chose_score, detections, devs, + dfs, end, grid, log, ms, + mult_type, nfft, sorted_on, + xlim_psd, ylim_log) else: markers = ['o', 'o', 'o', 'o', ] DF1_frmult, DF2_frmult = vals_model_full(val = 0.30833333333333335) #0.16666666666666666 grid0 = gridspec.GridSpecFromSubplotSpec(2, 2, wspace=0.15, hspace=0.4, - subplot_spec=grid[2::]) + subplot_spec=grid[:,2::]) + + ################# ################# # power spectra model + ylim_log = (-38, 28) # - - - - - #DF1_frmult[1] = 0.4 - #DF2_frmult[1] = 1.8 - #DF1_frmult[1] = 1.45 - #DF2_frmult[0] = 0.1 - #DF1_frmult[1] = 0.4 - #DF2_frmult[1] = 0.6 - #ylim_log = (-25.2, 3) - ylim_log = (-40, 5) # - 
- ######################### # punkte die zur zweiten Reihe gehören - diagonal = 'line' - combis = diagonal_points() - freq1_ratio = 1 / 2 - freq2_ratio = 2 / 3 # 0.1 - # for combi in combis: - diagonal = 'diagonal1' # 'vertical3'#'diagonal2'#'diagonal3'#'inside'#'horizontal'#'diagonal'#'vertical'# - - diagonal = 'test_data_cell_2022-01-05-aa-invivo-1' + combis = default_diagonal_points() diagonal = 'diagonal1' - - freq1_ratio = combis[diagonal][0] - freq2_ratio = combis[diagonal][1] diagonal = '' - freq1_ratio = 1.17#0.25 - freq2_ratio = 0.37 # 0.1 - freq1_ratio = 1.2#0.25 - freq2_ratio = 0.7 # 0.1 plus_q = 'plus' # 'minus'#'plus'##'minus' - way = '' # 'mult'#'absolut' - ways = ['mult_minimum_1', 'absolut', 'mult_env_3', 'mult_f1_3', 'mult_f2_3', 'mult_minimum_3', 'mult_env_1', - 'mult_f1_1', 'mult_f2_1', ] length = 2*40 # 5 reshuffled = '' # , - - alphas = [1,0.5] - a_size = 0.0125#25#0.04#0.015 - a_size = 0.01#0.0085#125 # 0.0025 - # 0.01 0.005 und davor 0.025, funktioniert gut 0.0085 + #a_size = 0.0065#0.0125#25#0.04#0.015 + a_size = 0.0005#und 0.0005 für das lineare das ist vielleicht für das nichtlienare 0.0065#0.0085#0.01#0.0085#125 # 0.0025# 0.01 0.005 und davor 0.025, funktioniert gut 0.0085 # ATTENTION: Diese Zelle ('2012-07-03-ak-invivo-1') braucht längere Abschnitte, mindsetesn 5 Sekunden damit das Powerspectrum nicht so niosy ist! - #embed()dev_spikes='original', - #embed()#stack_final - fr_noise, eod_fr_mm, axes2 = plt_model_full_model2(grid0, stack_final = stack_final , reshuffled=reshuffled, dev=0.0005, a_f1s=[a_size], af_2 = a_size, - stimulus_length=length, plus_q=plus_q, stack_saved = stack_saved, - diagonal=diagonal, runs=1, nfft = nfft, xlim_psd = xlim_psd, ylim_log = ylim_log, - cells=[cell], dev_spikes = '05', markers = markers, DF1_frmult = DF1_frmult, DF2_frmult = DF2_frmult, - log = log, ms = ms, clip_on = False, array_len = [1,1,1,1,1]) #arrays_len a_f1s=[0.02]"2012-12-13-an-invivo-1"'2013-01-08-aa-invivo-1' - - + fr_noise, eod_fr_mm, axes2 = plt_model_full_model2(grid0, stack_final = stack_final, reshuffled=reshuffled, dev=0.0005, a_f1s=[a_size], af_2 = a_size, + stimulus_length=length, plus_q=plus_q, stack_saved = stack_saved, + diagonal=diagonal, runs=1, nfft = nfft, xlim_psd = xlim_psd, ylim_log = ylim_log, + cells=[cell], dev_spikes = 'original', markers = markers, DF1_frmult = DF1_frmult, DF2_frmult = DF2_frmult, + log = log, ms = ms, clip_on = False, array_len = [1,1,1,1,1]) #arrays_len a_f1s=[0.02]"2012-12-13-an-invivo-1"'2013-01-08-aa-invivo-1' - #.share - - #ax.plot(fr_noise * f1 / fr_waves, fr_noise * f2 / fr_waves, 'o', ms=5, markeredgecolor=color012, - # markerfacecolor="None", alpha = 0.5) - #ax.plot(-fr_noise * f1 / fr_waves, fr_noise * f2 / fr_waves, 'o', ms=5, markeredgecolor=color01_2, - # markerfacecolor="None", alpha = 0.5) if data_extra: - DF2_hz = np.abs(np.array(DF1_desired) * eod_fr - eod_fr) - DF1_hz = np.abs(np.array(DF2_desired) * eod_fr - eod_fr) - for f in range(len(DF1_hz)): - ax.plot(fr_noise * DF1_hz[f] / fr_waves, fr_noise * DF2_hz[f] / fr_waves, markers[f], ms=5, markeredgecolor=color012, - markerfacecolor="None")#, alpha = alphas[f] - ax.plot(-fr_noise * DF1_hz[f] / fr_waves, fr_noise * DF2_hz[f] / fr_waves, markers[f], ms=5, markeredgecolor=color01_2, - markerfacecolor="None")#, alpha = alphas[f] + plt_data_peaks_letters(DF1_desired, DF2_desired, axm, color012, color01_2, eod_fr, fr_noise, fr_waves, markers) else: - #embed() - letters_plot = True - if letters_plot: - letters = ['B', 'C', 'D', 'E'] - for f in 
range(len(DF1_frmult)): - ax.text((fr_noise*DF1_frmult[f]), (fr_noise*DF2_frmult[f]-1), letters[f], color = color012, ha = 'center', va = 'center')#, alpha = alphas[f] - ax.text((fr_noise*DF1_frmult[f]-1), -(fr_noise*DF2_frmult[f]-1), letters[f], color = color01_2, ha = 'center', va = 'center')#, alpha = alphas[f] - - else: - for f in range(len(DF1_frmult)): - ax.plot((fr_noise*DF1_frmult[f]), (fr_noise*DF2_frmult[f]-1), markers[f], ms=5, markeredgecolor=color012, - markerfacecolor="None")#, alpha = alphas[f] - ax.plot(-(fr_noise*DF1_frmult[f]-1), (fr_noise*DF2_frmult[f]-1), markers[f], ms=5, markeredgecolor=color01_2, - markerfacecolor="None")#, alpha = alphas[f] + plt_model_letters(DF1_frmult, DF2_frmult, axm, color012, color01_2, fr_noise, markers) - #embed() - #tag2(fig=fig, xoffs=[-4.5, -4.5, -4.5, -5.5], yoffs=1.25) - #axes = plt.gca() fig = plt.gcf()#[axes[0], axes[3], axes[4]] fig.tag([axes[0], axes2[0], axes2[1], axes2[2], axes2[3]], xoffs=-4.5, yoffs=1.2) # ax_ams[3], - + #plt.show() save_visualization() -def vals_model_full(val = 0.2916666666666667): - - DF1_frmult = [val, val, 1, 0.81] - DF2_frmult = [1 - val, 1 + val, val, 1.18] # 1.06949369 - return DF1_frmult, DF2_frmult - +def plt_model_letters(DF1_frmult, DF2_frmult, ax, color012, color01_2, fr_noise, markers): + letters_plot = True + if letters_plot: + letters = ['B', 'C', 'D', 'E'] + for f in range(len(DF1_frmult)): + ax.text((fr_noise * DF1_frmult[f]), (fr_noise * DF2_frmult[f] - 1), letters[f], color=color012, ha='center', + va='center') # , alpha = alphas[f] + ax.text((fr_noise * DF1_frmult[f] - 1), -(fr_noise * DF2_frmult[f] - 1), letters[f], color=color01_2, + ha='center', va='center') # , alpha = alphas[f] -def plt_model_full_model(axp, min=0.2, cells=[], a_f2 = 0.1, perc = 0.05, alpha = 1, trials_nr = 15, - single_waves=['_SingleWave_', '_SeveralWave_', ], cell_start=13, - several_peaks_nr = 2, a_f1s=[0, 0.005, 0.01, 0.05, 0.1, 0.2, ], a_frs=[1], - add_half=0, log = 'log', xlim = [0, 350], freqs_mult1 = None, freqs_mult2 = None, - nfft=int(2 ** 15), gain=1, us_name=''): - model_cells = pd.read_csv(load_folder_name('calc_model_core') + "/models_big_fit_d_right.csv") - if len(cells) < 1: - cells = model_cells.cell.loc[range(cell_start, len(model_cells))] - - plot_style() - fr = float('nan') - for cell in cells: - - # eod_fr, eod_fj, eod_fe = frequency_choice(model_params, step1, step2, three_waves, a_fj, step_type=step_type, - # symmetry=symmetry, start_mult_jammer=start_mult_jammer, - # start_mult_emitter=start_mult_emitter, start_f1=start_f1, end_f1=end_f1, - # start_f2=start_f2, end_f2=end_f2) - - # sachen die ich variieren will - ########################################### - - ####### VARY HERE - for single_wave in single_waves: - if single_wave == '_SingleWave_': - a_f2s = [0] # , 0,0.2 - else: - a_f2s = [a_f2] - for a_f2 in a_f2s: - # 150 - - titles_amp = ['base eodf', 'baseline to Zero', ] - for a, a_fr in enumerate(a_frs): - # fig, ax = plt.subplots(4 + 1, len(a_f1s), figsize=(5, 5.5)) # sharex=True, - # ax = ax.reshape(len(ax),1) - # fig = plt.figure(figsize=(12, 5.5)) - #default_figsize(column=2, length=2.3) # 3 - # default_setting(column = 2, length = 5.5) - #grid = gridspec.GridSpec(3, 2, wspace=0.35, left=0.095, hspace=0.2, top=0.94, bottom=0.25, - # right=0.95) - ax = {} - for aa, a_f1 in enumerate(a_f1s): - - SAM, cell, damping, damping_type, deltat, eod_fish_r, eod_fr, f1, f2, freqs1, freqs2, model_params, offset, phase_right, phaseshift_fr, rate_adapted, rate_baseline_after, rate_baseline_before, 
sampling, spike_adapted, spikes, stimuli, stimulus_altered, stimulus_length, time_array, v_dent_output, v_mem_output = outputmodel( - a_fr, add_half, cell, model_cells, single_wave, trials_nr, freqs_mult1 = freqs_mult1, freqs_mult2 = freqs_mult2) - - - # ax[3].xscalebar(0.1, -0.02, 20, 'ms', va='right', ha='bottom') ##ylim[0] - # ax[3].set_xlabel('Time [ms]') - #axp = plt.subplot(grid[:, 1]) - axp.show_spines('lb') - - # ax[4, 0].set_xlim(0.1 * 1000, 0.125 * 1000) - - # ax[4, 1].get_shared_x_axes().join(*ax[4, :]) - base_cut, mat_base = find_base_fr(spike_adapted, deltat, stimulus_length, time_array) - fr = np.mean(base_cut) - frate, isis_diff = ISI_frequency(time_array, spike_adapted[0], fill=0.0) - isi = ( - np.diff(spike_adapted[0])) - cv0 = np.std(isi) / np.mean(isi) - cv1 = np.std(frate) / np.mean(frate) - - - fs = 11 - # for fff, freq2 in enumerate(freqs2): - # freq2 = [freq2] - - for ff, freq1 in enumerate(freqs1): - freq1 = [freq1] - freq2 = [freqs2[ff]] - # time_var = time.time() - beat1 = freq1 - eod_fr - titles = False - if titles: - plt.suptitle('diverging from half fr by ' + str(add_half) + ' f1:' + str( - np.round(freq1)[0]) + ' f2:' + str(np.round(freq2)[0]) + ' Hz \n' + str( - beat1) + ' Hz Beat\n' + titles[ff] + titles_amp[a] + ' ' + cell + ' cv ' + str( - np.round(cv0, 3)) + '_a_f0_' + str(a_fr) + '_a_f1_' + str(a_f1) + '_a_f2_' + str( - a_f2) + ' tr_nr ' + str(trials_nr)) - - # if printing: - # print(cell ) - # f_corr = create_beat_corr(np.array([freq1[f1]]), np.array([eod_fr])) - # create the second eod_fish1 array analogous to the eod_fish_r array - - phaseshift_f1, phaseshift_f2 = get_phaseshifts(a_f1, a_f2, phase_right, phaseshift_fr) - eod_fish1, time_fish_e = eod_fish_e_generation(time_array, a_f1, freq1, f1) - eod_fish2, time_fish_j = eod_fish_e_generation(time_array, a_f2, freq2, f2) - - eod_stimulus = eod_fish1 + eod_fish2 - - for t in range(trials_nr): - stimulus, eod_fish_sam = create_stimulus_SAM(SAM, eod_stimulus, eod_fish_r, freq1, f1, - eod_fr, - time_array, a_f1) - - - stimulus_orig = stimulus * 1 - - # damping variants - std_dump, max_dump, range_dump, stimulus, damping_output = all_damping_variants( - stimulus, time_array, damping_type, eod_fr, gain, damping, us_name, plot=False, - std_dump=0, max_dump=0, range_dump=0) - - stimuli.append(stimulus) - - cvs, adapt_output, baseline_after, _, rate_adapted[t], rate_baseline_before[t], \ - rate_baseline_after[t], spikes[t], \ - stimulus_altered[t], \ - v_dent_output[t], offset_new, v_mem_output[t], noise_final = simulate(cell, offset, - stimulus, - adaptation_yes_e=f1, - **model_params) - - #embed() - stimulus_altered_output = np.mean(stimulus_altered, axis=0) - # time_var2 = time.time() - - - test_stimulus_stability = False - - # time_model = time_var2 - time_var # 8 - - - spikes_mat = [[]] * len(spikes) - pps = [[]] * len(spikes) - for s in range(len(spikes)): - spikes_mat[s] = cr_spikes_mat(spikes[s], 1 / deltat, int(stimulus_length * 1 / deltat)) - pps[s], f = ml.psd(spikes_mat[s] - np.mean(spikes_mat[s]), Fs=1 / deltat, NFFT=nfft, - noverlap=nfft // 2) - pp_mean = np.mean(pps, axis=0) - sampling_rate = 1 / deltat - smoothed05 = gaussian_filter(spikes_mat, sigma=gaussian_intro() * sampling_rate) - mat05 = np.mean(smoothed05, axis=0) - - - beat1 = np.round(freq1 - eod_fr)[0] - # if titles: - # ax[0].set_title('a_f1 ' + str(a_f1), fontsize=fs) - - # ax[0, aa].set_title('f1:'+str(np.round(freq1)[0]) +' f2:'+str(np.round(freq2)[0]) + ' Hz \n'+str(beat1) + ' Hz Beat\n' +titles[ff], fontsize = fs) - # 
ax[0].plot((time_array - min) * 1000, stimulus, color='grey', linewidth=0.5) - - beat1 = (freq1 - eod_fr)[0] - beat2 = (freq2 - eod_fr)[0] - - test = False - if test: - ax = plt.subplot(1,1,1) - ax.axvline(fr, color = 'blue') - ax.axvline(beat1, color = 'green', linestyle = '-.') - ax.axvline(beat2, color = 'purple', linestyle = '--') - ax.plot(f, pp_mean) - plt.show() - #embed() - nr = 2 - color_01, color_012, color01_2, color_02, color0_burst, color0 = colors_suscept_paper_dots() - if 'Several' in single_wave: - freqs_beat = [fr, np.abs(beat1), np.abs(beat2), np.abs(np.abs(beat2) + np.abs(beat1)),np.abs(np.abs(beat2) - np.abs(beat1)) - ] # np.abs(beat2 - beat1),np.abs(beat2 + beat1), - - colors = [color0,color_01, color_02, color_012, color01_2] # 'blue' - labels = ['','', '', '', ''] # small , '|B1-B2|' - add_texts = [nr, nr + 0.35, nr + 0.2, nr + 0.2, nr + 0.2] # [1.1,1.1,1.1] - texts_left = [-7, -7, -7, -7,-7] - else: - freqs_beat = [np.abs(beat1), np.abs(beat1) * 2, np.abs(beat1 * 3), - np.abs(beat1 * 4)] # np.abs(beat1) / 2, - colors = ['black', 'orange', 'blue', 'purple', 'black'] # 'grey', - add_texts = [nr + 0.1, nr + 0.1, nr + 0.1, nr + 0.1] # [1.1,1.1,1.1,1.1] - texts_left = [3, 0, 0, 0] - labels = labels_didactic2() # colors_didactic, labels_didactic - if (np.mean(stimulus) != 0) & (np.mean(stimulus) != 1): - # try: - eod_interp, eod_norm = extract_am(stimulus, time_array, sampling=sampling_rate, - eodf=eod_fr, - emb=False, - extract='', norm=False) - for l in range(len(spikes)): - spikes[l] = (spikes[l] - min) * 1000 - pp, f = ml.psd(mat05 - np.mean(mat05), Fs=1 / deltat, NFFT=nfft, - noverlap=nfft // 2) - if log: - pp_mean = 10 * np.log10(pp_mean / np.max(pp_mean)) - - print(freqs_beat) - print(labels) - plt_peaks_several(freqs_beat, pp_mean, axp, pp_mean, f, labels, 0, colors, ha='center', - add_texts=add_texts, texts_left=texts_left, add_log=2.5, - rots=[0, 0, 0, 0, 0], several_peaks_nr=several_peaks_nr, exact=False, - text_extra=True, perc_peaksize=perc, rel='rel', alphas=[alpha] * len(colors), - ms=14, clip_on=True, several_peaks=True, log=log) # True - - axp.plot(f, pp_mean, color='black', zorder=0) # 0.45 - axp.set_xlim(xlim) - - test = False - if test: - test_spikes_clusters(eod_fish_r, spikes, mat05, sampling, s_name='ms', resamp_fact=1000) - - if log == 'log': - axp.set_ylabel('dB') - else: - axp.set_ylabel('Amplitude [Hz]') - - axp.set_xlabel('Frequency [Hz]') - return fr - -def plt_model_full_model2(grid0, reshuffled='reshuffled', af_2 = 0.1, dev=0.0005, a_f1s=[0.03], - printing=False, plus_q='minus', diagonal='diagonal', stack_final = [], - stimulus_length=0.5, runs=1, cells=[], stack_saved = [], - nfft=int(4096), beat='', nfft_for_morph=4096 * 4, DF2_frmult = [], DF1_frmult = [], - array_len = [20,20,20,20,20], - xlim_psd = [], ylim_log = [], - fish_jammer='Alepto', markers = [], clip_on = True, - ms = 14, dev_spikes='original', log =''): - plot_style() - #model_cells = pd.read_csv(load_folder_name('calc_model_core') + "/models_big_fit_d_right.csv") - model_cells = resave_small_files("models_big_fit_d_right.csv", load_folder='calc_model_core', index_col = True) + else: + for f in range(len(DF1_frmult)): + ax.plot((fr_noise * DF1_frmult[f]), (fr_noise * DF2_frmult[f] - 1), markers[f], ms=5, + markeredgecolor=color012, + markerfacecolor="None") # , alpha = alphas[f] + ax.plot(-(fr_noise * DF1_frmult[f] - 1), (fr_noise * DF2_frmult[f] - 1), markers[f], ms=5, + markeredgecolor=color01_2, + markerfacecolor="None") # , alpha = alphas[f] #embed() - if len(cells) < 1: - 
cells = len(model_cells) - - axes = [] - ps = [] - results_diff = pd.DataFrame() - for g in range(len(DF2_frmult)): - - freq2_ratio = DF2_frmult[g] - freq1_ratio = DF1_frmult[g] - #embed() - ax = plt.subplot(grid0[g]) - - axes.append(ax) - try: - trials_nr = array_len[g] - except: - print('array nr something') - - embed() - - for cell_here in cells: - ########################################### - single_waves = ['_SeveralWave_'] # , '_SingleWave_'] - for single_wave in single_waves: - if single_wave == '_SingleWave_': - a_f2s = [0] # , 0,0.2 - else: - a_f2s = [af_2] - for a_f2 in a_f2s: - # ,0.05,0.01, 0.005, 0.1, 0.2] # 0.001, - for a_f1 in a_f1s: - a_frs = [1] - titles_amp = ['base eodf'] # ,'baseline to Zero',] - for a, a_fr in enumerate(a_frs): - - #embed() - model_params = model_cells[model_cells['cell'] == cell_here].iloc[0] - # model_params = model_cells.iloc[cell_nr] - - - eod_fr = model_params['EODf'] # .iloc[0] - - offset = model_params.pop('v_offset') - - cell = model_params.pop('cell') - print(cell) - SAM, adapt_offset, cell_recording, constant_reduction, damping, damping_type, dent_tau_change, exponential, f1, f2, fish_emitter, fish_receiver, fish_morph_harmonics_var, lower_tol, mimick, n, phase_right, phaseshift_fr, sampling_factor, upper_tol, zeros = default_model0() - # in case you want a different sampling here we can adujust - time_array, sampling, deltat = deltat_choice(model_params, sampling_factor, eod_fr, - stimulus_length) - - # generate the eod_fish_r in the four mimick variants (copy, thunderfish, mimick, just sinus) - eod_fish_r, deltat, eod_fr, time_array = eod_fish_r_generation(time_array, eod_fr, a_fr, - stimulus_length, phaseshift_fr, - cell_recording, zeros, mimick, - sampling, fish_receiver, deltat, - nfft, nfft_for_morph, - fish_morph_harmonics_var=fish_morph_harmonics_var, - beat=beat) - sampling = 1 / deltat - # prepare for adapting offset due to baseline modification - - spikes_base = [[]] * trials_nr - color01, color012, color01_2, color02, color0_burst, color0 = colors_suscept_paper_dots() - for run in range(runs): - print(run) - t1 = time.time() - for t in range(trials_nr): - # get the baseline properties here - # baseline_after,spikes_base,rate_adapted, rate_baseline_before, rate_baseline_after, np.array(spike_times), stimulus_power, v_dent_output[int(0.05 / deltat):-1], offset, v_mem_output - stimulus = eod_fish_r - stimulus_base = eod_fish_r - #else: - power_here = 'sinz' - - cvs, adapt_output, baseline_after_b, _, rate_adapted_b, rate_baseline_before_b, rate_baseline_after_b, \ - spikes_base[t], _, _, offset_new, _,noise_final = simulate(cell, offset, stimulus, - deltat=deltat, - **model_params)#adaptation_variant=adapt_offset, - #adaptation_yes_j=f2, - #adaptation_yes_e=f1, - #adaptation_yes_t=t, - #power_variant=power_here, - #power_alpha=alpha, - #power_nr=n, - - if t == 0: - # here we record the changes in the offset due to the adaptation - change_offset = offset - offset_new - # and we subsequently reset the offset to be the new adapted for all subsequent trials - offset = offset_new * 1 - - if printing: - print('Baseline time' + str(time.time() - t1)) - base_cut, mat_base = find_base_fr(spikes_base, deltat, stimulus_length, time_array, dev=dev) - - - #if not fr: - #embed() - - #if stack_saved: - # fr = stack_saved.fr.iloc[0] - #else: - fr = np.round(np.mean(base_cut)) - if 'diagonal' in diagonal: - - two_third_fr = fr * freq2_ratio - freq1_ratio = (1 - freq2_ratio) - third_fr = fr * freq1_ratio - else: - two_third_fr = fr * freq2_ratio - 
try: - third_fr = fr * freq1_ratio - except: - print('freq1 something') - embed() - stack_extract = False - if stack_extract: - # fals man das hier aus dem maximalen wert der matrix extrahieren möchte - max_index = stack_final.stack().idxmax() - x_pos = np.where(stack_final == np.max(np.max(stack_final)))[0][0] - y_pos = np.where(stack_final == np.max(np.max(stack_final)))[1][0] - f2val = stack_final.index[x_pos] - f1val = stack_final.columns[y_pos] - stack_final[max_index[1]].iloc[max_index[0]] - stack_final[max_index[0]].iloc[max_index[1]] - np.argmax(stack_final) - #embed() - if plus_q == 'minus': - two_third_fr = -two_third_fr - third_fr = -third_fr - freqs2 = [eod_fr + two_third_fr] # , eod_fr - third_fr, two_third_fr, - # third_fr, - # two_third_eodf, eod_fr - two_third_eodf, - # third_eodf, eod_fr - third_eodf, ] - freqs1 = [ - eod_fr + third_fr] # , eod_fr - two_third_fr, third_fr,two_third_fr,third_eodf, eod_fr - third_eodf,two_third_eodf, eod_fr - two_third_eodf, ] - #embed() - sampling_rate = 1 / deltat - base_cut, mat_base, smoothed0, mat0 = find_base_fr2(spikes_base, deltat, - stimulus_length, dev=dev) - #fr = np.mean(base_cut) - frate, isis_diff = ISI_frequency(time_array, spikes_base[0], fill=0.0) - isi = np.diff(spikes_base[0]) - cv0 = np.std(isi) / np.mean(isi) - cv1 = np.std(frate) / np.mean(frate) - #embed() - for ff, freq1 in enumerate(freqs1): - freq1 = [freq1] - freq2 = [freqs2[ff]] - #embed() - # time_var = time.time() - - # if printing: - # print(cell ) - # f_corr = create_beat_corr(np.array([freq1[f1]]), np.array([eod_fr])) - # create the second eod_fish1 array analogous to the eod_fish_r array - t1 = time.time() - phaseshift_f1, phaseshift_f2 = get_phaseshifts(a_f1, a_f2, phase_right, phaseshift_fr) - eod_fish1, time_fish_e = eod_fish_e_generation(time_array, a_f1, freq1, f1, - phaseshift_f1, sampling, - stimulus_length, nfft_for_morph, - cell_recording, - fish_morph_harmonics_var, zeros, - mimick, fish_emitter, - thistype='emitter') - eod_fish2, time_fish_j = eod_fish_e_generation(time_array, a_f2, freq2, f2, - phaseshift_f2, sampling, - stimulus_length, nfft_for_morph, - cell_recording, - fish_morph_harmonics_var, zeros, - mimick, fish_jammer, - thistype='jammer') - - eod_stimulus = eod_fish1 + eod_fish2 - - v_mems, offset_new, mat01, mat02, mat012, smoothed01, smoothed02, smoothed012, stimulus_01, stimulus_02, stimulus_012, mat05_01, spikes_01, mat05_02, spikes_02, mat05_012, spikes_012 = get_arrays_for_three(cell, a_f2, a_f1, SAM, eod_stimulus, eod_fish_r, freq2, - eod_fish1, eod_fish2, stimulus_length, offset, model_params, n, - 'sinz', adapt_offset, deltat, f2, trials_nr, time_array, f1, - freq1, eod_fr, reshuffle=reshuffled, dev=dev) - if printing: - print('Generation process' + str(time.time() - t1)) - - - - if 'f1' not in results_diff.keys(): - results_diff['f1'] = freq1 - results_diff['f2'] = freq2 - results_diff['f0'] = eod_fr - results_diff['fr'] = fr - else: - #embed()#.loc[position_diff, 'f1'] - results_diff.loc[g, 'f1'] = freq1[0] - results_diff.loc[g, 'f2'] = freq2[0] - results_diff.loc[g, 'f0'] = eod_fr - results_diff.loc[g, 'fr'] = fr - - if run == 0: - - ################################## - # power spectrum - - if dev_spikes == 'original': - #nfft = 2 ** 15 - - p0, p02, p01, p012, fs = calc_ps(nfft, [np.mean(mat012, axis=0)], - [np.mean(mat01, axis=0)], - [np.mean(mat02, axis=0)], - [np.mean(mat0, axis=0)], - sampling_rate=sampling_rate) - - else: - #nfft = 2 ** 15 - p0, p02, p01, p012, fs = calc_ps(nfft, smoothed012, - smoothed01, smoothed02, 
-
-                                        # pp_mean = np.log
-                                        ps.append(p012)
-                                        #p_arrays = [p012]
-        p0_means = []
-        for j in range(len(ps)):
-            sampling = 40000
-
-
-            try:
-                ax = axes[j]
-            except:
-                print('axes something thing')
-                embed()
-
-            #for i in range(len(p0)):
-            #    ax.plot(fs, ps[j], color='grey')
-
-            if log == 'log':
-                #p012 = 10 * np.log10(ps[j] / np.max(ps))
-                #embed()
-                p012 = log_calc_psd(log, ps[j], np.max(ps))
-            else:
-                p012 = ps[j]
-
-            p0_mean = p012[0]  # np.mean(ps[j], axis=0)
-            p0_means.append(p0_mean)
-            try:
-                ax.plot(fs, p0_mean, color='black', zorder=100)  # plt_peaks(ax[0], p01, fs, 'orange')
-            except:
-                print('m something')
-                embed()
-            try:
-                DF1 = np.abs(results_diff.f1.iloc[j] - results_diff.f0.iloc[j])
-                DF2 = np.abs(results_diff.f2.iloc[j] - results_diff.f0.iloc[j])
-            except:
-                print('something')
-                embed()
-            fr = results_diff.fr.iloc[j]
-
-            #for p in range(len(p0_means)): np.abs(DF2 * 2),
-            freqs = [np.abs(DF1), np.abs(DF2), np.abs(np.abs(DF1) - np.abs(DF2)), np.abs(DF1) + np.abs(DF2), fr]
-            colors = [color01, color02, color01_2, color012, color0]  # color02, np.abs(DF2 * 2), color02, 'DF2_H1',
-            labels = [deltaf1_label(), deltaf2_label(), diff_label(), sum_label(), fbasename_small()]
-            marker = markers[j]  # 'DF1_H1', 'DF1_H4', v
-
-            if len(stack_final) > 0:
-                adjust_factor_outside = 50  # 30
-                adjust_factor_inside = 1
-
-
-                # 1) horizontal and vertical lines, each averaged over 10 points
-                # 2) then the maximum
-                # 3) then the firing-rate estimate (it is the firing rate, because of how it is computed)
-                # 0) shifting the point a little was also tried; it did not help
-
-                #
-                #
-                #
-                # I think we could additionally safeguard the fr estimate here with plus/minus 1 Hz
-                fr_corrs = [-1, 0, 1]
-                xi2_diff_all = []
-                xi2_sum_all = []
-                for fr_corr in fr_corrs:
-                    dfs1_all, dfs2_all, xi_sum_peak, xi_diff_peak, df1_adapt, df2_adapt, diff_final, diff_peak, pos_x_minus, pos_y, sum_final, sum_peak, xi2_diff, xi2_sum = find_diff_and_sum_values(DF1 + fr_corr, DF2, a_f1, a_f2, adjust_factor_inside, adjust_factor_outside, stack_final)
-                    xi2_diff_all.append(xi2_diff)
-                    xi2_sum_all.append(xi2_sum)
-                xi2_diff = np.max(xi2_diff)
-                xi2_sum = np.max(xi2_sum_all)
-
-                print('df1 ' + str(df1_adapt) + ' df2 ' + str(df2_adapt))
-                print('\n')
-                print('df1 ' + str(df1_adapt) + ' df2 ' + str(df2_adapt))
-                print('diff_final ' + str(diff_final))
-                print('sum_final ' + str(sum_final))
-
-                # embed()
-                if log == 'log':
-                    sum_peak = log_calc_psd(log, sum_peak, np.max(ps))
-                    diff_peak = log_calc_psd(log, diff_peak, np.max(ps))
-
-                ax.scatter(np.abs(DF1) + np.abs(DF2), sum_peak, color='None', edgecolor='black')
-                ax.scatter(np.abs(np.abs(DF1) - np.abs(DF2)), diff_peak, color='None', edgecolor='black')
-
-                #embed()
-                #####################
-                # and here I additionally extract the peaks of the transfer function
-                # isf_dict['io_cross']
-                complex_data = np.array([complex(eval(x)) for x in stack_saved['io_cross']])
-                trials = stack_saved.trial_nr.unique()[0]
-                cross = (np.abs(complex_data) / trials) / (stack_saved['isf_psd'] / trials)  #
-                #complex_data = np.array([complex(eval(x)) for x in ])
-
-                pos_df1 = np.argmin(np.abs(stack_saved['io_cross'].index - np.abs(DF1)))
-                pos_df2 = np.argmin(np.abs(stack_saved['io_cross'].index - np.abs(DF2)))
-                adjust_factor_outside_transfer = 1000000000000  # adjust_factor_outside
-                df1_peak = adjust_factor_outside_transfer * ((a_f1 * np.abs(cross[pos_df1])) ** 2) / 4
-                df2_peak = adjust_factor_outside_transfer * ((a_f2 * np.abs(cross[pos_df2])) ** 2) / 4
-
-                # embed()
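# The two scatter points drawn below follow the first-order prediction that a sinusoidal beat
# of contrast a, passed through a transfer function H, produces a spectral peak of roughly
# (a * |H(f)|)**2 / 4 at the beat frequency (the huge adjust factor above is an ad-hoc scaling).
# Minimal sketch with made-up gain values:
import numpy as np

def linear_peak_prediction(contrast, transfer_gain):
    """Predicted PSD peak height for a single beat of the given contrast."""
    return (contrast * np.abs(transfer_gain)) ** 2 / 4

print(linear_peak_prediction(0.02, 400.0))  # contrast 2 %, |H| = 400 -> predicted peak of 16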
-                if log == 'log':
-                    df1_peak = log_calc_psd(log, df1_peak, np.max(ps))
-                    df2_peak = log_calc_psd(log, df2_peak, np.max(ps))
-
-                print('df1 peak ' + str(df1_peak))
-                print('df2 peak ' + str(df2_peak))
-                ax.scatter(np.abs(DF1), df1_peak, color='None', edgecolor='black')
-                ax.scatter(np.abs(DF2), df2_peak, color='None', edgecolor='black')
-
-            if len(stack_saved) > 0:
-                print('todo: transfer values not there yet')
-                #f_axis, vals = get_transferfunction(stack_saved)
-
-
-            #embed()
-            plt_peaks_several(freqs, p0_mean, ax, p0_mean, fs, labels, 0, colors, emb=False, marker=marker, add_log=2.5, exact=False, perc_peaksize=0, ms=ms, clip_on=clip_on, log=log, add_not_log=350)  # perc = 0.2
-
-            ax.set_xlim(0, 300)
-            #ax.text(1, 1, 'rt_xi=' + str(np.round(sum_peak / diff_peak, 2)) + ' rt_ps=' + str(np.round(p_passed[3] / p_passed[2], 2)), ha='right', va='top', transform=ax.transAxes)
-            print('sumpeak' + str(np.abs(xi2_sum)) + 'diffpeak' + str(np.abs(xi2_diff)))
-            #embed()#
-            #if j == 0:
-            #    ax.text(1, 1, 'Model', ha='right', va='top', transform=ax.transAxes)
-            if j < 2:
-                remove_xticks(ax)
-                ax.set_xlabel('')
-            else:
-                ax.set_xlabel('Frequency [Hz]')
-
-
-            if j == 0:
-                ax.legend(ncol=6, loc=(-0.2, 1.35))  # -0.5
-
-
-            if log == 'log':
-                ax.set_xlim(xlim_psd)  #
-                ax.set_ylim(ylim_log)
-                if j in [1, 3]:
-                    remove_yticks(ax)
-                else:
-                    ax.set_ylabel('dB')
-            else:
-                if j in [1, 3]:
-                    remove_yticks(ax)
-                else:
-                    ax.set_ylabel('PSD [Hz]')
-    join_y(axes)  # [1::]
+def plt_data_matrix(ax, axes, cell, grid, ls, lw, perc, stack_final):
+    ax = plt.subplot(grid[0])
     axes.append(ax)
-
-
-    return fr, eod_fr, axes
-
-
-def find_diff_and_sum_values(DF1, DF2, a_f1, a_f2, adjust_factor_inside, adjust_factor_outside, stack_final):
-
-    array_len = 2
-    # this is the sum value
-    # numbers_sum: here we essentially walk along a diagonal of 5 points and average over them
-    numbers_sum = [np.arange(-array_len, array_len + 1, 1), np.arange(array_len, -(array_len + 1), -1)]
-
-    # numbers_diff: here the difference is always fbase
-    numbers_diff = [np.arange(-array_len, (array_len + 1), 1), np.arange(-array_len, (array_len + 1), 1)]
-
-    # numbers_horizontal: here the horizontal is always fbase
-    numbers_horizontal = [np.arange(array_len, -(array_len + 1), -1), np.zeros(len(np.arange(array_len, -(array_len + 1), -1)))]
-    # numbers_vertical: and here the vertical is always fbase
-    numbers_vertical = [np.zeros(len(np.arange(array_len, -(array_len + 1), -1))), np.arange(array_len, -(array_len + 1), -1)]
-
-
-    numbers_all = [numbers_sum, numbers_diff, numbers_horizontal, numbers_vertical]
-    sum_final = []
-    diff_final = []
-    xi_sum_final = []
-    xi_diff_final = []
-    dfs1_all = []
-    dfs2_all = []
-    for n in range(len(numbers_all)):
-        sum_peaks = []
-        diff_peaks = []
-
-        xi_sum_peaks = []
-        xi_diff_peaks = []
-        # here I average over 5 neighboring points
-        for nn in range(len(numbers_all[n][0])):
-            df1_adapt = np.abs(np.abs(DF1) + numbers_all[n][0][nn])
-            df2_adapt = np.abs(np.abs(DF2) + numbers_all[n][1][nn])
-            dfs1_all.append(df1_adapt)
-            dfs2_all.append(df2_adapt)
-            # print()
-            # print()
-            # print('\n df1+df2 ' + str(df1_adapt + df2_adapt))
-            # print('\n df1-df2 ' + str(df1_adapt - df2_adapt))
-            pos_y = np.argmin(np.abs(stack_final.index - df2_adapt))
-            pos_x = np.argmin(np.abs(stack_final.keys() - df1_adapt))
-            xi2_sum = stack_final.iloc[pos_y, stack_final.keys()[pos_x]]
-            # stack_final.iloc[pos_x][stack_final.keys()[pos_y]]
-            xi2_sum = stack_final[stack_final.keys()[pos_x]].iloc[pos_y]
-            xi_sum_peaks.append(np.abs(xi2_sum))
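# find_diff_and_sum_values reads chi2 off the susceptibility matrix by a nearest-grid-point
# lookup, exactly as in the two argmin lines above.  A compact stand-alone version of that
# lookup on a small dummy DataFrame (frequencies as index and columns; the values are invented):
import numpy as np
import pandas as pd

def chi2_at(stack, df1, df2):
    """Return the matrix entry whose column/row frequencies are closest to df1/df2."""
    col = stack.columns[np.argmin(np.abs(stack.columns.to_numpy() - df1))]
    row = stack.index[np.argmin(np.abs(stack.index.to_numpy() - df2))]
    return stack.loc[row, col]

grid_freqs = np.arange(0.0, 300.0, 5.0)
dummy_stack = pd.DataFrame(np.random.rand(len(grid_freqs), len(grid_freqs)), index=grid_freqs, columns=grid_freqs)
print(chi2_at(dummy_stack, df1=33.2, df2=87.9))  # picks the entry at df1 = 35.0, df2 = 90.0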
-            sum_peak = adjust_factor_outside * ((adjust_factor_inside * a_f1 * a_f2 * np.abs(xi2_sum)) ** 2) / 4
-            sum_peaks.append(sum_peak)
-
-            # embed()
-            # stack_final.keys()[pos_x_minus] + stack_final.index[pos_y]
-            pos_x_minus = np.argmin(np.abs(stack_final.keys() + df1_adapt))
-            # xi2_diff = stack_final.iloc[pos_x, stack_final.keys()[pos_y_minus]]
-            xi2_diff = stack_final[stack_final.keys()[pos_x_minus]].iloc[pos_y]  # stack_final.iloc[pos_x, stack_final.keys()[pos_y_minus]]
-            xi_diff_peaks.append(np.abs(xi2_diff))
-            # stack_final[stack_final.keys()[pos_x_minus]].iloc[pos_y-1]
-            diff_peak = adjust_factor_outside * ((adjust_factor_inside * a_f1 * a_f2 * np.abs(xi2_diff)) ** 2) / 4
-            diff_peaks.append(diff_peak)
-        diff_final.append(np.mean(diff_peaks))
-        sum_final.append(np.mean(sum_peaks))
-        xi_diff_final.append(xi_diff_peaks)
-        xi_sum_final.append(xi_sum_peaks)
-    # and here I pick the variant with the strongest score
-    # this way I can keep it comparable across all points,
-    # essentially without having to define a point as lying on the diagonal or on the vertical
-    # the only restriction is that a point always has to be 2.5 Hz away from one of the
-    # nonlinear structures, so that nothing of them is accidentally integrated as well
-    sum_peak = np.max(sum_final)
-    diff_peak = np.max(diff_final)
-    xi_sum_peak = xi_sum_final
-    xi_diff_peak = xi_diff_final
-    return dfs1_all, dfs2_all, xi_sum_peak, xi_diff_peak, df1_adapt, df2_adapt, diff_final, diff_peak, pos_x_minus, pos_y, sum_final, sum_peak, xi2_diff, xi2_sum
+    cell = '2012-07-03-ak-invivo-1'
+    mat_rev, stack_final_rev = load_stack_data_susept(cell, redo=True, save_name=version_final(), end='_revQuadrant_')
+    mat, stack = load_stack_data_susept(cell, save_name=version_final(), end='')
+    new_keys, stack_plot = convert_csv_str_to_float(stack)
+    # mat = RAM_norm_data(stack['isf'].iloc[0], stack_plot,
+    #                     stack['snippets'].unique()[0], stack_here=stack)  #
+    new_keys, stack_plot = convert_csv_str_to_float(stack_final_rev)
+    # mat_rev = RAM_norm_data(stack_final_rev['isf'].iloc[0], stack_plot,
+    #                         stack_final_rev['snippets'].unique()[0], stack_here=stack_final_rev)  #
+    mat, add_nonlin_title, resize_val = rescale_colorbar_and_values(mat)
+    mat_rev, add_nonlin_title, resize_val = rescale_colorbar_and_values(mat_rev, resize_val=resize_val)
+    # try:
+    full_matrix = create_full_matrix2(np.array(mat), np.array(mat_rev))
+    # except:
+    #     print('full matrix something')
+    #     embed()
+    stack_final = get_axis_on_full_matrix(full_matrix, mat)
+    abs_matrix = np.abs(stack_final)
+    abs_matrix, add_nonlin_title, resize_val = rescale_colorbar_and_values(abs_matrix, add_nonlin_title='k')
+    ax.axhline(0, color='white', linestyle=ls, linewidth=lw)
+    ax.axvline(0, color='white', linestyle=ls, linewidth=lw)
+    im = plt_RAM_perc(ax, perc, abs_matrix)
+    cbar, left, bottom, width, height = colorbar_outside(ax, im, add=5, width=0.01)
+    set_clim_same([im], mats=[abs_matrix], lim_type='up', nr_clim='perc', clims='', percnr=perc_model_full())
+    # clim = im.get_clim()
+    # if clim[1] > 1000:
+    # todo: change clim values with different Hz values
+    # embed()
+    cbar.set_label(nonlin_title(add_nonlin_title=' [' + add_nonlin_title), rotation=90, labelpad=8)
+    offset = -0.35
+    set_ylabel_arrow(ax, xpos=offset, ypos=0.97)
+    set_xlabel_arrow(ax, xpos=1, ypos=offset)
+    ''' eod_fr, stack_spikes = plt_data_suscept_single(ax, cbar_label, cell, cells, f, fig, file_names_exclude, lp, title,
+                                                       width)'''
+    cbar, left, bottom, width, height = colorbar_outside(ax, im, add=5, width=0.01)
+    return ax, cell, stack_final
+
+
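# plt_data_matrix above stitches the measured quadrant and the '_revQuadrant_' data into one
# matrix covering positive and negative Delta f1.  The exact convention lives in
# create_full_matrix2 / get_axis_on_full_matrix; the basic operation is a horizontal
# concatenation of the mirrored second quadrant with the measured one (sketch on dummy data):
import numpy as np

def combine_quadrants(quadrant, quadrant_rev):
    """Concatenate |chi2| of the reversed quadrant (flipped along f1) with the measured one."""
    return np.hstack((np.fliplr(np.abs(quadrant_rev)), np.abs(quadrant)))

full = combine_quadrants(np.random.rand(4, 4), np.random.rand(4, 4))
print(full.shape)  # (4, 8)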
+def plt_data_peaks_letters(DF1_desired, DF2_desired, ax, color012, color01_2, eod_fr, fr_noise, fr_waves, markers):
+    DF2_hz = np.abs(np.array(DF1_desired) * eod_fr - eod_fr)
+    DF1_hz = np.abs(np.array(DF2_desired) * eod_fr - eod_fr)
+    for f in range(len(DF1_hz)):
+        ax.plot(fr_noise * DF1_hz[f] / fr_waves, fr_noise * DF2_hz[f] / fr_waves, markers[f], ms=5,
+                markeredgecolor=color012,
+                markerfacecolor="None")  # , alpha = alphas[f]
+        ax.plot(-fr_noise * DF1_hz[f] / fr_waves, fr_noise * DF2_hz[f] / fr_waves, markers[f], ms=5,
+                markeredgecolor=color01_2,
+                markerfacecolor="None")  # , alpha = alphas[f]
+
+
+def plt_data_peaks(DF1_desired_orig, DF2_desired_orig, c1, chose_score, detections, devs, dfs, end, grid, log, ms,
+                   mult_type, nfft, sorted_on, xlim_psd, ylim_log):
+    DF1_desired, DF2_desired, fr, eod_fr, arrays_len = plt_data_full_model(c1, chose_score, detections, devs, dfs, end,
+                                                                           grid[3], mult_type, sorted_on, ms=ms,
+                                                                           markers=['s', 'o'], clip_on=True,
+                                                                           DF2_desired=DF2_desired_orig,
+                                                                           DF1_desired=DF1_desired_orig, alpha=[1, 1],
+                                                                           log=log, ylim_log=ylim_log, nfft=nfft,
+                                                                           xlim_psd=xlim_psd)
+    fr_mult = fr / eod_fr
+    multwise = False
+    if multwise:
+        DF1_frmult = np.abs((np.array(DF1_desired) - 1) / fr_mult)
+        DF2_frmult = np.abs((np.array(DF2_desired) - 1) / fr_mult)
+    else:
+        DF1_frmult = np.array(DF1_desired_orig) / fr
+        DF2_frmult = np.array(DF2_desired_orig) / fr
+    # embed()
+    DF1_frmult[0] = 1
+    DF2_frmult[1] = 0.32339256  # 1.06949369
+    print(DF1_frmult)
+    print(DF2_frmult)
+    grid0 = gridspec.GridSpecFromSubplotSpec(len(DF1_desired), 1, wspace=0.15, hspace=0.35,
+                                             subplot_spec=grid[2])
+    return DF1_desired, DF1_frmult, DF2_desired, DF2_frmult, eod_fr, grid0
 
 
 def plt_data_full_model(c1, chose_score, detections, devs, dfs, end, grid, mult_type, sorted_on, ms = 14, log = 'log', markers = ['s', 'o'],
@@ -1222,12 +531,12 @@ def plt_data_full_model(c1, chose_score, detections, devs, dfs, end, grid, mult_
     #labels = labels_all_motivation(DF1, DF2, fr_isi)
     labels = ['$f_{base}$',
-              deltaf1_label(),
-              deltaf2_label(),
-              sum_label(),
-              two_deltaf1_label(),
-              two_deltaf2_label(),
-              diff_label(),
+              label_deltaf1(),
+              label_deltaf2(),
+              label_sum(),
+              label_two_deltaf1(),
+              label_two_deltaf2(),
+              label_diff(),
               'fr_bc', 'fr_given_burst_corr_individual', 'highest_fr_burst_corr_individual', 'fr', 'fr_given',
@@ -1257,40 +566,6 @@ def plt_data_full_model(c1, chose_score, detections, devs, dfs, end, grid, mult_
     return DF1_desired, DF2_desired, fr, eod_fr, arrays_len
-
-
-def load_stack_data_susept(cell, save_name, end = '', redo = False):
-    load_name = load_folder_name('calc_RAM') + '/' + save_name + end
-    add = '_cell' + cell + end  # str(f) # + '_amp_' + str(amp)
-    #embed()
-    stack_cell = load_data_susept(load_name + '_' + cell + '.pkl', load_name + '_' + cell, add=add,
-                                  load_version='csv', redo=redo)
-    file_names_exclude = get_file_names_exclude()
-    stack_cell = stack_cell[~stack_cell['file_name'].isin(file_names_exclude)]
-    # if len(stack_cell):
-    file_names = stack_cell.file_name.unique()
-    #embed()
-    file_names = exclude_file_name_short(file_names)
-    cut_off_nr = get_cutoffs_nr(file_names)
-    try:
-        maxs = list(map(float, cut_off_nr))
-    except:
-        embed()
-    file_names = file_names[np.argmax(maxs)]
-    #embed()
-    stack_file = stack_cell[stack_cell['file_name'] == file_names]
-    amps = [np.min(stack_file.amp.unique())]
-    amps = restrict_punits(cell, amps)
-    amp = np.min(amps)  # [0]
-    # for amp in amps:
-    stack_amps = stack_file[stack_file['amp'] == amp]
-
-    lengths = stack_amps.stimulus_length.unique()
-    trial_nr_double = stack_amps.trial_nr.unique()
-    trial_nr = np.max(trial_nr_double)
-    stack_final = stack_amps[(stack_amps['stimulus_length'] == np.max(lengths)) & (stack_amps.trial_nr == trial_nr)]
-    mat, new_keys = get_mat_susept(stack_final)
-    return mat, stack_final
-
-
 if __name__ == '__main__':
+    #embed()
     model_full()
\ No newline at end of file
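# The load_stack_data_susept helper deleted above narrows a cell's stacked susceptibility data
# down to a single recording: the file with the highest cutoff, the smallest contrast, the
# longest stimulus and the largest trial count.  A condensed sketch of that selection pattern
# on a dummy frame (the column names follow the deleted code, the values are invented):
import pandas as pd

stack = pd.DataFrame({'amp': [5.0, 5.0, 10.0, 5.0],
                      'stimulus_length': [2.0, 10.0, 10.0, 10.0],
                      'trial_nr': [20, 20, 50, 50]})
smallest_amp = stack[stack.amp == stack.amp.min()]
best = smallest_amp[(smallest_amp.stimulus_length == smallest_amp.stimulus_length.max())
                    & (smallest_amp.trial_nr == smallest_amp.trial_nr.max())]
print(best)  # the 10 s recording with 50 trials at the smallest contrast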
'calc_RAM_model-2__nfft_whole_power_1_RAM_additiv_cv_adapt_factor_scaled_cNoise_0.1_cSig_0.9_cutoff1_300_cutoff2_300no_sinz_length1_TrialsStim_' + str( trial_nr) + '_a_fr_1__trans1s__TrialsNr_1_fft_o_forward_fft_i_forward_Hz_mV_revQuadrant_' # for trial in trials:#.009 @@ -121,9 +124,9 @@ def trialnr(nffts=['whole'], powers=[1], contrasts=[0], noises_added=[''], D_ext for s, sav_name in enumerate(save_names): - save_name = load_folder_name('calc_model') + '/' + sav_name + save_name = find_folder_name('calc_model') + '/' + sav_name - cell_add, cells_save = find_cell_add(cells_given) + cell_add, cells_save = title_find_cell_add(cells_given) perc = 'perc' path = save_name + '.pkl' # '../'+ diff --git a/update_project.py b/update_project.py index f162c15..7b8e270 100644 --- a/update_project.py +++ b/update_project.py @@ -130,9 +130,9 @@ if cont_other_dir == False: embed() #print(new_dir+dir) #try: - shutil.copy2("../suseptibility/" + dir, new_dir + directory) + shutil.copy2("../suseptibility/" + directory,new_dir + directory) #print(new_dir + directory) - #shutil.copy("../suseptibility/" + dir, new_dir + directory) + #shutil.copy("../suseptibility/" + directory,new_dir + directory) #shutil.copyfile("../suseptibility/"+dir, new_dir+dir) #except: # print('shutil stuff')