From da1bc387b4c8bd5e97f983a5acdfb942d8d7ab42 Mon Sep 17 00:00:00 2001
From: xaver
Date: Tue, 8 Sep 2020 14:49:58 +0200
Subject: [PATCH] 08.09

---
 eigenmannia_jar.py      | 116 +++++++++++++++++++++-------------------
 gain_fit.py             |  37 +++++++++++++
 jar_functions.py        |  23 ++++++++
 notes                   |  18 ++++---
 plot_eigenmannia_jar.py |  79 +++++++++++++--------------
 sin_response_fit.py     |  38 ++++++-------
 sin_response_specto.py  |  26 ++++-----
 7 files changed, 202 insertions(+), 135 deletions(-)
 create mode 100644 gain_fit.py

diff --git a/eigenmannia_jar.py b/eigenmannia_jar.py
index e7d3a43..131475a 100644
--- a/eigenmannia_jar.py
+++ b/eigenmannia_jar.py
@@ -3,68 +3,74 @@ import numpy as np
 import os
 import nix_helpers as nh
 from IPython import embed
+from matplotlib.mlab import specgram
 #from tqdm import tqdm
 from jar_functions import parse_stimuli_dat
 from jar_functions import norm_function_eigen
 from jar_functions import mean_noise_cut_eigen
 from jar_functions import get_time_zeros
+from jar_functions import import_data_eigen
+from scipy.signal import savgol_filter
 
-base_path = 'D:\\jar_project\\JAR\\eigenmannia\\2015eigen16'
+base_path = 'D:\\jar_project\\JAR\\eigenmannia'
 
-datasets = ['2020-07-08-aa', '2020-07-08-as']
+identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
 
 response = []
 deltaf = []
-
-for dataset in os.listdir(base_path):
-    datapath = os.path.join(base_path, dataset, '%s.nix' % dataset)
-    print(datapath)
-    stimuli_dat = os.path.join(base_path, dataset, 'manualjar-eod.dat')
-
-    df, duration = parse_stimuli_dat(stimuli_dat)
-    dur = int(duration[0][0:2])
-    print(df)
-
-    time, eod = nh.read_eod(datapath, duration = 2000)
-
-    zeropoints = get_time_zeros(time, eod, threshold = np.max(eod)*0.1)
-
-    frequencies = 1 / np.diff(zeropoints)
-
-    # norm, base, jar = norm_function_eigen(frequencies, zeropoints[:-1], onset_point=(dur - dur)+10, offset_point=dur+10) # dm-dm only works if onset = 0 sec
-
-    # cf, ct = mean_noise_cut_eigen(frequencies, zeropoints[:-1], n=200)
-
-    window = np.ones(101) / 101
-    freq = np.convolve(frequencies, window, mode='same')
-
-    ''' # plt.plot(ct, cf)
-    plt.plot(zeropoints[:-1], freq)
-    plt.ylabel('EOD_frequency [Hz]')
-    plt.xlabel('time [s]')
-    plt.xlim(1, 140)
-    plt.ylim(np.median(freq) - 10, np.median(freq) + 10)
-    plt.title('JAR_deltaf_%s' % deltaf)
-    plt.show()
-    '''
-    j = []
-    for idx, i in enumerate(zeropoints):
-        if i > 20 and i < 80:
-            j.append(freq[idx])
-
-    b = []
-    for idx, i in enumerate(zeropoints):
-        if i < 20:
-            b.append(freq[idx])
-
-    r = np.median(j) - np.median(b)
-    response.append(r)
-    deltaf.append(df[0])
-
-res_df1 = sorted(zip(deltaf[:32],response[:32]))
-res_df2 = sorted(zip(deltaf[33:],response[33:]))
-res_df = sorted(zip(deltaf,response))
-
-np.save('res_df1', res_df1)
-np.save('res_df2', res_df2)
-np.save('res_df', res_df)
\ No newline at end of file
+for ID in identifier:
+    for dataset in os.listdir(os.path.join(base_path, ID)):
+        datapath = os.path.join(base_path, ID, dataset, '%s.nix' % dataset)
+        print(datapath)
+        stimuli_dat = os.path.join(base_path, ID, dataset, 'manualjar-eod.dat')
+
+        df, duration = parse_stimuli_dat(stimuli_dat)
+        dur = int(duration[0][0:2])
+        print(df)
+
+        # time, eod = nh.read_eod(datapath, duration = 2000)  # instead of import_data with the ManualJAR tag - then the onset should really be at 10 sec
+        data, pre_dat, dt = import_data_eigen(datapath)
+
+        nfft = 2**17
+        spec, freqs, times = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
+        dbspec = 10.0 * np.log10(spec)  # in dB
+        power = dbspec[:, 50]
+
+        fish_p = power[(freqs > 200) & (freqs < 1000)]
+        fish_f = freqs[(freqs > 200) & (freqs < 1000)]
+
+        index = np.argmax(fish_p)
+        eodf = fish_f[index]
+        eodf4 = eodf * 4
+
+        lim0 = eodf4-20
+        lim1 = eodf4+20
+
+        df = freqs[1] - freqs[0]
+        ix0 = int(np.floor(lim0/df))  # back to index
+        ix1 = int(np.ceil(lim1/df))  # back to index
+        spec4 = dbspec[ix0:ix1, :]
+        freq4 = freqs[ix0:ix1]
+        jar4 = freq4[np.argmax(spec4, axis=0)]  # all freqs at max specs over axis 0
+        jar = jar4 / 4
+        jm = jar4 - np.mean(jar4)  # data we take
+        cut_times = times[:len(jar4)]
+
+        #plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
+        plt.plot(cut_times, jm)
+        # plt.plot(cut_times, savgol)  # savgol-filtered trace not available here, see the note below
+        #plt.ylim(lim0, lim1)
+        plt.show()
+
+
+        # filtering is not strictly necessary, simply take median/mean as below
+
+'''
+    res_df = sorted(zip(deltaf,response))
+
+    np.save('res_df_%s' %ID, res_df)
+'''
+
+# problem: the raw data (data, pre_data) cannot be savgol filtered because of their 1D-array structure
+# I only get them via specgram as frequency over time, and that trace can then no longer be savgol filtered
+# however, I could still simply read the response out of jar4, which would then just be less filtered
\ No newline at end of file
diff --git a/gain_fit.py b/gain_fit.py
new file mode 100644
index 0000000..e1b6434
--- /dev/null
+++ b/gain_fit.py
@@ -0,0 +1,37 @@
+from scipy import signal
+import matplotlib.pyplot as plt
+import numpy as np
+import pylab
+from IPython import embed
+from scipy.optimize import curve_fit
+from jar_functions import gain_curve_fit
+
+identifier = ['2018lepto1',
+              '2018lepto4',
+              '2018lepto5',
+              '2018lepto76',
+              '2018lepto98',
+              '2019lepto03',
+              '2019lepto24',
+              '2019lepto27',
+              '2019lepto30',
+              '2020lepto04',
+              '2020lepto06',
+              '2020lepto16',
+              '2020lepto19',
+              '2020lepto20'
+              ]
+
+for ID in identifier:
+    amf = np.load('amf_%s.npy' %ID)
+    gain = np.load('gain_%s.npy' %ID)
+    rms = np.load('rms_%s.npy' %ID)
+    thresh = np.load('thresh_%s.npy' % ID)
+
+    idx_arr = (rms < thresh) | (rms < np.mean(rms))
+
+    embed()
+    sinv, sinc = curve_fit(gain_curve_fit, amf[idx_arr], gain[idx_arr])
+    print(sinv[0])
+    f_cutoff = 1 / (2*np.pi*sinv[0])
+    print(f_cutoff)
diff --git a/jar_functions.py b/jar_functions.py
index 2fb3cf8..2ebc329 100644
--- a/jar_functions.py
+++ b/jar_functions.py
@@ -13,6 +13,10 @@ def sin_response(t, f, p, A):
     r_sin = A*np.sin(2*np.pi*t*f + p)
     return r_sin
 
+def gain_curve_fit(amf, tau, alpha):
+    gain = alpha / np.sqrt(1 + (2*np.pi*amf*tau)**2)
+    return gain
+
 def parse_dataset(dataset_name):
     assert(os.path.exists(dataset_name)) #see if data exists
     f = open(dataset_name, 'r') #open data we gave in
@@ -265,6 +269,25 @@ def import_data(dataset):
     return dat, pre_dat, dt
     #nf.close()
 
+def import_data_eigen(dataset):
+    import nixio as nix
+    nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
+    b = nf.blocks[0]
+    eod = b.data_arrays['EOD-1']
+    dt = eod.dimensions[0].sampling_interval
+    di = int(10.0/dt)
+    t = b.tags['ManualJAR_1']
+    #amfreq = t.metadata['RePro-Info']['settings']['amfreq']
+    dat = []
+    pre_dat = []
+    for mt in b.multi_tags:
+        data = mt.retrieve_data(0, 'EOD-1')[:]  # data[0]
+        dat.append(data)
+        i0 = int(mt.positions[0][0]/dt)
+        pre_data = eod[i0-di:i0]
+        pre_dat.append(pre_data)
+    return dat, pre_dat, dt
+
 def import_amfreq(dataset):
     import nixio as nix
     nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
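For reference, gain_curve_fit is the magnitude response of a first-order low-pass filter, and gain_fit.py estimates tau from the measured gain points and converts it to a cutoff frequency via f_cutoff = 1/(2*pi*tau). A minimal sketch of that fit on made-up numbers (the amf values, noise level and the "true" tau/alpha below are synthetic, not from the recordings); note that curve_fit passes the independent variable first, which is why amf is the first argument of gain_curve_fit:

import numpy as np
from scipy.optimize import curve_fit

def gain_curve_fit(amf, tau, alpha):
    # magnitude response of a first-order low-pass filter
    return alpha / np.sqrt(1 + (2*np.pi*amf*tau)**2)

# synthetic gain points, roughly following a low-pass with tau = 2 s, alpha = 0.8
amf = np.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0])
rng = np.random.default_rng(1)
gain = gain_curve_fit(amf, 2.0, 0.8) * (1 + 0.05*rng.standard_normal(len(amf)))

(tau, alpha), cov = curve_fit(gain_curve_fit, amf, gain, p0=[1.0, 1.0])
f_cutoff = 1 / (2*np.pi*tau)
print(tau, alpha, f_cutoff)

In gain_fit.py the first fitted parameter sinv[0] plays the role of tau here, so the cutoff follows the same f_cutoff = 1/(2*pi*sinv[0]) relation.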
diff --git a/notes b/notes
index 7dccc12..22a9993 100644
--- a/notes
+++ b/notes
@@ -1,13 +1,15 @@
-- kick out traces with too high RMS: possibly kick out only a single trace if only its RMS is too high
-- look at 2019lepto27/30 again, a gain point is missing
-- fit for the gain curves: via tau = 1/(2*pi*cutoff_f) --> how do I determine cutoff_f?
-- look at Natalie's eigenmannia data with + / - delta f to see whether there are differences
-- run the different nffts on another computer, possibly to see differences
++ fit for the gain curves: via tau = 1/(2*pi*cutoff_f) --> how do I determine cutoff_f?
++ look at Natalie's eigenmannia data with + / - delta f to see whether there are differences
 
 long term:
-- separate file with a script to display the finished data, use the fit code purely as data processing
+- separate file with a script to display the finished data, use the fit code only for data processing
 - presentation: specgram --> extracted jar trace on top --> filtering --> fit and data shown together, all of that for the different frequencies
-- add size/possibly weight later, extract the EOD base frequency and assign it
++ add size/possibly weight later, extract the EOD base frequency and assign it --> how can this be done quickly and not manually?
 - make a list of fish properties (dominance/size), measurement variables (temp/conductivity), EODf and possibly amplitude, so that these can be plotted
+- run the different nffts on another computer, possibly to see differences
 
-- phase in degrees
\ No newline at end of file
+- phase in degrees: phase % (2*pi) - modulo 2*pi
+
+
+- kick out traces with too high RMS: possibly kick out only a single trace if only its RMS is too high
+- 2019lepto27/30: 27 - 0.05Hz (7-27-af, first dat with len(dat)=1), 30 - 0.001Hz (7-30-ah with 0.005 instead of the intended 0.001Hz --> missing)
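The note on reporting the phase in degrees amounts to wrapping the fitted phase into [0, 2*pi) and converting. A one-line sketch (the value of p is just an assumed fitted phase in radians):

import numpy as np

p = -4.2                               # assumed fitted phase in radians
phase_deg = np.degrees(p % (2*np.pi))  # wrap into [0, 2*pi), then convert to degrees
print(phase_deg)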
diff --git a/plot_eigenmannia_jar.py b/plot_eigenmannia_jar.py
index f2c2f55..1377a26 100644
--- a/plot_eigenmannia_jar.py
+++ b/plot_eigenmannia_jar.py
@@ -4,47 +4,48 @@ import os
 import nix_helpers as nh
 from IPython import embed
 
-res_df1 = np.load('res_df1.npy')
-res_df2 = np.load('res_df2.npy')
-res_df = np.load('res_df.npy')
+identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
 
-mres = []
-mdf = []
+for ID in identifier:
+    res_df = np.load('res_df_%s.npy' %ID)
 
-currf = None
-idxlist = []
+    mres = []
+    mdf = []
 
-for i, d in enumerate(res_df):
-    if currf is None or currf == d[0]:
-        currf = d[0]
-        idxlist.append(i)
+    currf = None
+    idxlist = []
 
-    else: # currf != f
-        meanres = [] # lists to make mean of
-        meandf = []
-        for x in idxlist:
-            meanres.append(res_df[x][1])
-            meandf.append(res_df[x][0])
-        meanedres = np.mean(meanres)
-        meaneddf = np.mean(meandf)
-        mres.append(meanedres)
-        mdf.append(meaneddf)
-        currf = d[0] # set back for next loop
-        idxlist = [i]
-meanres = [] # lists to make mean of
-meandf = []
-for y in idxlist:
-    meanres.append(res_df[y][1])
-    meandf.append(res_df[y][0])
-meanedres = np.mean(meanres)
-meaneddf = np.mean(meandf)
-mres.append(meanedres)
-mdf.append(meaneddf)
+    for i, d in enumerate(res_df):
+        if currf is None or currf == d[0]:
+            currf = d[0]
+            idxlist.append(i)
 
-plt.plot(mdf, mres, 'o')
-plt.xlabel('deltaf [Hz]')
-plt.ylabel('JAR_respones [Hz]')
-plt.axhline(0, color='grey', lw =1)
-plt.axvline(0, color='grey', lw = 1)
-plt.title('JAR_response_to_deltaf_eigenmannia')
-plt.show()
\ No newline at end of file
+        else: # currf != f
+            meanres = [] # lists to make mean of
+            meandf = []
+            for x in idxlist:
+                meanres.append(res_df[x][1])
+                meandf.append(res_df[x][0])
+            meanedres = np.mean(meanres)
+            meaneddf = np.mean(meandf)
+            mres.append(meanedres)
+            mdf.append(meaneddf)
+            currf = d[0] # set back for next loop
+            idxlist = [i]
+    meanres = [] # lists to make mean of
+    meandf = []
+    for y in idxlist:
+        meanres.append(res_df[y][1])
+        meandf.append(res_df[y][0])
+    meanedres = np.mean(meanres)
+    meaneddf = np.mean(meandf)
+    mres.append(meanedres)
+    mdf.append(meaneddf)
+
+    plt.plot(mdf, mres, 'o')
+    plt.xlabel('deltaf [Hz]')
+    plt.ylabel('JAR_response [Hz]')
+    plt.axhline(0, color='grey', lw=1)
+    plt.axvline(0, color='grey', lw=1)
+    plt.title('JAR_response_to_deltaf_%s' %ID)
+    plt.show()
\ No newline at end of file
diff --git a/sin_response_fit.py b/sin_response_fit.py
index 960e70b..b5b7058 100644
--- a/sin_response_fit.py
+++ b/sin_response_fit.py
@@ -6,24 +6,25 @@ from IPython import embed
 from scipy.optimize import curve_fit
 from jar_functions import sin_response
 from jar_functions import mean_noise_cut
+from jar_functions import gain_curve_fit
 
 def take_second(elem): # function for taking the names out of files
     return elem[1]
 
-identifier = [#'2018lepto1',
-              #'2018lepto4',
-              #'2018lepto5',
-              #'2018lepto76',
-              #'2018lepto98',
-              #'2019lepto03',
-              #'2019lepto24',
-              #'2019lepto27',
+identifier = ['2018lepto1',
+              '2018lepto4',
+              '2018lepto5',
+              '2018lepto76',
+              '2018lepto98',
+              '2019lepto03',
+              '2019lepto24',
+              '2019lepto27',
               '2019lepto30',
-              #'2020lepto04',
-              #'2020lepto06',
-              #'2020lepto16',
-              #'2020lepto19',
-              #'2020lepto20'
+              '2020lepto04',
+              '2020lepto06',
+              '2020lepto16',
+              '2020lepto19',
+              '2020lepto20'
               ]
 
 for ident in identifier:
@@ -54,7 +55,7 @@ for ident in identifier:
         dt = time[1] - time[0]
 
         n = int(1/float(d[1])/dt)
-        cutf = mean_noise_cut(jm, time, n = n)
+        cutf = mean_noise_cut(jm, n = n)
         cutt = time
         #plt.plot(time, jm-cutf, label='cut amfreq')
         #plt.plot(time, jm, label='spec')
@@ -157,9 +158,10 @@ for ident in identifier:
     ax1.set_xlabel('envelope_frequency [Hz]')
     ax1.set_ylabel('RMS [Hz]')
     plt.legend()
-    pylab.show()
-
-    np.save('gain_%s' %ident, mgain_arr[idx_arr])
-    np.save('amf%s' %ident, amfreq_arr[idx_arr])
+    #pylab.show()
+    #np.save('gain_%s' %ident, mgain_arr[idx_arr])
+    #np.save('amf_%s' %ident, amfreq_arr[idx_arr])
+    np.save('rms_%s' %ident, rootmeansquare_arr)
+    np.save('thresh_%s' %ident, threshold_arr)
 
 embed()
\ No newline at end of file
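sin_response_fit.py obtains one gain point per envelope frequency by fitting sin_response(t, f, p, A) to the filtered jar trace. A minimal sketch of that kind of fit on a synthetic trace (the sampling, noise level and the choice to hold the frequency fixed at the stimulus value are illustrative assumptions, not necessarily exactly what the script does):

import numpy as np
from scipy.optimize import curve_fit

def sin_response(t, f, p, A):
    return A*np.sin(2*np.pi*t*f + p)

amfreq = 0.05                               # assumed envelope frequency [Hz]
t = np.arange(0, 200, 0.5)                  # assumed time axis of the jar trace [s]
trace = sin_response(t, amfreq, 1.0, 3.0) + 0.3*np.random.randn(len(t))

# hold f fixed at the stimulus frequency and fit only phase and amplitude
popt, pcov = curve_fit(lambda t, p, A: sin_response(t, amfreq, p, A), t, trace, p0=[0.0, 1.0])
phase, amplitude = popt
print(abs(amplitude), np.degrees(phase % (2*np.pi)))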
diff --git a/sin_response_specto.py b/sin_response_specto.py
index da9417e..1d8b5ca 100644
--- a/sin_response_specto.py
+++ b/sin_response_specto.py
@@ -8,7 +8,7 @@ import IPython
 import numpy as np
 #import DataLoader as dl
 from IPython import embed
-from tqdm import tqdm
+#from tqdm import tqdm
 from scipy.optimize import curve_fit
 from jar_functions import step_response
 from jar_functions import sin_response
@@ -22,7 +22,7 @@ from jar_functions import average
 from jar_functions import import_data
 from jar_functions import import_amfreq
 
-base_path = 'D:\\jar_project\\JAR\\sin\\2019lepto30'
+base_path = 'D:\\jar_project\\JAR\\sin\\2019lepto27'
 
 time_all = []
 freq_all = []
@@ -31,27 +31,23 @@ amfrequencies = []
 gains = []
 files = []
 
-for idx, dataset in tqdm(enumerate(os.listdir(base_path))):
+for idx, dataset in enumerate(os.listdir(base_path)):
     if dataset == 'prerecordings':
         continue
     datapath = os.path.join(base_path, dataset, '%s.nix' % dataset)
-    print(datapath)
+    #print(datapath)
     data, pre_data, dt = import_data(datapath)
+    embed()
 
     nfft = 2**17
 
     for d, dat in enumerate(data):
        if len(dat) == 1:
-            continue
-            '''if len(data) > 0:
            print(datapath)
-
-            amfreq = import_amfreq(datapath)
-            print(amfreq)
-            continue
-
-        else:
-            embed()
-            '''
+        else:
+            continue
+
        file_name = []
        ID = []
@@ -104,10 +100,10 @@ for idx, dataset in tqdm(enumerate(os.listdir(base_path))):
     # plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
 
     # save data
-    np.save('%s time' % file_name, cut_times)
-    np.save('%s' % file_name, jar4)
+    #np.save('%s time' % file_name, cut_times)
+    #np.save('%s' % file_name, jar4)
 
 # save filenames for this fish
-np.save('%s files' %ID[0], files)
+#np.save('%s files' %ID[0], files)
 
 embed()
\ No newline at end of file
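Regarding the open point in eigenmannia_jar.py (reading the response directly out of the spectrogram-based trace instead of savgol filtering): a minimal sketch of the median-based response measure that the removed zero-crossing code used, applied to the jar trace and cut_times; the 10 s onset and the stimulus window are assumptions taken from the comments, not values fixed anywhere in the scripts.

import numpy as np

def jar_response(cut_times, jar, onset=10.0, offset=70.0):
    # median EODf during the assumed stimulus window minus median EODf before onset
    cut_times = np.asarray(cut_times)
    jar = np.asarray(jar)
    during = jar[(cut_times > onset) & (cut_times < offset)]
    before = jar[cut_times < onset]
    return np.median(during) - np.median(before)

# e.g. inside the dataset loop of eigenmannia_jar.py:
# response.append(jar_response(cut_times, jar))
# deltaf.append(df[0])

With the response and deltaf lists filled this way, the commented-out res_df sorting and saving at the end of eigenmannia_jar.py would work again and plot_eigenmannia_jar.py could load the per-fish files.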