From e91b648b5c8624758b2bc4bbfb3cb5f4d905ce6f Mon Sep 17 00:00:00 2001
From: xaver
Date: Mon, 6 Jul 2020 18:19:17 +0200
Subject: [PATCH] 06.07

---
 jar_functions.py | 73 +++++++++++++++++++++++++++++------------------
 second_try.py    | 74 +++++++++++++++++++++++++-----------------------
 2 files changed, 85 insertions(+), 62 deletions(-)

diff --git a/jar_functions.py b/jar_functions.py
index 0faf921..4bcb2c6 100644
--- a/jar_functions.py
+++ b/jar_functions.py
@@ -12,6 +12,7 @@ def parse_dataset(dataset_name):
     eodfs = []
     deltafs = []
     stimulusfs = []
+    duration = []
 
     # data itself
     times = []
@@ -31,6 +32,8 @@ def parse_dataset(dataset_name):
             deltafs.append(float(l.split(':')[-1].strip()[:-2]))  # from that, take everything except the last two characters (Hz unit)
         if "#" in l and "StimulusFrequency" in l:  # this for the different metadata in different lists
             stimulusfs.append(float(l.split(':')[-1].strip()[:-2]))
+        if "#" in l and "Duration" in l:
+            duration.append(float(l.split(':')[-1].strip()[:-3]))
 
         if '#Key' in l:
             if len(time) != 0:  # therefore empty in the first round
@@ -52,12 +55,42 @@ def parse_dataset(dataset_name):
     amplitudes.append(ampl)  # these append the data from the first loop to the final lists, because we overwrite them (?)
     frequencies.append(freq)
 
-    return times, frequencies, amplitudes, eodfs, deltafs, stimulusfs  # output of the function
+    return frequencies, times, amplitudes, eodfs, deltafs, stimulusfs, duration  # output of the function
 
+def parse_infodataset(dataset_name):
+    assert(os.path.exists(dataset_name))  # check that the data exists
+    f = open(dataset_name, 'r')  # open the given file
+    lines = f.readlines()  # read the data
+    f.close()
+
+    identifier = []
+    for i in range(len(lines)):
+        l = lines[i].strip()  # strip whitespace from every line of the text data
+        if "#" in l and "Identifier" in l:
+            identifier.append((l.split(':')[-1].strip()[1:12]))
+    return identifier
+
+def mean_loops(start, stop, timespan, frequencies, time):
+    minimumt = min(len(time[0]), len(time[1]))
+    # new time axis with the wished timespan, because it varies between loops
+    tnew = np.arange(start, stop, timespan / minimumt)  # 3rd input is the step spacing:
+    # here the complete measuring time divided by the total number of datapoints
+    # interpolation
+    f0 = np.interp(tnew, time[0], frequencies[0])
+    f1 = np.interp(tnew, time[1], frequencies[1])
+
+    # new array with the frequencies of both loops as two lists put together
+    frequency = np.array([f0, f1])
+
+    # mean over both loops along axis 0 (= averaged in y direction; axis=1 would be over the x axis)
+    mf = np.mean(frequency, axis=0)
+
+    return mf, tnew
 
 def mean_noise_cut(frequencies, time, n):
     cutf = []
     cutt = []
+
     for k in np.arange(0, len(frequencies), n):
         t = time[k]
         f = np.mean(frequencies[k:k+n])
@@ -72,6 +105,18 @@ def step_response(t, a1, a2, tau1, tau2):
     r_step[t<0] = 0
     return r_step
 
+def norm_function(cf_arr, ct_arr, onset_point, offset_point):
+    onset_end = onset_point - 10
+    offset_start = offset_point - 10
+
+    base = np.mean(cf_arr[(ct_arr >= onset_end) & (ct_arr < onset_point)])
+
+    ground = cf_arr - base
+
+    jar = np.mean(ground[(ct_arr >= offset_start) & (ct_arr < offset_point)])
+
+    norm = ground / jar
+    return norm
 
 def base_eod(frequencies, time, onset_point):
     base_eod = []
@@ -94,32 +139,6 @@ def JAR_eod(frequencies, time, offset_point):
 
     return jar_eod
 
-def mean_loops(start, stop, timespan, frequencies, time):
-    minimumt = min(len(time[0]), len(time[1]))
-    # new time axis with the wished timespan, because it varies between loops
-    tnew = np.arange(start, stop, timespan / minimumt)  # 3rd input is the step spacing:
-    # here the complete measuring time divided by the total number of datapoints
-    # interpolation
-    f0 = np.interp(tnew, time[0], frequencies[0])
-    f1 = np.interp(tnew, time[1], frequencies[1])
-    # new array with the frequencies of both loops as two lists put together
-    frequency = np.array([f0, f1])
-    # mean over both loops along axis 0 (= averaged in y direction; axis=1 would be over the x axis)
-    mf = np.mean(frequency, axis=0)
-    return mf, tnew
-
-
-def norm_function(cf_arr, ct_arr, onset_point, offset_point):
-    onset_end = onset_point - 10
-    offset_start = offset_point - 10
-
-    base = np.mean(cf_arr[(ct_arr >= onset_end) & (ct_arr < onset_point)])
-    ground = cf_arr - base
-
-    jar = np.mean(cf_arr[(ct_arr >= offset_start) & (ct_arr < offset_point)])
-
-    norm = ground / jar
-    return norm
\ No newline at end of file
diff --git a/second_try.py b/second_try.py
index b4bcc59..174bd65 100644
--- a/second_try.py
+++ b/second_try.py
@@ -6,70 +6,74 @@ import numpy as np
 from IPython import embed
 from scipy.optimize import curve_fit
 from jar_functions import parse_dataset
-from jar_functions import mean_noise_cut
-from jar_functions import step_response
-from jar_functions import JAR_eod
-from jar_functions import base_eod
+from jar_functions import parse_infodataset
 from jar_functions import mean_loops
+from jar_functions import mean_noise_cut
 from jar_functions import norm_function
+from jar_functions import step_response
 
+datasets = [(os.path.join('D:\\jar_project\\JAR\\2020-06-22-ab\\beats-eod.dat')),
+            (os.path.join('D:\\jar_project\\JAR\\2020-06-22-ac\\beats-eod.dat'))]
 
+infodatasets = [(os.path.join('D:\\jar_project\\JAR\\2020-06-22-ac\\info.dat'))]
 
-datasets = [(os.path.join('D:\\jar_project\\JAR\\2020-06-22-ac\\beats-eod.dat'))]
-
-eodf = []
-deltaf = []
-stimulusf = []
 time = []
-frequency_mean= []
-amplitude = []
+frequency_mean = []
+
+constant_factors = []
+time_constants = []
 
 start = -10
 stop = 200
 timespan = 210
 
+for infodataset in infodatasets:
+    i = parse_infodataset(infodataset)
+    identifier = i[0]
 
 for dataset in datasets:  # input of the function
-    t, f, a, e, d, s = parse_dataset(dataset)
-    mf , tnew = mean_loops(start, stop, timespan, f, t)
-embed()
+    frequency, time, amplitude, eodf, deltaf, stimulusf, duration = parse_dataset(dataset)
+    mf, tnew = mean_loops(start, stop, timespan, frequency, time)
+    dm = np.mean(duration)
+    frequency_mean.append(mf)
+    time.append(tnew)
 
-for i in range(len(mf)):
-    for n in [500, 1000, 1500]:
-        cf, ct = mean_noise_cut(mf[i], time[i], n=n)
+for i in range(len(frequency_mean)):
+    cf, ct = mean_noise_cut(frequency_mean[i], time[i], n=1000)
 
-        cf_arr = np.array(cf)
-        ct_arr = np.array(ct)
+    cf_arr = np.array(cf)
+    ct_arr = np.array(ct)
 
-        norm = norm_function(cf_arr, ct_arr, onset_point = 0, offset_point = 100)
+    norm = norm_function(cf_arr, ct_arr, onset_point = dm - dm, offset_point = dm)  # dm - dm only works if onset = 0 s
 
-        plt.plot(ct_arr, norm, label='n=%d' % n)
+    plt.plot(ct_arr, norm)  # , label='n=%d' % n)
 
-        #r_step = step_response(t=ct_arr, a1=0.58, a2=0.47, tau1=11.7, tau2=60)
+    sv, sc = curve_fit(step_response, ct_arr[ct_arr < 100], norm[ct_arr < 100])  # step_values and step_cov
+    a = sv[:2]
+    tau = np.array(sorted(sv[2:], reverse=False))
+    values = np.array([a, tau])
+    values_flat = values.flatten()
 
-        #plt.plot(ct_arr[ct_arr < 100], r_step[ct_arr < 100], label='fit: n=%d' % n)
+    plt.plot(ct_arr[ct_arr < 100], step_response(ct_arr, *sv)[ct_arr < 100], 'r-', label='fit: a1=%.2f, a2=%.2f, tau1=%.2f, tau2=%.2f' % tuple(values_flat))
 
-        step_values, step_cov = curve_fit(step_response, ct_arr[ct_arr < 100], norm[ct_arr < 100])
+    print('a1, a2, tau1, tau2', values_flat)
+    constant_factors.append(a)
+    time_constants.append(tau)
 
-        plt.plot(ct_arr[ct_arr < 100], step_response(ct_arr, *step_values)[ct_arr < 100], 'r-', label='fit: a1=%.2f, a2=%.2f, tau1=%.2f, tau2=%.2f' % tuple(step_values))
-        print(step_values)
 const_line = plt.axhline(y=0.632)
-
-'plotting'
 plt.xlim([-10,220])
-#plt.ylim([400, 1000])
 plt.xlabel('time [s]')
 plt.ylabel('rel. JAR magnitude')
-#plt.title('fit_function(a1=0)')
-#plt.savefig('fit_function(a1=0)')
+plt.title('relative JAR')
+plt.savefig('relative JAR')
 plt.legend(loc = 'lower right')
 plt.show()
 embed()
 
-# move even more into functions (basically only reading in the functions and plotting should remain here)
-# sort time constants into large and small
-# read out the onset duration
-# read the ID from info.dat
+
 # read in all data with one big for loop (also average over all fish?)
 # check the fit for individual fish
+# Questions: how to choose the offset point if the recording does not start at 0 s?
+# how to get a1, tau1, ... without an array? (works, because of the dimensions, if I append it to a list again)
+
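
Not part of the patch: the script fits step_response(t, a1, a2, tau1, tau2) with curve_fit, but the body of step_response is only partly visible in this diff. Below is a minimal, self-contained sketch of that fitting step, assuming the double-exponential form suggested by the parameter names and the commented-out call in the old code; the synthetic data, p0 starting values, and bounds are illustrative assumptions, not values from the project.

import numpy as np
from scipy.optimize import curve_fit

def step_response(t, a1, a2, tau1, tau2):
    # assumed form: sum of two saturating exponentials, forced to zero before stimulus onset
    r_step = a1 * (1 - np.exp(-t / tau1)) + a2 * (1 - np.exp(-t / tau2))
    r_step[t < 0] = 0
    return r_step

# synthetic stand-in for the normalized JAR trace (norm vs. ct_arr in second_try.py)
t = np.arange(-10, 200, 0.1)
norm = step_response(t, 0.6, 0.4, 10.0, 60.0) + np.random.normal(0, 0.02, len(t))

# restrict the fit to t < 100 s as in the script; p0 and bounds keep amplitudes and time constants positive
sv, sc = curve_fit(step_response, t[t < 100], norm[t < 100],
                   p0=[0.5, 0.5, 10.0, 60.0], bounds=(0, np.inf))
print('a1=%.2f, a2=%.2f, tau1=%.2f, tau2=%.2f' % tuple(sv))

The horizontal line at y=0.632 drawn in the script presumably marks the 1 - 1/e level used to read a time constant off the normalized response.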