xaver 2020-09-10 11:17:54 +02:00
parent da1bc387b4
commit 03dc051388
7 changed files with 222 additions and 62 deletions

View File

@@ -28,48 +28,87 @@ for ID in identifier:
dur = int(duration[0][0:2])
print(df)
# time, eod = nh.read_eod(datapath, duration = 2000) # instead of import_data with the 'manualjar' tag - then the onset should really be at 10 s
data, pre_dat, dt = import_data_eigen(datapath)
# base with nh.read_eod
time, eod = nh.read_eod(datapath, duration=2000)  # instead of import_data with the 'manualjar' tag - then the onset should really be at 10 s
zeropoints = get_time_zeros(time, eod, threshold=np.max(eod) * 0.1)
frequencies = 1 / np.diff(zeropoints)
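# smooth the instantaneous frequency with a 101-point moving average (boxcar)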
window = np.ones(101) / 101
freq = np.convolve(frequencies, window, mode='same')
data, pre_data, dt = import_data_eigen(datapath)
# data
nfft = 2**17
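# long FFT window for fine frequency resolution (bin width = (1/dt) / nfft); 95 % overlap gives a smooth time course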
spec, freqs, times = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
dbspec = 10.0 * np.log10(spec) # in dB
power = dbspec[:, 50]
fish_p = power[(freqs > 200) & (freqs < 1000)]
fish_f = freqs[(freqs > 200) & (freqs < 1000)]
index = np.argmax(fish_p)
eodf = fish_f[index]
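# presumably the 4th harmonic is tracked because frequency shifts are four times larger there relative to the spectrogram's bin width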
eodf4 = eodf * 4
lim0 = eodf4-20
lim1 = eodf4+20
df = freqs[1] - freqs[0]
ix0 = int(np.floor(lim0/df)) # back to index
ix1 = int(np.ceil(lim1/df)) # back to index
spec4 = dbspec[ix0:ix1, :]
freq4 = freqs[ix0:ix1]
jar4 = freq4[np.argmax(spec4, axis=0)]  # peak frequency in each time bin
jar = jar4 / 4
spec_0, freqs_0, times_0 = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
dbspec_0 = 10.0 * np.log10(spec_0) # in dB
power_0 = dbspec_0[:, 25]
fish_p_0 = power_0[(freqs_0 > 200) & (freqs_0 < 1000)]
fish_f_0 = freqs_0[(freqs_0 > 200) & (freqs_0 < 1000)]
index_0 = np.argmax(fish_p_0)
eodf_0 = fish_f_0[index_0]
eodf4_0 = eodf_0 * 4
lim0_0 = eodf4_0-20
lim1_0 = eodf4_0+20
df_0 = freqs_0[1] - freqs_0[0]
ix0_0 = int(np.floor(lim0_0 / df_0))  # back to index
ix1_0 = int(np.ceil(lim1_0 / df_0))  # back to index
spec4_0 = dbspec_0[ix0_0:ix1_0, :]
freq4_0 = freqs_0[ix0_0:ix1_0]
jar4 = freq4_0[np.argmax(spec4_0, axis=0)]  # peak frequency in each time bin
jm = jar4 - np.mean(jar4)  # mean-subtracted JAR trace (the data we use)
cut_times = times[:len(jar4)]
cut_time_jar = times_0[:len(jar4)]
#plt.imshow(spec4_0, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
#plt.plot(cut_time_jar, jar4)
#plt.ylim(lim0_0, lim1_0)
# pre_data
nfft = 2 ** 17
spec_1, freqs_1, times_1 = specgram(pre_data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
dbspec_1 = 10.0 * np.log10(spec_1) # in dB
power_1 = dbspec_1[:, 25]
fish_p_1 = power_1[(freqs_1 > 200) & (freqs_1 < 1000)]
fish_f_1 = freqs_1[(freqs_1 > 200) & (freqs_1 < 1000)]
index_1 = np.argmax(fish_p_1)
eodf_1 = fish_f_1[index_1]
eodf4_1 = eodf_1 * 4
lim0_1 = eodf4_1 - 20
lim1_1 = eodf4_1 + 20
df_1 = freqs_1[1] - freqs_1[0]
ix0_1 = int(np.floor(lim0_1 / df_1)) # back to index
ix1_1 = int(np.ceil(lim1_1 / df_1)) # back to index
spec4_1 = dbspec_1[ix0_1:ix1_1, :]
freq4_1 = freqs_1[ix0_1:ix1_1]
base4 = freq4_1[np.argmax(spec4_1, axis=0)]  # peak frequency in each time bin
bm = base4 - np.mean(base4)  # mean-subtracted baseline trace (the data we use)
cut_time_base = times_1[:len(base4)] - times_1[-1]
#plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
plt.plot(cut_times, jm)
plt.plot(cut_times, savgol_filter(jm, 11, 3))  # assumption: 'savgol' was undefined and presumably meant a Savitzky-Golay smoothed jm (scipy.signal.savgol_filter; window/order illustrative)
#plt.ylim(lim0, lim1)
plt.show()
#plt.plot(cut_time_base, base4)
j = []
for idx, i in enumerate(times_0):
    if 45 < i < 55:
        j.append(jm[idx])
# no need to filter, just take the median/mean as below
r = np.median(j) - np.median(bm)
deltaf.append(df[0])
response.append(r)
res_df = sorted(zip(deltaf, response))
np.save('res_df_%s_new' % ID, res_df)
# problem: the raw traces (data, pre_data) cannot be savgol-filtered because of how they are structured as 1D arrays
# I only get them via specgram as a frequency-over-time trace, which can then no longer be savgol-filtered
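A minimal workaround sketch (not from this commit): scipy.signal.savgol_filter does accept plain 1D arrays, so the spectrogram-derived trace jm can be smoothed directly; the window length and polynomial order below are illustrative assumptions.

from scipy.signal import savgol_filter
wl = 11  # odd window length, counted in spectrogram time steps (assumed)
if len(jm) > wl:
    jm_smooth = savgol_filter(jm, window_length=wl, polyorder=3)
    plt.plot(cut_times, jm_smooth)  # smoothed JAR trace for comparison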

eigenmannia_jar_savgol.py Normal file
View File

@@ -0,0 +1,91 @@
import matplotlib.pyplot as plt
import numpy as np
import os
import nix_helpers as nh
from IPython import embed
from matplotlib.mlab import specgram
# from tqdm import tqdm
from jar_functions import parse_stimuli_dat
from jar_functions import norm_function_eigen
from jar_functions import mean_noise_cut_eigen
from jar_functions import get_time_zeros
from jar_functions import import_data_eigen
from jar_functions import get_new_zero_crossings
from scipy.signal import savgol_filter
base_path = 'D:\\jar_project\\JAR\\eigenmannia'
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
response = []
deltaf = []
for ID in identifier:
for dataset in os.listdir(os.path.join(base_path, ID)):
datapath = os.path.join(base_path, ID, dataset, '%s.nix' % dataset)
print(datapath)
stimuli_dat = os.path.join(base_path, ID, dataset, 'manualjar-eod.dat')
df, duration = parse_stimuli_dat(stimuli_dat)
dur = int(duration[0][0:2])
print(df)
data, pre_data, dt = import_data_eigen(datapath)
# data
nfft = 2 ** 17
spec_0, freqs_0, times_0 = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
dbspec_0 = 10.0 * np.log10(spec_0) # in dB
power_0 = dbspec_0[:, 25]
fish_p_0 = power_0[(freqs_0 > 200) & (freqs_0 < 1000)]
fish_f_0 = freqs_0[(freqs_0 > 200) & (freqs_0 < 1000)]
index_0 = np.argmax(fish_p_0)
eodf_0 = fish_f_0[index_0]
eodf4_0 = eodf_0 * 4
lim0_0 = eodf4_0 - 20
lim1_0 = eodf4_0 + 20
df_0 = freqs_0[1] - freqs_0[0]
ix0_0 = int(np.floor(lim0_0 / df_0)) # back to index
ix1_0 = int(np.ceil(lim1_0 / df_0)) # back to index
spec4_0 = dbspec_0[ix0_0:ix1_0, :]
freq4_0 = freqs_0[ix0_0:ix1_0]
jar4 = freq4_0[np.argmax(spec4_0, axis=0)]  # peak frequency in each time bin
jm = jar4 - np.mean(jar4)  # mean-subtracted JAR trace (the data we use)
cut_time_jar = times_0[:len(jar4)]
# pre_data: base with nh.read_eod
time, eod = nh.read_eod(datapath, duration=2000)  # instead of import_data with the 'manualjar' tag - then the onset should really be at 10 s
wl = int(0.001 / (time[1] - time[0]) + 1)  # Savitzky-Golay window: ~1 ms expressed in samples
filtered_eod = savgol_filter(eod, wl, 5, deriv=0, delta=time[1] - time[0])
zero_line_threshold = np.mean(eod)
time_zero, zero_idx = get_new_zero_crossings(time, filtered_eod, threshold=zero_line_threshold)
eod_interval = np.diff(time_zero)
time_zero = time_zero[:-1]
center_eod_time = time_zero + 0.5 * eod_interval
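# instantaneous EOD frequency from the intervals between consecutive zero crossings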
frequencies = 1 / eod_interval
j = []
for idx, i in enumerate(times_0):
    if 45 < i < 55:
        j.append(jm[idx])
b = []
for idx, i in enumerate(time_zero):
    if i < 10:  # baseline: zero crossings before stimulus onset at ~10 s
        b.append(frequencies[idx])
bm = np.asarray(b) - np.mean(b)
r = np.median(j) - np.median(bm)
deltaf.append(df[0])
response.append(r)
embed()
res_df = sorted(zip(deltaf, response))
np.save('res_df_%s_new' % ID, res_df)
# problem: the raw traces (data, pre_data) cannot be savgol-filtered because of how they are structured as 1D arrays
# I only get them via specgram as a frequency-over-time trace, which can then no longer be savgol-filtered
# however, I could still simply read the response out of jar4, although it would then be less filtered

eigenmannia_response.py Normal file
View File

@@ -0,0 +1,4 @@
import matplotlib.pyplot as plt
import numpy as np
import os
from IPython import embed

View File

@@ -21,17 +21,24 @@ identifier = ['2018lepto1',
'2020lepto19',
'2020lepto20'
]
tau = []
f_c = []
for ID in identifier:
print(ID)
amf = np.load('amf_%s.npy' %ID)
gain = np.load('gain_%s.npy' %ID)
rms = np.load('rms_%s.npy' %ID)
thresh = np.load('thresh_%s.npy' % ID)
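# keep only modulation frequencies whose response rms lies below the noise threshold or below the mean rms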
idx_arr = (rms < thresh) | (rms < np.mean(rms))
embed()
sinv, sinc = curve_fit(gain_curve_fit, amf[idx_arr], gain[idx_arr])
print(sinv[0])
'''fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(amf, gain, 'o')
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
'''
sinv, sinc = curve_fit(gain_curve_fit, amf, gain)
print('tau:', sinv[0])
tau.append(sinv[0])
f_cutoff = 1 / (2*np.pi*sinv[0])
print(f_cutoff)
print('f_cutoff:', f_cutoff)
f_c.append(f_cutoff)
# which time constant is this? what about the second one?
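For orientation, a minimal sketch of the first-order low-pass relation implied by the cutoff computation above; the actual gain_curve_fit is defined in jar_functions, so this model form is an assumption.

import numpy as np

def first_order_lowpass_gain(f, tau, g0=1.0):
    # assumed gain model: flat at g0 below f_c = 1 / (2 * pi * tau), ~1/f roll-off above
    return g0 / np.sqrt(1.0 + (2.0 * np.pi * f * tau) ** 2)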

View File

@@ -222,6 +222,26 @@ def get_time_zeros (time, ampl, threshold = 0.0):
return zeropoints
def get_new_zero_crossings(time, ampl, threshold=1):
"""
Ermittelt die Zeitpunkte der Nullpunkte der EOD-Kurve
param time: Zeitachse der Datei
param eod: EOD-Kurve aus Datei
return zeropoints: Liste mit Nullpunkten der EOD-Kurve
"""
new_time = time[:-1]
new_amp = ampl[:-1]
zero_idx = np.where((ampl[:-1] <= threshold) & (ampl[1:] > threshold))[0]
dx = np.mean(np.diff(new_time))
zeropoints = new_time[zero_idx]
for index, zeit_index in enumerate(zero_idx):  # refine each crossing time by linear interpolation between the two samples around the threshold
    dy = ampl[zeit_index + 1] - ampl[zeit_index]
    m = dy / dx
    x = (threshold - ampl[zeit_index]) / m
    zeropoints[index] += x
return zeropoints, zero_idx
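# Minimal usage sketch for get_new_zero_crossings (synthetic data, not part of jar_functions):
#   t = np.arange(0.0, 1.0, 1.0 / 20000.0)        # 20 kHz time axis
#   sig = np.sin(2 * np.pi * 500.0 * t)           # mock 500 Hz EOD trace
#   tz, idx = get_new_zero_crossings(t, sig, threshold=0.0)
#   f_inst = 1.0 / np.diff(tz)                    # ~500 Hz throughout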
def sort_values(values):
a = values[:2]
tau = np.array(sorted(values[2:], reverse=False))

View File

@@ -7,7 +7,7 @@ from IPython import embed
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
for ID in identifier:
res_df = np.load('res_df_%s.npy' %ID)
res_df = np.load('res_df_%s_new.npy' %ID)
mres = []
mdf = []

View File

@@ -11,20 +11,20 @@ from jar_functions import gain_curve_fit
def take_second(elem):  # sort key: returns the second element of a tuple
    return elem[1]
identifier = ['2018lepto1',
'2018lepto4',
'2018lepto5',
'2018lepto76',
'2018lepto98',
'2019lepto03',
'2019lepto24',
'2019lepto27',
'2019lepto30',
identifier = [#'2018lepto1',
#'2018lepto4',
#'2018lepto5',
#'2018lepto76',
#'2018lepto98',
#'2019lepto03',
#'2019lepto24',
#'2019lepto27',
#'2019lepto30',
'2020lepto04',
'2020lepto06',
'2020lepto16',
'2020lepto19',
'2020lepto20'
#'2020lepto06',
#'2020lepto16',
#'2020lepto19',
#'2020lepto20'
]
for ident in identifier:
@@ -158,10 +158,9 @@ for ident in identifier:
ax1.set_xlabel('envelope_frequency [Hz]')
ax1.set_ylabel('RMS [Hz]')
plt.legend()
#pylab.show()
pylab.show()
np.save('gain_%s' %ident, mgain_arr[idx_arr])
np.save('amf_%s' %ident, amfreq_arr[idx_arr])
#np.save('gain_%s' %ident, mgain_arr[idx_arr])
#np.save('amf_%s' %ident, amfreq_arr[idx_arr])
np.save('rms_%s' %ident, rootmeansquare_arr)
np.save('thresh_%s' %ident, threshold_arr)
embed()