import matplotlib.pyplot as plt
import numpy as np
import os
import nix_helpers as nh
from IPython import embed
from matplotlib.mlab import specgram
#from tqdm import tqdm
from jar_functions import parse_stimuli_dat
from jar_functions import norm_function_eigen
from jar_functions import mean_noise_cut_eigen
from jar_functions import get_time_zeros
from jar_functions import import_data_eigen
from scipy.signal import savgol_filter

base_path = 'D:\\jar_project\\JAR\\eigenmannia'

# fish IDs whose recordings are analysed
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']

# collected JAR responses and the corresponding stimulus delta f
response = []
deltaf = []

for ID in identifier:
    for dataset in os.listdir(os.path.join(base_path, ID)):
        datapath = os.path.join(base_path, ID, dataset, '%s.nix' % dataset)
        print(datapath)
        stimuli_dat = os.path.join(base_path, ID, dataset, 'manualjar-eod.dat')

        df, duration = parse_stimuli_dat(stimuli_dat)
        dur = int(duration[0][0:2])   # stimulus duration in s, parsed from the leading digits
        print(df)

        # time, eod = nh.read_eod(datapath, duration=2000)  # instead of import_data with the 'manualjar' tag - then the onset should really be at 10 s
        data, pre_dat, dt = import_data_eigen(datapath)

        # spectrogram of the recording; large NFFT for fine frequency resolution
        nfft = 2**17
        spec, freqs, times = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=int(nfft * 0.95))
        dbspec = 10.0 * np.log10(spec)  # in dB
        power = dbspec[:, 50]           # power spectrum at one time bin, used to find the fish's EOD frequency

        # restrict the peak search to the plausible EOD frequency range
        fish_p = power[(freqs > 200) & (freqs < 1000)]
        fish_f = freqs[(freqs > 200) & (freqs < 1000)]

        index = np.argmax(fish_p)
        eodf = fish_f[index]
        eodf4 = eodf * 4    # track the 4th harmonic

        lim0 = eodf4 - 20
        lim1 = eodf4 + 20

        freq_step = freqs[1] - freqs[0]         # frequency resolution of the spectrogram
        ix0 = int(np.floor(lim0 / freq_step))   # back to index
        ix1 = int(np.ceil(lim1 / freq_step))    # back to index
        spec4 = dbspec[ix0:ix1, :]
        freq4 = freqs[ix0:ix1]
        jar4 = freq4[np.argmax(spec4, axis=0)]  # frequency of maximal power in every time bin
        jar = jar4 / 4
        jm = jar4 - np.mean(jar4)               # data we take
        cut_times = times[:len(jar4)]

        #plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
        savgol = savgol_filter(jm, 51, 3)   # smoothed frequency trace; window length and polyorder are assumed values
        plt.plot(cut_times, jm)
        plt.plot(cut_times, savgol)
        #plt.ylim(lim0, lim1)
        plt.show()

        # filtering is not strictly necessary, a simple median/mean as below would do (see the running-mean sketch below)
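        # Sketch (not part of the original analysis): smooth the frequency trace with a
        # simple running mean instead of the savgol filter; the 5-bin window is an
        # arbitrary assumption.
        kernel = np.ones(5) / 5
        jm_smooth = np.convolve(jm, kernel, mode='same')
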
    '''
    res_df = sorted(zip(deltaf, response))

    np.save('res_df_%s' % ID, res_df)
    '''

# problem: the raw data (data, pre_data) cannot be savgol-filtered because of their 1D-array structure
# I only get them via specgram, plotted as frequency over time, which can no longer be savgol-filtered
# however, I could still simply read the response out of jar4, although it would then be less filtered;
# a sketch of that is given below
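
# Minimal sketch (assumption, not from the original script): read the JAR response
# directly out of jar4 without savgol filtering, as the mean frequency after stimulus
# onset minus the mean before onset. The onset of 10 s is taken from the comment on
# nh.read_eod above; the 5 s window length is an assumption.
def response_from_jar4(jar4, times, onset=10.0, window=5.0):
    t = times[:len(jar4)]
    base = np.mean(jar4[(t > onset - window) & (t < onset)])
    resp = np.mean(jar4[(t > onset) & (t < onset + window)])
    return (resp - base) / 4    # divide by 4 to get back from the 4th harmonic to EODf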