This commit is contained in:
xaver 2020-09-16 17:01:35 +02:00
parent ea00a01fd9
commit a5a9cfbe73
13 changed files with 74 additions and 1167 deletions

View File

@@ -0,0 +1,65 @@
import os
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
import nix_helpers as nh
from jar_functions import get_time_zeros
from jar_functions import parse_dataset
from jar_functions import mean_traces
from jar_functions import mean_noise_cut_eigen
base_path = 'D:\\jar_project\\JAR\\sin'
identifier = ['2018lepto1',
              '2018lepto4',
              '2018lepto5',
              '2018lepto76',
              '2018lepto98',
              '2019lepto03',
              '2019lepto24',
              '2019lepto27',
              '2019lepto30',
              '2020lepto04',
              '2020lepto06',
              '2020lepto16',
              '2020lepto19',
              '2020lepto20'
              ]
for ID in identifier:
    base = []
    for dataset in os.listdir(os.path.join(base_path, ID)):
        if dataset == 'prerecordings':
            continue
        datapath = os.path.join(base_path, ID, dataset, 'beats-eod.dat')
        print(datapath)
        if not os.path.exists(datapath):  # skip datasets without a beats-eod.dat
            continue
        frequency, time, amplitude, eodf, deltaf, stimulusf, duration, pause = parse_dataset(datapath)

        dm = np.mean(duration)
        pm = np.mean(pause)
        timespan = dm + pm
        start = np.mean([t[0] for t in time])
        stop = np.mean([t[-1] for t in time])

        mf, tnew = mean_traces(start, stop, timespan, frequency, time)  # maybe fixed timespan/sampling rate

        cf, ct = mean_noise_cut_eigen(mf, tnew, 1250)
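        # average the smoothed frequency trace in a fixed pre-stimulus window
        # (-45 s to -5 s, presumably before stimulus onset at 0 s) to estimate
        # the baseline EODf of this trial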
        f = []
        for idx, i in enumerate(ct):
            if -45 < i < -5:
                f.append(cf[idx])
        ff = np.mean(f)
        base.append(ff)

        plt.plot(ct, cf)
        plt.show()

    base_eod = np.mean(base)
    print(ID)
    print(base_eod)

embed()

View File

@@ -1,127 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import os
import nix_helpers as nh
from IPython import embed
from matplotlib.mlab import specgram
#from tqdm import tqdm
from jar_functions import parse_stimuli_dat
from jar_functions import norm_function_eigen
from jar_functions import mean_noise_cut_eigen
from jar_functions import get_time_zeros
from jar_functions import import_data_eigen
from scipy.signal import savgol_filter
base_path = 'D:\\jar_project\\JAR\\eigenmannia\\deltaf'
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
response = []
deltaf = []
for ID in identifier:
    for dataset in os.listdir(os.path.join(base_path, ID)):
        datapath = os.path.join(base_path, ID, dataset, '%s.nix' % dataset)
        print(datapath)
        stimuli_dat = os.path.join(base_path, ID, dataset, 'manualjar-eod.dat')

        df, duration = parse_stimuli_dat(stimuli_dat)
        dur = int(duration[0][0:2])
        print(df)

        # base with nh.read_eod
        time, eod = nh.read_eod(datapath, duration=2000)  # instead of import_data with the ManualJAR tag - onset should then really be at 10 s

        dt = time[1] - time[0]
        nfft = 2**17
        spec_0, freqs_0, times_0 = specgram(eod, Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
        dbspec_0 = 10.0 * np.log10(spec_0)  # in dB
        plt.imshow(dbspec_0, cmap='jet', origin='lower', extent=(times_0[0], times_0[-1], 0, 1500), aspect='auto',
                   vmin=-80, vmax=-10)
        plt.show()

        zeropoints = get_time_zeros(time, eod, threshold=np.max(eod) * 0.1)
        frequencies = 1 / np.diff(zeropoints)
        window = np.ones(101) / 101
        freq = np.convolve(frequencies, window, mode='same')

        data, pre_data, dt = import_data_eigen(datapath)
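        # The JAR is read out at the 4th harmonic of the fish's EODf: the
        # harmonic shifts four times as far in Hz, so the frequency excursion
        # is easier to resolve in the spectrogram. A +-20 Hz band around
        # 4*EODf is cut out and the peak frequency is tracked per time bin.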
        # data
        nfft = 2**17
        spec_0, freqs_0, times_0 = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
        dbspec_0 = 10.0 * np.log10(spec_0)  # in dB
        power_0 = dbspec_0[:, 25]

        fish_p_0 = power_0[(freqs_0 > 200) & (freqs_0 < 1000)]
        fish_f_0 = freqs_0[(freqs_0 > 200) & (freqs_0 < 1000)]

        index_0 = np.argmax(fish_p_0)
        eodf_0 = fish_f_0[index_0]
        eodf4_0 = eodf_0 * 4

        lim0_0 = eodf4_0 - 20
        lim1_0 = eodf4_0 + 20

        df_0 = freqs_0[1] - freqs_0[0]
        ix0_0 = int(np.floor(lim0_0 / df_0))  # back to index
        ix1_0 = int(np.ceil(lim1_0 / df_0))  # back to index

        spec4_0 = dbspec_0[ix0_0:ix1_0, :]
        freq4_0 = freqs_0[ix0_0:ix1_0]
        jar4 = freq4_0[np.argmax(spec4_0, axis=0)]  # all freqs at max specs over axis 0
        jm = jar4 - np.mean(jar4)  # data we take
        cut_time_jar = times_0[:len(jar4)]

        #plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80,
        #           vmax=-10)
        #plt.imshow(spec4_0, cmap='jet', origin='lower', extent=(times_0[0], times_0[-1], lim0_0, lim1_0), aspect='auto', vmin=-80, vmax=-10)
        plt.plot(cut_time_jar, jm)
        #plt.ylim(lim0_0, lim1_0)
        # pre_data
        nfft = 2**17
        spec_1, freqs_1, times_1 = specgram(pre_data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
        dbspec_1 = 10.0 * np.log10(spec_1)  # in dB
        power_1 = dbspec_1[:, 25]

        fish_p_1 = power_1[(freqs_1 > 200) & (freqs_1 < 500)]
        fish_f_1 = freqs_1[(freqs_1 > 200) & (freqs_1 < 500)]

        index1 = np.argmax(fish_p_1)
        eodf_1 = fish_f_1[index1]
        eodf4_1 = eodf_1 * 4

        lim0_1 = eodf4_1 - 20
        lim1_1 = eodf4_1 + 20

        df_1 = freqs_1[1] - freqs_1[0]
        ix0_1 = int(np.floor(lim0_1 / df_1))  # back to index
        ix1_1 = int(np.ceil(lim1_1 / df_1))  # back to index

        spec4_1 = dbspec_1[ix0_1:ix1_1, :]
        freq4_1 = freqs_1[ix0_1:ix1_1]
        base4 = freq4_1[np.argmax(spec4_1, axis=0)]  # all freqs at max specs over axis 0
        bm = base4 - np.mean(base4)  # data we take
        cut_time_base = times_1[:len(base4)] - times_1[-1]
        plt.plot(cut_time_base, bm)
        j = []
        for idx, i in enumerate(times_0):
            if 45 < i < 55:
                j.append(jm[idx])
        plt.plot(j)
        plt.show()

        r = np.median(j) - np.median(bm)
        deltaf.append(df[0])
        response.append(r)
        embed()

    res_df = sorted(zip(deltaf, response))
    np.save('res_df_%s_new' % ID, res_df)

# problem: the raw data (data, pre_data) cannot be savgol-filtered because of their 1D array structure
# I only get them via specgram as a freq/time representation, which can no longer be savgol-filtered
# however, I could still simply read the response out of jar4, though that would then be less filtered

View File

@@ -1,91 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import os
import nix_helpers as nh
from IPython import embed
from matplotlib.mlab import specgram
# from tqdm import tqdm
from jar_functions import parse_stimuli_dat
from jar_functions import norm_function_eigen
from jar_functions import mean_noise_cut_eigen
from jar_functions import get_time_zeros
from jar_functions import import_data_eigen
from jar_functions import get_new_zero_crossings
from scipy.signal import savgol_filter
base_path = 'D:\\jar_project\\JAR\\eigenmannia\\deltaf'
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
response = []
deltaf = []
for ID in identifier:
    for dataset in os.listdir(os.path.join(base_path, ID)):
        datapath = os.path.join(base_path, ID, dataset, '%s.nix' % dataset)
        print(datapath)
        stimuli_dat = os.path.join(base_path, ID, dataset, 'manualjar-eod.dat')

        df, duration = parse_stimuli_dat(stimuli_dat)
        dur = int(duration[0][0:2])
        print(df)

        data, pre_data, dt = import_data_eigen(datapath)

        # data
        nfft = 2**17
        spec_0, freqs_0, times_0 = specgram(data[0], Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)
        dbspec_0 = 10.0 * np.log10(spec_0)  # in dB
        power_0 = dbspec_0[:, 25]

        fish_p_0 = power_0[(freqs_0 > 200) & (freqs_0 < 1000)]
        fish_f_0 = freqs_0[(freqs_0 > 200) & (freqs_0 < 1000)]

        index_0 = np.argmax(fish_p_0)
        eodf_0 = fish_f_0[index_0]
        eodf4_0 = eodf_0 * 4

        lim0_0 = eodf4_0 - 20
        lim1_0 = eodf4_0 + 20

        df_0 = freqs_0[1] - freqs_0[0]
        ix0_0 = int(np.floor(lim0_0 / df_0))  # back to index
        ix1_0 = int(np.ceil(lim1_0 / df_0))  # back to index

        spec4_0 = dbspec_0[ix0_0:ix1_0, :]
        freq4_0 = freqs_0[ix0_0:ix1_0]
        jar4 = freq4_0[np.argmax(spec4_0, axis=0)]  # all freqs at max specs over axis 0
        jm = jar4 - np.mean(jar4)  # data we take
        cut_time_jar = times_0[:len(jar4)]
        # pre_data: base with nh.read_eod
        time, eod = nh.read_eod(datapath, duration=2000)  # instead of import_data with the ManualJAR tag - onset should then really be at 10 s

        wl = int(0.001 / (time[1] - time[0]) + 1)
        filtered_eod = savgol_filter(eod, wl, 5, deriv=0, delta=time[1] - time[0])

        zero_line_threshold = np.mean(eod)
        time_zero, zero_idx = get_new_zero_crossings(time, filtered_eod, threshold=zero_line_threshold)

        eod_interval = np.diff(time_zero)
        time_zero = time_zero[:-1]
        center_eod_time = time_zero + 0.5 * eod_interval
        frequencies = 1 / eod_interval
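        # b below collects the instantaneous EOD frequencies (1 / zero-crossing
        # interval) of the first 10 s, i.e. before stimulus onset, as the baseline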
        j = []
        for idx, i in enumerate(times_0):
            if 45 < i < 55:
                j.append(jm[idx])

        b = []
        for idx, i in enumerate(time_zero):
            if i < 10:
                b.append(frequencies[idx])
        bm = b - np.mean(b)

        r = np.median(j) - np.median(bm)
        deltaf.append(df[0])
        response.append(r)
        embed()

    res_df = sorted(zip(deltaf, response))
    np.save('res_df_%s_new' % ID, res_df)

# problem: the raw data (data, pre_data) cannot be savgol-filtered because of their 1D array structure
# I only get them via specgram as a freq/time representation, which can no longer be savgol-filtered
# however, I could still simply read the response out of jar4, though that would then be less filtered

View File

@@ -1,4 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import os
from IPython import embed

View File

@@ -1,48 +0,0 @@
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pylab
from IPython import embed
from scipy.optimize import curve_fit
from jar_functions import gain_curve_fit
identifier = ['2018lepto1',
              '2018lepto4',
              '2018lepto5',
              '2018lepto76',
              '2018lepto98',
              '2019lepto03',
              '2019lepto24',
              '2019lepto27',
              '2019lepto30',
              '2020lepto04',
              '2020lepto06',
              '2020lepto16',
              '2020lepto19',
              '2020lepto20'
              ]
tau = []
f_c = []
for ID in identifier:
    print(ID)
    amf = np.load('amf_%s.npy' % ID)
    gain = np.load('gain_%s.npy' % ID)

    sinv, sinc = curve_fit(gain_curve_fit, amf, gain)
    print('tau:', sinv[0])
    tau.append(sinv[0])
    f_cutoff = 1 / (2 * np.pi * sinv[0])
    print('f_cutoff:', f_cutoff)
    f_c.append(f_cutoff)
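    # gain_curve_fit models a first-order low-pass,
    # gain(f) = alpha / sqrt(1 + (2*pi*f*tau)**2),
    # so the fitted tau directly yields the corner frequency f_cutoff = 1 / (2*pi*tau)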
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(amf, gain, 'o')
    amff = np.logspace(-3, 0, 200)
    ax.plot(amff, gain_curve_fit(amff, *sinv))
    ax.set_yscale('log')
    ax.set_xscale('log')
    plt.show()

# which time constant is this? what about the second one? --> rather the second time constant, even though the values are so small?

View File

@@ -1,325 +0,0 @@
import os  # compatibility with windows
from IPython import embed
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit


def step_response(t, a1, a2, tau1, tau2):
    r_step = (a1 * (1 - np.exp(-t / tau1))) + (a2 * (1 - np.exp(-t / tau2)))
    r_step[t < 0] = 0
    return r_step


def sin_response(t, f, p, A):
    r_sin = A * np.sin(2 * np.pi * t * f + p)
    return r_sin


def gain_curve_fit(amf, tau, alpha):
    gain = alpha / np.sqrt(1 + (2 * np.pi * amf * tau)**2)
    return gain
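
# Usage sketch (hypothetical arrays t and f): the model functions above are
# meant to be passed to scipy.optimize.curve_fit, e.g.
#   sv, sc = curve_fit(step_response, t, f, bounds=(0.0, np.inf))
#   a1, a2, tau1, tau2 = sort_values(sv)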
def parse_dataset(dataset_name):
    assert(os.path.exists(dataset_name))  # check that the data exist
    f = open(dataset_name, 'r')
    lines = f.readlines()
    f.close()

    # metadata lists, one entry per trial
    eodfs = []
    deltafs = []
    stimulusfs = []
    duration = []
    pause = []

    # the data itself
    times = []
    frequencies = []
    amplitudes = []

    # temporary lists that get flushed into the lists above
    time = []
    ampl = []
    freq = []

    for i in range(len(lines)):
        l = lines[i].strip()  # strip whitespace from every line
        if "#" in l and "EODf" in l:  # metadata line like '# EODf: 567.2Hz':
            eodfs.append(float(l.split(':')[-1].strip()[:-2]))  # take the part after ':', strip it,
        if "#" in l and "Delta f" in l:  # and drop the last two characters (the 'Hz' unit)
            deltafs.append(float(l.split(':')[-1].strip()[:-2]))
        if "#" in l and "StimulusFrequency" in l:
            stimulusfs.append(float(l.split(':')[-1].strip()[:-2]))
        if "#" in l and "Duration" in l:  # here the last three characters are the 'sec' unit
            duration.append(float(l.split(':')[-1].strip()[:-3]))
        if "#" in l and "Pause" in l:
            pause.append(float(l.split(':')[-1].strip()[:-3]))

        if '#Key' in l:
            if len(time) != 0:  # empty in the first round, so only flush from the second trial on
                times.append(np.array(time))
                amplitudes.append(np.array(ampl))
                frequencies.append(np.array(freq))
            time = []  # empty the temporary lists for the next trial
            ampl = []
            freq = []

        if len(l) > 0 and l[0] != '#':  # line is not empty and not a comment: data line
            temporary = list(map(float, l.split()))  # three whitespace-separated columns
            time.append(temporary[0])
            freq.append(temporary[1])
            ampl.append(temporary[2])

    # flush the data of the last trial
    times.append(np.array(time))
    amplitudes.append(np.array(ampl))
    frequencies.append(np.array(freq))

    return frequencies, times, amplitudes, eodfs, deltafs, stimulusfs, duration, pause
def parse_infodataset(dataset_name):
    assert(os.path.exists(dataset_name))  # check that the data exist
    f = open(dataset_name, 'r')
    lines = f.readlines()
    f.close()

    identifier = []
    for i in range(len(lines)):
        l = lines[i].strip()
        if "#" in l and "Identifier" in l:
            identifier.append((l.split(':')[-1].strip()))
    return identifier


def parse_stimuli_dat(dataset_name):
    assert(os.path.exists(dataset_name))  # check that the data exist
    f = open(dataset_name, 'r')
    lines = f.readlines()
    f.close()

    deltaf = []
    duration = []
    for i in range(len(lines)):
        l = lines[i].strip()
        if "#" in l and "Delta f" in l:
            ll = (l.split(':')[-1].strip())
            deltaf.append(float(ll.split('.')[0]))
        if '#' in l and 'duration' in l:
            duration.append((l.split(':')[-1].strip()))
    return deltaf, duration
def mean_traces(start, stop, timespan, frequencies, time):
    minimumt = min([len(time[k]) for k in range(len(time))])
    tnew = np.arange(start, stop, timespan / minimumt)

    frequency = np.zeros((len(frequencies), len(tnew)))
    for k in range(len(frequencies)):
        ft = time[k][frequencies[k] > -5]
        fn = frequencies[k][frequencies[k] > -5]
        frequency[k, :] = np.interp(tnew, ft, fn)
    mf = np.mean(frequency, axis=0)
    return mf, tnew


def mean_noise_cut_eigen(frequencies, time, n):
    cutf = []
    cutt = []
    for k in np.arange(0, len(frequencies), n):
        t = time[k]
        f = np.mean(frequencies[k:k + n])
        cutf.append(f)
        cutt.append(t)
    return cutf, cutt
def mean_noise_cut(frequencies, n):
    cutf = np.zeros(len(frequencies))
    for k in range(0, len(frequencies) - n):
        kk = int(k)
        f = np.mean(frequencies[kk:kk + n])
        kkk = int(kk + n / 2)
        if k == 0:
            cutf[:kkk] = f
        cutf[kkk] = f
        cutf[kkk:] = f
    return cutf
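
# mean_noise_cut is a sliding boxcar average of width n; the edges of the trace
# are filled with the first and last window means, respectively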
def norm_function(f, t, onset_point, offset_point):
    onset_end = onset_point - 10
    offset_start = offset_point - 10

    norm = []
    for j in range(len(f)):
        base = np.median(f[j][(t[j] >= onset_end) & (t[j] < onset_point)])
        ground = f[j] - base
        jar = np.median(ground[(t[j] >= offset_start) & (t[j] < offset_point)])
        normed = ground / jar
        norm.append(normed)
    return norm, base, jar


def norm_function_eigen(f, t, onset_point, offset_point):
    onset_end = onset_point - 10
    offset_start = offset_point - 10

    base = np.median(f[(t >= onset_end) & (t < onset_point)])
    ground = f - base
    jar = np.median(ground[(t >= offset_start) & (t < offset_point)])
    norm = ground / jar
    return norm


def base_eod(frequencies, time, onset_point):
    base_eod = []
    onset_end = onset_point - 10
    base = np.median(frequencies[(time >= onset_end) & (time < onset_point)])
    base_eod.append(base)
    return base_eod


def JAR_eod(frequencies, time, offset_point):
    jar_eod = []
    offset_start = offset_point - 10
    jar = np.median(frequencies[(time >= offset_start) & (time < offset_point)])
    jar_eod.append(jar)
    return jar_eod
def get_time_zeros(time, ampl, threshold=0.0):
    """
    Determines the time points of the zero crossings of the EOD trace.
    param time: time axis of the recording
    param ampl: EOD trace of the recording
    return zeropoints: list of zero crossings of the EOD trace
    """
    # Xaver's version
    new_time = time[:-1]
    if len(new_time) != len(ampl) - 1:
        new_time = time[:-2]
    zeropoints = new_time[(ampl[:-1] >= threshold) & (ampl[1:] < threshold)]
    dx = np.mean(np.diff(new_time))

    for index in range(len(zeropoints)):  # refine the crossing times
        zeit_index = int(zeropoints[index] / dx)
        if ampl[zeit_index] < threshold:
            dy = ampl[zeit_index + 1] - ampl[zeit_index]
        else:
            dy = ampl[zeit_index] - ampl[zeit_index - 1]
        m = (dy / dx)
        x = (threshold - ampl[zeit_index]) / m
        zeropoints[index] += x
    return zeropoints
def get_new_zero_crossings(time, ampl, threshold=1):
    """
    Determines the time points of the zero crossings of the EOD trace.
    param time: time axis of the recording
    param ampl: EOD trace of the recording
    return zeropoints: list of zero crossings of the EOD trace
    return zero_idx: sample indices of the crossings
    """
    new_time = time[:-1]
    new_amp = ampl[:-1]
    zero_idx = np.where((ampl[:-1] <= threshold) & (ampl[1:] > threshold))[0]
    dx = np.mean(np.diff(new_time))
    zeropoints = new_time[zero_idx]

    for index, zeit_index in enumerate(zero_idx):  # refine the crossing times
        dy = ampl[zeit_index + 1] - ampl[zeit_index]
        m = dy / dx
        x = (threshold - ampl[zeit_index]) / m
        zeropoints[index] += x
    return zeropoints, zero_idx
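
# Both zero-crossing routines refine each crossing time by linear interpolation
# between the two samples bracketing the threshold, giving sub-sample precision:
#   t_cross = t[i] + (threshold - ampl[i]) / ((ampl[i+1] - ampl[i]) / dx)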
def sort_values(values):
    a = values[:2]
    tau = np.array(sorted(values[2:], reverse=False))
    values = np.array([a, tau])
    values_flat = values.flatten()
    return values_flat


def average(freq_all, time_all, start, stop, timespan, dm):
    mf_all, tnew_all = mean_traces(start, stop, timespan, freq_all, time_all)

    plt.plot(tnew_all, mf_all, color='b', label='average', ls='dashed')

    # fit for the average
    sv_all, sc_all = curve_fit(step_response, tnew_all[tnew_all < dm], mf_all[tnew_all < dm],
                               bounds=(0.0, np.inf))  # step_values and step_cov
    values_all = sort_values(sv_all)

    plt.plot(tnew_all[tnew_all < dm], step_response(tnew_all, *sv_all)[tnew_all < dm], color='g', lw=2,
             label='average_fit: a1=%.2f, a2=%.2f, tau1=%.2f, tau2=%.2f' % tuple(values_all))

    print('average: a1, a2, tau1, tau2', values_all)
    return mf_all, tnew_all, values_all
def import_data(dataset):
    import nixio as nix
    nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
    b = nf.blocks[0]
    eod = b.data_arrays['EOD-1']
    dt = eod.dimensions[0].sampling_interval
    di = int(50.0 / dt)
    t = b.tags['Beats_1']
    amfreq = t.metadata['RePro-Info']['settings']['amfreq']
    dat = []
    pre_dat = []
    for mt in b.multi_tags:
        data = mt.retrieve_data(0, 'EOD-1')[:]  # data[0]
        dat.append(data)
        i0 = int(mt.positions[0][0] / dt)
        pre_data = eod[i0 - di:i0]
        pre_dat.append(pre_data)
    return dat, pre_dat, dt
    #nf.close()


def import_data_eigen(dataset):
    import nixio as nix
    nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
    b = nf.blocks[0]
    eod = b.data_arrays['EOD-1']
    dt = eod.dimensions[0].sampling_interval
    di = int(10.0 / dt)
    t = b.tags['ManualJAR_1']
    #amfreq = t.metadata['RePro-Info']['settings']['amfreq']
    dat = []
    pre_dat = []
    for mt in b.multi_tags:
        data = mt.retrieve_data(0, 'EOD-1')[:]  # data[0]
        dat.append(data)
        i0 = int(mt.positions[0][0] / dt)
        pre_data = eod[i0 - di:i0]
        pre_dat.append(pre_data)
    return dat, pre_dat, dt


def import_amfreq(dataset):
    import nixio as nix
    nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
    b = nf.blocks[0]
    eod = b.data_arrays['EOD-1']
    dt = eod.dimensions[0].sampling_interval
    di = int(50.0 / dt)
    t = b.tags['Beats_1']
    amfreq = t.metadata['RePro-Info']['settings']['amfreq']
    return amfreq


if __name__ == '__main__':
    import_data(os.path.join('JAR', '2020-07-21-ak', '2020-07-21-ak.nix'))

notes
View File

@@ -1,10 +1,14 @@
+ look at Natalie's Eigenmannia data with + / - delta f to check for differences
+ put size/weight/dominance/temp into a csv, split it up and link it to the ID, or use pandas;
extract the EOD base frequency, scatter plot against cutoff frequency, ...
+ extract the cutoff frequencies and plot them against gain_all, then sort out points so that they are
uniformly distributed, to show how Metzen & Chacron arrived at their result (hopefully);
also use absolute values of cutoff and tau, since we take the square root in the formula
+ specgram of pre_data next to specgram of data, to see whether the error is in the analysis or in import_data
- cutoff - dominance score
- cutoff - basefrequency
- gain - dominance_score: make a gain predict per fish?
+ eigenmannia: specgram of pre_data next to specgram of data, to see whether the error is in the analysis or in import_data
- insight: I did not subtract the same mean for bm/jm..
- res_df as such is better, but still relatively variable
- negative JAR at -2 Hz for most fish?
- maybe use mean instead of median for the response at the end after all?
+ look at 5Hz data - compare

long term:
- separate file with a script just for plotting the finished data; use the fit code only for data processing

View File

@@ -1,51 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import os
import nix_helpers as nh
from IPython import embed
identifier = ['2013eigen13', '2015eigen16', '2015eigen17', '2015eigen19', '2020eigen22', '2020eigen32']
for ID in identifier:
    res_df = np.load('res_df_%s.npy' % ID)

    mres = []
    mdf = []

    currf = None
    idxlist = []

    for i, d in enumerate(res_df):
        if currf is None or currf == d[0]:
            currf = d[0]
            idxlist.append(i)
        else:  # currf != d[0]
            meanres = []  # lists to average over
            meandf = []
            for x in idxlist:
                meanres.append(res_df[x][1])
                meandf.append(res_df[x][0])
            meanedres = np.mean(meanres)
            meaneddf = np.mean(meandf)
            mres.append(meanedres)
            mdf.append(meaneddf)
            currf = d[0]  # reset for the next group
            idxlist = [i]
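    # flush the last delta-f group, which the else branch above never reaches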
    meanres = []  # lists to average over
    meandf = []
    for y in idxlist:
        meanres.append(res_df[y][1])
        meandf.append(res_df[y][0])
    meanedres = np.mean(meanres)
    meaneddf = np.mean(meandf)
    mres.append(meanedres)
    mdf.append(meaneddf)

    plt.plot(mdf, mres, 'o')
    plt.xlabel('deltaf [Hz]')
    plt.ylabel('JAR_response [Hz]')
    plt.axhline(0, color='grey', lw=1)
    plt.axvline(0, color='grey', lw=1)
    plt.title('JAR_response_to_deltaf_%s' % ID)
    plt.show()

View File

@@ -1,48 +0,0 @@
import os
import numpy as np
import matplotlib.pyplot as plt
from IPython import embed
from jar_functions import mean_noise_cut
from matplotlib.mlab import specgram
import DataLoader as dl
#print(np.logspace(-3, 1, 10))
'''for idx, dataset in enumerate(datasets):
    datapath = os.path.join(base_path, dataset)
    for info, key, time, data in dl.iload_traces(datapath, repro='Beats', before=0.0, after=0.0):
'''
'''base_path = 'D:\\jar_project\\JAR'

# not: -5Hz delta f, 19-aa, 22-ae, 22-ad (?)
datasets = [#'2020-06-19-aa', # -5Hz delta f, horrible fit
            #'2020-06-19-ab', # -5Hz delta f, bad fit
            #'2020-06-22-aa', # -5Hz delta f, bad fit
            #'2020-06-22-ab', # -5Hz delta f, bad fit
            #'2020-06-22-ac', # -15Hz delta f, good fit
            #'2020-06-22-ad', # -15Hz delta f, horrible fit
            #'2020-06-22-ae', # -15Hz delta f, horrible fit
            '2020-06-22-af', # -15Hz delta f, good fit
            #'2020-07-21-ak' # sin
            ]

for idx, dataset in enumerate(datasets):
    datapath = os.path.join(base_path, dataset)
    for info, key, time, data in dl.iload_traces(datapath, repro='Beats', before=0.0, after=0.0):
        print(data[0])

dat = np.arange(100)

for d in range(int(len(data) / 10)):
    nfft = 2
    spec, freqs, times = specgram(data[0][d * 10:(d + 1) * 10], NFFT=nfft, noverlap=nfft * 0.5)
    #print(freqs)
    #print(times)
embed()'''
g = [1.2917623576698833, -5.479055166593157, -2.689492238578325, -0.11604244418416806, -0.05353823781665627]
a = [0.2, 0.002, 0.02, 0.5, 1.0]
np.save('g.npy', g)
print(np.load('g.npy'))

View File

@@ -1,79 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import pylab
from IPython import embed
def avgNestedLists(nested_vals):
    """
    Averages a 2-D array and returns a 1-D array of all of the columns
    averaged together, regardless of their dimensions.
    """
    output = []
    maximum = 0
    for lst in nested_vals:
        if len(lst) > maximum:
            maximum = len(lst)
    for index in range(maximum):  # go through each index of the longest list
        temp = []
        for lst in nested_vals:  # go through each list
            if index < len(lst):  # if not an index error
                temp.append(lst[index])
        output.append(np.nanmean(temp))
    return output
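
# Example (sketch): avgNestedLists([[1, 2, 3], [3, 4]]) -> [2.0, 3.0, 3.0];
# shorter lists simply drop out of the average at the missing indices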
identifier = ['2018lepto4',
              '2018lepto1',
              '2018lepto5',
              '2018lepto76',
              '2018lepto98',
              '2019lepto03',
              '2019lepto24',
              '2019lepto27',
              '2019lepto30',
              '2020lepto04',
              '2020lepto06',
              '2020lepto16',
              '2020lepto19',
              '2020lepto20'
              ]
amf = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]
all = []
new_all = []

for ident in identifier:
    data = np.load('gain_%s.npy' % ident)
    max = np.max(data)
    new_data = data / max
    all.append(data)
    new_all.append(new_data)

av = avgNestedLists(all)
new_av = avgNestedLists(new_all)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(amf, av, 'o', label='not normed')
ax.plot(amf, new_av, 'o', label='normed')
ax.legend()
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('gaincurve_average_allfish')
ax.set_ylabel('gain [Hz/(mV/cm)]')
ax.set_xlabel('envelope_frequency [Hz]')
plt.show()
embed()
'''len_arr = []
for a in all:
    len_arr.append(len(a))
max_a = np.max(len_arr)

arr = np.ma.empty((1, len(all), max_a))
arr.mask = True

for x, a in enumerate(all):
    arr[:a.shape[0], x] = arr[0][x]
embed()
print(arr.mean(axis=2))
embed()'''

View File

@@ -1,166 +0,0 @@
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pylab
from IPython import embed
from scipy.optimize import curve_fit
from jar_functions import sin_response
from jar_functions import mean_noise_cut
from jar_functions import gain_curve_fit
def take_second(elem):  # key function for sorting the file names
    return elem[1]
identifier = [#'2018lepto1',
              #'2018lepto4',
              #'2018lepto5',
              #'2018lepto76',
              #'2018lepto98',
              #'2019lepto03',
              #'2019lepto24',
              #'2019lepto27',
              #'2019lepto30',
              '2020lepto04',
              #'2020lepto06',
              #'2020lepto16',
              #'2020lepto19',
              #'2020lepto20'
              ]
for ident in identifier:
    predict = []

    rootmeansquare = []
    threshold = []

    gain = []
    mgain = []
    phaseshift = []
    mphaseshift = []
    amfreq = []
    amf = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]

    currf = None
    idxlist = []

    data = sorted(np.load('%s files.npy' % ident), key=take_second)  # list with the file names in it

    for i, d in enumerate(data):
        dd = list(d)
        jar = np.load('%s.npy' % dd)  # load the data for every file name
        jm = jar - np.mean(jar)  # remove the DC offset by subtracting the mean
        print(dd)

        time = np.load('%s time.npy' % dd)  # time file
        dt = time[1] - time[0]

        n = int(1 / float(d[1]) / dt)
        cutf = mean_noise_cut(jm, n=n)
        cutt = time
        #plt.plot(time, jm - cutf, label='cut amfreq')
        #plt.plot(time, jm, label='spec')
        #plt.legend()
        #plt.show()
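        # fit a sine at the envelope frequency (start values: f = amfreq,
        # p = 2, A = 0.5) to the detrended trace; the fitted amplitude A is the
        # gain and p the phase shift at this envelope frequency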
        sinv, sinc = curve_fit(sin_response, time, jm - cutf, [float(d[1]), 2, 0.5])  # fitting
        print('frequency, phaseshift, amplitude:', sinv)
        p = np.sqrt(sinv[1]**2)
        A = np.sqrt(sinv[2]**2)
        f = float(d[1])
        phaseshift.append(p)
        gain.append(A)
        if f not in amfreq:
            amfreq.append(f)

        # root mean square of the residuals
        RMS = np.sqrt(np.mean(((jm - cutf) - sin_response(cutt, sinv[0], sinv[1], sinv[2]))**2))
        thresh = A / np.sqrt(2)

        #plt.plot(time, sin_response(time, *sinv), label='fit: f=%f, p=%.2f, A=%.2f' % tuple(sinv))
        #plt.legend()
        #plt.show()
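        # quality criterion: a pure sine of amplitude A has an RMS of A/sqrt(2);
        # fits whose residual RMS exceeds this threshold are sorted out below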
        # mean over same amfreqs for phase and gain
        if currf is None or currf == d[1]:
            currf = d[1]
            idxlist.append(i)
        else:  # currf != d[1]
            meanf = []  # lists to average over
            meanp = []
            meanrms = []
            meanthresh = []
            for x in idxlist:
                meanf.append(gain[x])
                meanp.append(phaseshift[x])
                meanrms.append(RMS)
                meanthresh.append(thresh)
            meanedf = np.mean(meanf)
            meanedp = np.mean(meanp)
            meanedrms = np.mean(meanrms)
            meanedthresh = np.mean(meanthresh)

            mgain.append(meanedf)
            mphaseshift.append(meanedp)
            rootmeansquare.append(meanedrms)
            threshold.append(meanedthresh)
            currf = d[1]  # reset for the next group
            idxlist = [i]

    # flush the last amfreq group
    meanf = []
    meanp = []
    meanrms = []
    meanthresh = []
    for y in idxlist:
        meanf.append(gain[y])
        meanp.append(phaseshift[y])
        meanrms.append(RMS)
        meanthresh.append(thresh)
    meanedf = np.mean(meanf)
    meanedp = np.mean(meanp)
    meanedrms = np.mean(meanrms)
    meanedthresh = np.mean(meanthresh)

    mgain.append(meanedf)
    mphaseshift.append(meanedp)
    rootmeansquare.append(meanedrms)
    threshold.append(meanedthresh)
    # predicted gain of a first-order low-pass (cf. gain_curve_fit);
    # 3.14 is presumably a placeholder for tau
    for f in amf:
        G = np.max(mgain) / np.sqrt(1 + (2 * np.pi * f * 3.14)**2)
        predict.append(G)

    # as arrays
    mgain_arr = np.array(mgain)
    amfreq_arr = np.array(amfreq)
    rootmeansquare_arr = np.array(rootmeansquare)
    threshold_arr = np.array(threshold)

    # condition to be fulfilled: RMS < threshold or RMS < mean(RMS)
    idx_arr = (rootmeansquare_arr < threshold_arr) | (rootmeansquare_arr < np.mean(rootmeansquare_arr))
    fig = plt.figure()
    ax0 = fig.add_subplot(2, 1, 1)
    ax0.plot(amfreq_arr[idx_arr], mgain_arr[idx_arr], 'o')
    #ax0.plot(amf, predict)
    ax0.set_yscale('log')
    ax0.set_xscale('log')
    ax0.set_title('%s' % data[0][0])
    ax0.set_ylabel('gain [Hz/(mV/cm)]')
    ax0.set_xlabel('envelope_frequency [Hz]')
    #plt.savefig('%s gain' % data[0][0])

    ax1 = fig.add_subplot(2, 1, 2, sharex=ax0)
    ax1.plot(amfreq, threshold, 'o-', label='threshold', color='b')
    ax1.set_xscale('log')
    ax1.plot(amfreq, rootmeansquare, 'o-', label='RMS', color='orange')
    ax1.set_xscale('log')
    ax1.set_xlabel('envelope_frequency [Hz]')
    ax1.set_ylabel('RMS [Hz]')
    plt.legend()
    pylab.show()

    np.save('gain_%s' % ident, mgain_arr[idx_arr])
    np.save('amf_%s' % ident, amfreq_arr[idx_arr])

embed()

View File

@@ -1,108 +0,0 @@
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.mlab import specgram
import os
import glob
import IPython
import numpy as np
#import DataLoader as dl
from IPython import embed
#from tqdm import tqdm
from scipy.optimize import curve_fit
from jar_functions import step_response
from jar_functions import sin_response
from jar_functions import parse_dataset
from jar_functions import parse_infodataset
from jar_functions import mean_traces
from jar_functions import mean_noise_cut
from jar_functions import norm_function
from jar_functions import sort_values
from jar_functions import average
from jar_functions import import_data
from jar_functions import import_amfreq
base_path = 'D:\\jar_project\\JAR\\eigenmannia\\sin\\2015eigen8'
time_all = []
freq_all = []
amfrequencies = []
gains = []
files = []
for idx, dataset in enumerate(os.listdir(base_path)):
    if dataset == 'prerecordings':
        continue
    datapath = os.path.join(base_path, dataset, '%s.nix' % dataset)
    #print(datapath)
    data, pre_data, dt = import_data(datapath)

    nfft = 2**17

    for d, dat in enumerate(data):
        if len(dat) == 1:
            print(datapath)

        file_name = []
        ID = []

        # identifier for the file_name
        infodatapath = os.path.join(base_path, dataset, 'info.dat')
        i = parse_infodataset(infodatapath)
        identifier = i[0]
        if not identifier[1:-1] in ID:
            ID.append(identifier[1:-1])

        # file_name
        file_name.append(ID[0])

        amfreq = import_amfreq(datapath)
        print(amfreq)
        file_name.append(str(amfreq))

        file_name.append(str(d))

        files.append(file_name)
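        # for very slow envelope frequencies a coarser time step (80 % overlap)
        # is sufficient; faster envelopes need the finer 95 % overlap to
        # resolve the modulation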
        # spectrogram
        if float(amfreq) < 0.01:
            spec, freqs, times = specgram(dat, Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.8)
        else:
            spec, freqs, times = specgram(dat, Fs=1 / dt, detrend='mean', NFFT=nfft, noverlap=nfft * 0.95)

        dbspec = 10.0 * np.log10(spec)  # in dB
        power = dbspec[:, 50]

        fish_p = power[(freqs > 400) & (freqs < 1000)]
        fish_f = freqs[(freqs > 400) & (freqs < 1000)]

        index = np.argmax(fish_p)
        eodf = fish_f[index]
        eodf4 = eodf * 4

        lim0 = eodf4 - 10
        lim1 = eodf4 + 15

        df = freqs[1] - freqs[0]
        ix0 = int(np.floor(lim0 / df))  # back to index
        ix1 = int(np.ceil(lim1 / df))  # back to index

        spec4 = dbspec[ix0:ix1, :]
        freq4 = freqs[ix0:ix1]
        jar4 = freq4[np.argmax(spec4, axis=0)]  # all freqs at max specs over axis 0
        jar = jar4 / 4
        jm = jar4 - np.mean(jar4)  # data we take
        cut_times = times[:len(jar4)]

        plt.imshow(spec4, cmap='jet', origin='lower', extent=(times[0], times[-1], lim0, lim1), aspect='auto', vmin=-80, vmax=-10)
        plt.plot(cut_times, jar4)
        plt.show()

        # save the data
        #np.save('%s time' % file_name, cut_times)
        #np.save('%s' % file_name, jar4)

# save the file names for this fish
#np.save('%s files' % ID[0], files)
embed()

View File

@@ -1,115 +0,0 @@
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import os
import glob
import IPython
import numpy as np
from IPython import embed
from scipy.optimize import curve_fit
from jar_functions import parse_dataset
from jar_functions import parse_infodataset
from jar_functions import mean_traces
from jar_functions import mean_noise_cut
from jar_functions import norm_function
from jar_functions import step_response
from jar_functions import sort_values
from jar_functions import average
base_path = 'D:\\jar_project\\JAR\\step\\step_2018lepto98'
# not: -5Hz delta f, 19-aa, 22-ae, 22-ad (?)
datasets = [#'2020-06-19-aa', # -5Hz delta f, horrible fit
            #'2020-06-19-ab', # -5Hz delta f, bad fit
            #'2020-06-22-aa', # -5Hz delta f, bad fit
            #'2020-06-22-ab', # -5Hz delta f, bad fit
            #'2020-06-22-ac', # -15Hz delta f, good fit
            #'2020-06-22-ad', # -15Hz delta f, horrible fit
            #'2020-06-22-ae', # -15Hz delta f, horrible fit
            #'2020-06-22-af', # -15Hz delta f, good fit
            '2020-07-13-ad',
            '2020-07-13-ae',
            '2020-07-13-af',
            '2020-07-13-ag',
            '2020-07-13-ah',
            '2020-07-13-ai',
            '2020-07-13-aj',
            #'2020-07-13-ak',
            #'2020-07-13-al',
            '2020-07-13-am',
            #'2020-07-13-an',
            #'2020-07-13-ao'
            ]
#dat = glob.glob('D:\\jar_project\\JAR\\2020*\\beats-eod.dat')
#infodat = glob.glob('D:\\jar_project\\JAR\\2020*\\info.dat')

time_all = []
freq_all = []

ID = []
col = ['dimgrey', 'grey', 'darkgrey', 'silver', 'lightgrey', 'gainsboro', 'whitesmoke']

for infodataset in datasets:
    infodataset = os.path.join(base_path, infodataset, 'info.dat')
    i = parse_infodataset(infodataset)
    identifier = i[0]
    ID.append(identifier)
labels = zip(ID, datasets)  # pair fish identifiers with their datasets
for idx, dataset in enumerate(datasets):
    dataset = os.path.join(base_path, dataset, 'beats-eod.dat')

    # input for the function
    frequency, time, amplitude, eodf, deltaf, stimulusf, duration, pause = parse_dataset(dataset)

    dm = np.mean(duration)
    pm = np.mean(pause)
    timespan = dm + pm
    start = np.mean([t[0] for t in time])
    stop = np.mean([t[-1] for t in time])

    norm = norm_function(frequency, time, onset_point=dm - dm, offset_point=dm)  # dm - dm only works if onset = 0 s

    mf, tnew = mean_traces(start, stop, timespan, norm, time)  # maybe fixed timespan/sampling rate

    cf, ct = mean_noise_cut(mf, n=1250)
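    # norm_function shifts each trace so that the pre-stimulus baseline is 0 and
    # scales it so that the steady-state JAR is 1; the fitted amplitudes a1, a2
    # are therefore fractions of the full response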
    cf_arr = np.array(cf)
    ct_arr = np.array(ct)

    freq_all.append(cf_arr)
    time_all.append(ct_arr)

    plt.plot(ct_arr, cf_arr, label='fish=%s' % datasets[idx])  #, color=col[idx]

    sv, sc = curve_fit(step_response, ct_arr[ct_arr < dm], cf_arr[ct_arr < dm], [1.0, 1.0, 5.0, 50.0], bounds=(0.0, np.inf))  # step_values and step_cov

    # sorted a and tau
    values = sort_values(sv)

    # fit for each trace
    plt.plot(ct_arr[ct_arr < dm], step_response(ct_arr[ct_arr < dm], *sv), color='orange', label='fit: a1=%.2f, a2=%.2f, tau1=%.2f, tau2=%.2f' % tuple(values))
    print('fish: a1, a2, tau1, tau2', values)
# average over all fish
mf_all, tnew_all, values_all = average(freq_all, time_all, start, stop, timespan, dm)

#const_line = plt.axhline(y=0.632)
stimulus_duration = plt.hlines(y=-0.25, xmin=0, xmax=100, color='r', label='stimulus_duration')
base_line = plt.axhline(y=0, color='black', ls='dotted', linewidth='1')

plt.xlim([-10, 220])
plt.xlabel('time [s]')
plt.ylabel('rel. JAR magnitude')
plt.title('relative JAR')
plt.savefig('relative JAR')
plt.legend(loc='lower right')
plt.show()
embed()

# ask Natalie whether she can measure at different amplitudes (see Tim)