xaver 2020-07-24 14:30:37 +02:00
parent 3f68ac8c1a
commit 0d550e2a5d
4 changed files with 161 additions and 32 deletions

View File

@@ -169,3 +169,119 @@ def average(freq_all, time_all, start, stop, timespan, dm):
    print('average: a1, a2, tau1, tau2', values_all)
    return mf_all, tnew_all, values_all
def iload_traces(basedir, repro='', before=0.0, after=0.0):
    """
    Iterates over the trials of a recording (stimuli.dat plus trace-*.raw files).

    basedir : directory containing stimuli.dat and the trace-*.raw files
    repro   : if not empty, only trials of this RePro are yielded
    before  : time in seconds to load before each stimulus onset
    after   : time in seconds to load after each stimulus end

    yields per trial:
    info : metadata from stimuli.dat
    key : key from stimuli.dat
    time : an array for the time axis
    data : the data of all traces of a single trial
    """
    p = re.compile(r'([-+]?\d*\.\d+|\d+)\s*(\w+)')
    reproid = 'RePro'
    deltat = None
    # open traces files:
    sf = []
    for trace in range(1, 1000000):
        if path.isfile('%s/trace-%i.raw' % (basedir, trace)):
            sf.append(open('%s/trace-%i.raw' % (basedir, trace), 'rb'))
        else:
            break
    basecols = None
    baserp = True
    for info, key, dat in iload('%s\\stimuli.dat' % (basedir,)):
        if deltat is None:
            # parse the sample interval of trace 1 (a number followed by a unit)
            deltat, tunit = p.match(info[0]['sample interval%i' % 1]).groups()
            deltat = float(deltat)
            if tunit == 'ms':
                deltat *= 0.001
        if 'repro' in info[-1] or 'RePro' in info[-1]:
            if reproid not in info[-1]:
                reproid = 'repro'
            if len(repro) > 0 and repro != info[-1][reproid] and \
               basecols is None:
                continue
            baserp = (info[-1][reproid] == 'BaselineActivity')
            duration_indices = [i for i, x in enumerate(key[2]) if x == "duration"]
        else:
            baserp = True
            duration_indices = []
        if dat.shape == (1, 1) and dat[0, 0] == 0:
            warnings.warn("iload_traces: Encountered incomplete '-0' trial.")
            yield info, key, array([])
            continue
        if baserp:
            basecols = []
            basekey = key
            baseinfo = info
            if len(dat) == 0:
                for trace in range(len(sf)):
                    basecols.append(0)
        for d in dat:
            if not baserp and basecols is not None:
                # first yield the baseline activity recorded before this stimulus:
                x = []
                xl = []
                for trace in range(len(sf)):
                    col = int(d[trace])
                    sf[trace].seek(basecols[trace] * 4)
                    buffer = sf[trace].read((col - basecols[trace]) * 4)
                    tmp = fromstring(buffer, float32)
                    x.append(tmp)
                    xl.append(len(tmp))
                ml = min(xl)
                for k in range(len(x)):
                    if len(x[k]) > ml:
                        warnings.warn("truncated trace %d to %d" % (k, ml))
                        x[k] = x[k][:ml]
                xtime = arange(0.0, len(x[0])) * deltat - before
                yield baseinfo, basekey, xtime, asarray(x)
                basecols = None
                if len(repro) > 0 and repro != info[-1][reproid]:
                    break
            durations = [d[i] for i in duration_indices if not isnan(d[i])]
            if not baserp:
                if len(durations) > 0:
                    duration = max(durations)
                    if duration < 0.001:  # if the duration is less than 1ms
                        warnings.warn(
                            "iload_traces: Skipping one trial because its duration is <1ms and therefore it is probably rubbish")
                        continue
                    l = int(before / deltat)
                    r = int((duration + after) / deltat)
                else:
                    continue
            x = []
            xl = []
            for trace in range(len(sf)):
                col = int(d[trace])
                if baserp:
                    if col < 0:
                        col = 0
                    basecols.append(col)
                    continue
                sf[trace].seek((col - l) * 4)
                buffer = sf[trace].read((l + r) * 4)
                tmp = fromstring(buffer, float32)
                x.append(tmp)
                xl.append(len(tmp))
            if baserp:
                break
            ml = min(xl)
            for k in range(len(x)):
                if len(x[k]) > ml:
                    warnings.warn("truncated trace %d to %d" % (k, ml))
                    x[k] = x[k][:ml]
            time = arange(0.0, len(x[0])) * deltat - before
            yield info, key, time, asarray(x)
    for trace in range(len(sf)):
        sf[trace].close()
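# A minimal usage sketch (assuming basedir contains stimuli.dat and the trace-*.raw files;
# the folder name and the 'Beats' repro below are only examples):
#
#   for info, key, time, data in iload_traces('2020-06-22-af', repro='Beats'):
#       print(info[1]['RePro'], data.shape)   # data has one row per recorded trace
#       # time runs from -before to roughly duration + after in steps of the sample interval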

View File

@@ -36,10 +36,13 @@ mean1 = np.mean(z, axis=1)
print(mean0)
print(mean1)
'''
''''''
t = [600, 650]
x = 1 - np.exp(t / 11)
print(x)
a, b, c, d = normalized_JAR(fre)
a, b, c, d = normalized_JAR(fre)
'''
print(np.logspace(-3, 1, 10))
embed()

View File

@@ -23,9 +23,9 @@ datasets = [#'2020-06-19-aa', #-5Hz delta f, horrible fit
#'2020-06-19-ab', #-5Hz delta f, bad fit
#'2020-06-22-aa', #-5Hz delta f, bad fit
#'2020-06-22-ab', #-5Hz delta f, bad fit
'2020-06-22-ac', #-15Hz delta f, good fit
'2020-06-22-ad', #-15Hz delta f, horrible fit
'2020-06-22-ae', #-15Hz delta f, horrible fit
#'2020-06-22-ac', #-15Hz delta f, good fit
#'2020-06-22-ad', #-15Hz delta f, horrible fit
#'2020-06-22-ae', #-15Hz delta f, horrible fit
'2020-06-22-af' #-15Hz delta f, good fit
]
@@ -58,7 +58,7 @@ for idx, dataset in enumerate(datasets):
norm = norm_function(frequency, time, onset_point=dm - dm, offset_point=dm) # dm-dm only works if onset = 0 sec
mf , tnew = mean_traces(start, stop, timespan, norm, time) # maybe fixed timespan/sampling rate
mf, tnew = mean_traces(start, stop, timespan, norm, time) # maybe fixed timespan/sampling rate
cf, ct = mean_noise_cut(mf, tnew, n=1250)

View File

@@ -25,10 +25,11 @@ datasets = [#'2020-06-19-aa', #-5Hz delta f, horrible fit
#'2020-06-19-ab', #-5Hz delta f, bad fit
#'2020-06-22-aa', #-5Hz delta f, bad fit
#'2020-06-22-ab', #-5Hz delta f, bad fit
'2020-06-22-ac', #-15Hz delta f, good fit
#'2020-06-22-ad', #-15Hz delta f, horrible fit
#'2020-06-22-ae', #-15Hz delta f, horrible fit
#'2020-06-22-af' #-15Hz delta f, good fit
#'2020-06-22-ac', #-15Hz delta f, good fit
#'2020-06-22-ad', #-15Hz delta f, horrible fit
#'2020-06-22-ae', #-15Hz delta f, horrible fit
#'2020-06-22-af', #-15Hz delta f, good fit
'2020-07-21-ak' #sin
]
#dat = glob.glob('D:\\jar_project\\JAR\\2020*\\beats-eod.dat')
@@ -51,33 +52,42 @@ for infodataset in datasets:
for idx, dataset in enumerate(datasets):
    datapath = os.path.join(base_path, dataset)
    for info, key, time, data in dl.iload_traces(datapath, repro='Beats', before=0.0, after=0.0):
        print( info[1]['RePro'] )
        print(data.shape)
        #plt.plot(time, data[0]) # V(t)
        #plt.show()
        nfft = 50000
        nfft1 = 5000
        spec, freqs, times = specgram(data[0], Fs=1.0/(time[1]-time[0]), detrend='mean', NFFT=nfft, noverlap=nfft//2)
        spec1, freqs1, times1 = specgram(data[0], Fs=1.0 / (time[1] - time[0]), detrend='mean', NFFT=nfft1, noverlap=nfft1 //2)
        nfft = 2**18
        spec, freqs, times = specgram(data[0], Fs=1.0/(time[1]-time[0]), detrend='mean', NFFT=nfft, noverlap=int(nfft*0.8))
        dbspec = 10.0*np.log10(spec) # in dB
        dbspec1 = 10.0 * np.log10(spec1) # in dB
        print(np.min(dbspec), np.max(dbspec))
        #plt.imshow(dbspec, cmap='jet', origin='lower', extent=(times[0], times[-1], freqs[0], freqs[-1]), aspect='auto', vmin=-80, vmax=-30)
        plt.imshow(dbspec1, cmap='jet', origin='lower', extent=(times1[0], times1[-1], freqs1[0], freqs1[-1]), aspect='auto', vmin=-80, vmax=-30 )
        # interpolation, vmin, vmax
        # plot decibel as a function of frequency for one time slot: why is the frequency resolution worse when nfft is high?
        # does the frequency resolution get better with a higher nfft, since with a high nfft df is small and thus the resolution high?
        # temporal resolution gets worse with a larger nfft
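        # a rough note on the trade-off (assuming Fs = 1/(time[1]-time[0]) is the sampling rate):
        # the frequency resolution of the spectrogram is df = Fs/nfft, while one segment spans
        # nfft/Fs seconds, so a larger nfft gives finer frequency bins but coarser time bins;
        # e.g. fs = 1.0/(time[1]-time[0]); print(fs/nfft, nfft/fs) gives df in Hz and the segment length in s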
        #for k in range(len(times)):
        #plt.plot(freqs, dbspec[:,100], label = '0')
        #plt.plot(freqs1, dbspec1[:, 100], label = '1')
        power = dbspec[:, 10]
        fish_p = power[(freqs > 400) & (freqs < 1000)]
        fish_f = freqs[(freqs > 400) & (freqs < 1000)]
        index = np.argmax(fish_p)
        eodf = fish_f[index]
        eodf4 = eodf * 4
        print(eodf4)
        lim0 = eodf4-20
        lim1 = eodf4+20
        plt.imshow(dbspec, cmap='jet', origin='lower', extent=(times[0], times[-1], freqs[0], freqs[-1]), aspect='auto', vmin=-80, vmax=-30)
        # check of plt.imshow
        df = freqs[1] - freqs[0]
        df1 = freqs1[1] - freqs1[0]
        print(df)
        print(df1)
        plt.legend()
        ix = int(np.round(eodf4/df))
        ix0 = int(np.round(lim0/df))
        ix1 = int(np.round(lim1/df))
        spec4 = dbspec[ix0:ix1, :]
        freq4 = freqs[ix0:ix1]
        jar4 = freq4[np.argmax(spec4, axis=0)]
        jar = jar4 / 4
        plt.plot(times, jar4, '-k')
        #plt.plot(freqs, dbspec[:,100], label = '0')
        #plt.legend()
        plt.ylim(lim0, lim1)
        plt.show()
        embed()
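The script above estimates the fish's EOD frequency from one time slice of the spectrogram (the strongest peak between 400 and 1000 Hz) and then follows the power maximum of the 4th harmonic within a ±20 Hz band to obtain the JAR trace. A minimal sketch of the same idea as a reusable helper; the function name and its keyword arguments are made up here, while the search band, the harmonic and the ±20 Hz window are taken from the script, and, like the script, the index arithmetic assumes freqs[0] == 0:

import numpy as np

def track_jar(dbspec, freqs, t_idx=10, band=(400.0, 1000.0), harmonic=4, halfwidth=20.0):
    # EODf estimate: strongest frequency within the search band at one time slice
    sel = (freqs > band[0]) & (freqs < band[1])
    eodf = freqs[sel][np.argmax(dbspec[sel, t_idx])]
    # follow the power maximum of the chosen harmonic within +/- halfwidth Hz over time
    df = freqs[1] - freqs[0]
    ix0 = int(np.round((harmonic * eodf - halfwidth) / df))
    ix1 = int(np.round((harmonic * eodf + halfwidth) / df))
    jar_harm = freqs[ix0:ix1][np.argmax(dbspec[ix0:ix1, :], axis=0)]
    return jar_harm / harmonic  # frequency trace scaled back to the fundamental

With the spectrogram from above this would be called as jar = track_jar(dbspec, freqs), which corresponds to the jar4 / 4 trace computed in the script.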