xaver 2020-06-28 19:47:59 +02:00
parent 09b17a04eb
commit 25b2f43e71
3 changed files with 37 additions and 20 deletions

View File

@@ -1,5 +1,5 @@
import os #compatibility with Windows
from IPython import embed
def parse_dataset(dataset_name):
assert os.path.exists(dataset_name) #make sure the data file exists
@@ -7,15 +7,18 @@ def parse_dataset(dataset_name):
lines = f.readlines() #read data
f.close() #close the file after reading
eodfs = [] #metadata lists for every loop
# metadata lists for every loop
eodfs = []
deltafs = []
stimulusfs = []
times = [] #data itself
# data itself
times = []
frequencies = []
amplitudes = []
time = [] #temporary lists whose contents we later move into the lists above
# temporary lists whose contents we later move into the lists above
time = []
ampl = []
freq = []
@@ -29,13 +32,16 @@ def parse_dataset(dataset_name):
stimulusfs.append(float(l.split(':')[-1].strip()[:-2])) #take the value behind ':' and cut off the two trailing unit characters
if '#Key' in l:
#print('KEY')
if len(time) != 0: #only empty in the very first round
times.append(time) #from the 2nd '#Key' on, time is not empty, so we append the
amplitudes.append(ampl) #times/amplitudes/frequencies collected during the previous loop
frequencies.append(freq)
time = [] #temporary lists for the data of the next loop, which we put into the lists above later
ampl = []
time = [] #reset the temporary lists (rebinding the names we made before),
ampl = [] #so they are empty again for the next loop
freq = []
print(len(times))
if len(l) > 0 and l[0] != '#': #line is not empty and does not start with '#'
temporary = list(map(float, l.split())) #split the line at whitespace and convert its three fields to floats
@@ -44,7 +50,16 @@
ampl.append(temporary[2])
times.append(time) #append the buffers of the last loop to the final lists
amplitudes.append(ampl)
amplitudes.append(ampl) #the last loop is not followed by another '#Key' line, so it has to be flushed here
frequencies.append(freq)
embed()
minimum = min(len(frequencies[0]), len(frequencies[1])) #trials can differ in length
f1 = frequencies[0][:minimum] #cut both trials to the length of the shorter one
f2 = frequencies[1][:minimum]
#print(len(time))
print(len(times))
embed()
return times, frequencies, amplitudes, eodfs, deltafs, stimulusfs #all data and metadata collected from the file
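
The flush pattern above — fill temporary buffers, append them to the per-loop lists whenever the next '#Key' header shows up, and flush once more after the loop ends — is the heart of the parser. A minimal, self-contained sketch of the same pattern; the three-column line format is only an assumption for illustration, not necessarily the repo's actual data layout:

lines = ['#Key', '0.0 500.0 1.0', '0.1 510.0 1.1',
         '#Key', '0.0 505.0 0.9']
times, frequencies, amplitudes = [], [], []
time, freq, ampl = [], [], []
for l in lines:
    if '#Key' in l:
        if len(time) != 0:            # empty only before the first data block
            times.append(time)        # flush the previous loop's buffers
            frequencies.append(freq)
            amplitudes.append(ampl)
        time, freq, ampl = [], [], []  # start fresh buffers
    elif len(l) > 0 and l[0] != '#':
        t, f, a = map(float, l.split())
        time.append(t)
        freq.append(f)
        ampl.append(a)
times.append(time)                    # final flush for the last loop
frequencies.append(freq)
amplitudes.append(ampl)
print(len(times))                     # -> 2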

View File

@@ -1,4 +1,5 @@
import numpy as np
"""
#second_try scratch
minimum = min(len(frequency[0]), len(frequency[1]))
f1 = frequency[0][:minimum]
@@ -14,4 +15,10 @@ for i in range(len(minimum)):
for f in frequency:
print(np.mean(f))
# mean_f = np.mean(x) for x in zip(frequencies1, frequencies2)
"""
g = [1, 2]
h = [3, 4]
z = np.array([[1, 2], [3, 4]], dtype=object) #the rows must be wrapped in one list; np.array's second positional argument is the dtype
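
With an explicit dtype=object, numpy also accepts rows of unequal length, which is what the trials here are before they get cut to the same minimum; without it, ragged input raises an error in recent numpy versions. A short sketch of both routes — the variable names are illustrative only:

import numpy as np

ragged = np.array([[1, 2, 3], [4, 5]], dtype=object)  # 1-d object array holding two lists
print(ragged.shape)                                   # (2,)

# the alternative used in parse_dataset: cut every trial to the shortest one
trials = [[1, 2, 3], [4, 5]]
minimum = min(len(t) for t in trials)
trimmed = np.array([t[:minimum] for t in trials])     # regular (2, 2) numeric array
print(trimmed.mean(axis=0))                           # element-wise mean across trials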

View File

@@ -29,22 +29,17 @@ for dataset in datasets:
deltaf.append(d)
stimulusf.append(s)
embed()
#next step: add further measurements and then average over all trials, possibly plot it normalized (frequency / baseline frequency?)?
#time constant: from second 0 up to 63%? relative JAR
mean = np.mean(frequency, axis=0) #average the frequency traces across trials
#embed()
#possibly plot it normalized (frequency / baseline frequency?)?
#time constant: from second 0 up to 63%? relative JAR
'''
plt.plot(t, f)
plt.plot(time, frequency)
plt.xlabel('time [s]')
plt.ylabel('frequency [Hz]')
plt.xlim([-10,200])
plt.title('second try because first try was sold out')
plt.show()'''
plt.show()
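
The "63%" in the comments refers to the time constant of an exponential approach: after one time constant tau, the response has covered 1 - 1/e ≈ 63% of the distance from baseline to plateau. A hedged sketch of how tau could be read off a trial-averaged JAR trace; the trace below is synthetic and the variable names are assumptions, not this script's actual data:

import numpy as np

time = np.arange(0.0, 200.0, 0.1)
mean = 500.0 + 20.0 * (1.0 - np.exp(-time / 15.0))  # synthetic JAR with tau = 15 s

baseline = mean[0]   # frequency before the jamming stimulus takes effect
plateau = mean[-1]   # steady-state frequency at the end of the JAR
level = baseline + 0.63 * (plateau - baseline)
tau = time[np.argmax(mean >= level)]  # first crossing of the 63% level
print(tau)                            # ~15 s for this synthetic trace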