# jar_project/jar_functions.py
# Last modified: 2020-06-29 16:38:20 +02:00
# (72 lines, 3.2 KiB, Python)
import os  # compability with windows

import numpy as np
from IPython import embed
def parse_dataset(dataset_name):
    """Parse a text dataset containing one or more stimulus sections.

    Each section consists of metadata lines ('# EODf: ...Hz',
    '# Delta f: ...Hz', '# StimulusFrequency: ...Hz'), a '#Key' header,
    and whitespace-separated data rows of (time, frequency, amplitude).

    Parameters
    ----------
    dataset_name : str
        Path to the dataset file; must exist.

    Returns
    -------
    tuple
        (times, frequencies, amplitudes, eodfs, deltafs, stimulusfs)
        where the first three are lists of per-section lists of floats
        and the last three are flat lists of per-section metadata floats.
    """
    assert os.path.exists(dataset_name)  # make sure the file is there

    with open(dataset_name, 'r') as f:  # context manager guarantees close
        lines = f.readlines()

    # metadata, one entry per stimulus section
    eodfs = []
    deltafs = []
    stimulusfs = []
    # data columns, one sub-list per stimulus section
    times = []
    frequencies = []
    amplitudes = []
    # accumulators for the section currently being read
    time = []
    ampl = []
    freq = []

    for raw in lines:
        l = raw.strip()
        # metadata lines look like '# EODf: 500Hz': take the part after the
        # colon and drop the two-character 'Hz' unit before converting
        if "#" in l and "EODf" in l:
            eodfs.append(float(l.split(':')[-1].strip()[:-2]))
        if "#" in l and "Delta f" in l:
            deltafs.append(float(l.split(':')[-1].strip()[:-2]))
        if "#" in l and "StimulusFrequency" in l:
            stimulusfs.append(float(l.split(':')[-1].strip()[:-2]))
        if '#Key' in l:
            # a new data table starts: flush the previous section's
            # accumulators (they are empty the first time around)
            if len(time) != 0:
                times.append(time)
                amplitudes.append(ampl)
                frequencies.append(freq)
            time = []
            ampl = []
            freq = []
        if len(l) > 0 and l[0] != '#':  # data row: time, frequency, amplitude
            temporary = list(map(float, l.split()))
            time.append(temporary[0])
            freq.append(temporary[1])
            ampl.append(temporary[2])

    # flush the final section; guard so files with no data rows do not
    # produce a spurious empty section
    if time:
        times.append(time)
        amplitudes.append(ampl)
        frequencies.append(freq)

    return times, frequencies, amplitudes, eodfs, deltafs, stimulusfs
def noise_reduce(dataset_name, n=10):
    """Smooth a trace by averaging it in consecutive bins of *n* samples.

    Parameters
    ----------
    dataset_name : sequence of float
        The values to smooth (e.g. a frequency trace from
        ``parse_dataset``). NOTE(review): the original opened a file
        here but then averaged slices of the path *string* itself,
        which crashes; the inline comments ("sollte Frequenzen sein")
        confirm the argument was always meant to be the data.
    n : int, optional
        Bin width in samples (default 10, the original hard-coded value).

    Returns
    -------
    list of float
        One mean per bin; a final partial bin is averaged as-is.
        Empty input yields an empty list.
    """
    values = np.asarray(dataset_name, dtype=float)
    cutf = []
    # step by the bin width so bins do not overlap
    for i in range(0, len(values), n):
        cutf.append(float(np.mean(values[i:i + n])))
    return cutf