25.06

parent 8b97b8f304
commit 539cc9ed09
@@ -1,24 +1,50 @@
-import os
+import os   #compatibility with windows paths


 def parse_dataset(dataset_name):
-    assert(os.path.exists(dataset_name))
-    f = open(dataset_name, 'r')
-    lines = f.readlines()
-    f.close()
+    assert(os.path.exists(dataset_name))        #check that the data file exists
+    f = open(dataset_name, 'r')                 #open the given data file
+    lines = f.readlines()                       #read all lines
+    f.close()                                   #close the file again

-    time = []
-    frequency = []
-    amplitude = []
+    eodfs = []          #metadata lists, one entry per loop
+    deltafs = []
+    stimulusfs = []

+    times = []          #the data itself, one list per loop
+    frequencies = []
+    amplitudes = []

+    time = []           #temporary lists that are appended to the lists above
+    ampl = []
+    freq = []

     for i in range(len(lines)):
-        l = lines[i].strip()
+        l = lines[i].strip()                                        #strip whitespace from every line of the text file
+        if "#" in l and "EODf" in l:                                #metadata lines such as '# EODf: ...Hz':
+            eodfs.append(float(l.split(':')[-1].strip()[:-2]))      #take the part after ':', strip the spaces,
+        if "#" in l and "Delta f" in l:                             #drop the trailing 'Hz' unit and convert to float;
+            deltafs.append(float(l.split(':')[-1].strip()[:-2]))    #each kind of metadata goes into its own list
+        if "#" in l and "StimulusFrequency" in l:
+            stimulusfs.append(float(l.split(':')[-1].strip()[:-2]))

-        if len(l) > 0 and l[0] is not '#':
-            temp = list(map(float, l.split()))
+        if '#Key' in l:                     #a '#Key' line marks the start of a new loop
+            if len(time) != 0:              #empty in the first round, so nothing is stored then
+                times.append(time)          #from the second loop on, store the previous loop's
+                amplitudes.append(ampl)     #times/amplitudes/frequencies
+                frequencies.append(freq)
+            time = []                       #reset the temporary lists for the next loop
+            ampl = []
+            freq = []

-            time.append(temp[0])
-            frequency.append(temp[1])
-            amplitude.append(temp[2])
+        if len(l) > 0 and l[0] != '#':                  #line is not empty and does not start with '#'
+            temporary = list(map(float, l.split()))     #split the data line into three floats
+            time.append(temporary[0])                   #time, frequency and amplitude, respectively
+            freq.append(temporary[1])
+            ampl.append(temporary[2])

-    return time, frequency, amplitude
+    times.append(time)          #store the last loop's data as well
+    amplitudes.append(ampl)
+    frequencies.append(freq)

+    return times, frequencies, amplitudes, eodfs, deltafs, stimulusfs       #output of the function
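
For orientation, a minimal usage sketch of the rewritten parser (not part of the commit); the path mirrors the one used in the analysis script below, the example metadata line is made up, and the unpacking order follows the return statement above.

from jar_functions import parse_dataset

dataset = 'D:\\jar_project\\JAR\\2020-06-22-ab\\beats-eod.dat'   # hypothetical path

# one list per '#Key' block for the data, one value per metadata line
times, frequencies, amplitudes, eodfs, deltafs, stimulusfs = parse_dataset(dataset)
print(len(times), eodfs, deltafs, stimulusfs)

# the metadata parsing used above, applied to a made-up example line
line = '# EODf: 823.45Hz'
value = float(line.split(':')[-1].strip()[:-2])     # -> 823.45, trailing 'Hz' dropped
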
@@ -7,15 +7,44 @@ from IPython import embed
 from jar_functions import parse_dataset


-dataset = os.path.join('D:\\', 'jar_project', 'JAR', '2020-06-22-ac', 'beats-eod.dat')

-t, f, a = parse_dataset(dataset)

-avg_frequency = np.mean(f)
-print(avg_frequency)

+datasets = [(os.path.join('D:\\jar_project\\JAR\\2020-06-22-ab\\beats-eod.dat'))]
+#           (os.path.join('D:\\jar_project\\JAR\\2020-06-22-ac\\beats-eod.dat'))]

+time = []
+frequency = []
+amplitude = []

+for dataset in datasets:
+    t, f, a, e, d, s = parse_dataset(dataset)   #parse_dataset now returns six lists
+    embed()
+    time.append(t)
+    frequency.append(f)
+    amplitude.append(a)


+minimum = min(len(frequency[0]), len(frequency[1]))    #cut all trials down to the shortest one
+f1 = frequency[0][:minimum]
+f2 = frequency[1][:minimum]

+frequency = f1 + f2
+embed()
+#frequency = np.array(frequency)

+mean = np.mean(frequency, axis=0)

+for i in range(minimum):
+    print(np.mean([f1[i], f2[i]]))      #element-wise mean of the two trials

+for f in frequency:
+    print(np.mean(f))

+# mean_f = [np.mean(x) for x in zip(frequencies1, frequencies2)]


+embed()

+#next step: add further measurements and then average over all trials, possibly plot them normalized (frequency / baseline frequency?)
+#time constant: from second 0 until 63% of the response? relative JAR



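Aside (not part of the commit): the commented-out one-liner and the minimum/f1/f2 block above both aim at an element-wise average over trials. A small sketch of that idea, generalized to any number of trials; the helper name is made up.

import numpy as np

def mean_over_trials(frequencies):
    # cut every trial to the length of the shortest one, then average element-wise
    minimum = min(len(f) for f in frequencies)
    return np.mean([f[:minimum] for f in frequencies], axis=0)

For exactly two trials this reduces to [np.mean(x) for x in zip(f1, f2)].
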
@@ -23,10 +52,11 @@ print(avg_frequency)



 '''
 plt.plot(t, f)

 plt.xlabel('time [s]')
 plt.ylabel('frequency [Hz]')
 plt.xlim([-10,200])
 plt.title('second try because first try was sold out')
-plt.show()
+plt.show()'''
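
A rough sketch of the plan in the two TODO comments above, i.e. normalizing the response by the baseline frequency and reading the time constant off at 63% of the frequency change; the function name, the baseline_f argument and the assumption that the trace has settled by its last sample are assumptions, not part of the commit.

import numpy as np

def relative_jar_time_constant(time, frequency, baseline_f):
    # relative JAR: frequency change normalized by the baseline frequency
    rel = (np.asarray(frequency) - baseline_f) / baseline_f
    # first time point at which the response reaches 63% of its final value
    idx = np.argmax(np.abs(rel) >= 0.63 * abs(rel[-1]))
    return time[idx]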