commit all existing code
This commit is contained in:
283
introduction/introductionBaseline.py
Normal file
283
introduction/introductionBaseline.py
Normal file
@@ -0,0 +1,283 @@
|
||||
|
||||
import pyrelacs.DataLoader as dl
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from IPython import embed
|
||||
import os
|
||||
import helperFunctions as hf
|
||||
from thunderfish.eventdetection import detect_peaks
|
||||
|
||||
|
||||
# Module-level output directory for saved figures; read/written through
# get_savepath()/set_savepath().
SAVEPATH = ""
|
||||
|
||||
|
||||
def get_savepath():
    """Return the current module-wide figure output directory."""
    # Reading a module-level name needs no `global` declaration.
    return SAVEPATH
|
||||
|
||||
|
||||
def set_savepath(new_path):
    """Point the module-wide figure output directory at *new_path*."""
    # Rebinding the module-level name requires `global`.
    global SAVEPATH
    SAVEPATH = new_path
|
||||
|
||||
|
||||
def main():
    """Produce baseline-activity figures (ISI histogram, phase-locking /
    vector-strength plots) for every recording folder under data/.

    Figures for each recording go to figures/<recording>/ via the
    module-level save path.
    """
    for folder in hf.get_subfolder_paths("data/"):
        filepath = folder + "/basespikes1.dat"
        # Folder layout is data/<recording>/ — use the recording name
        # as the per-recording figure directory.
        set_savepath("figures/" + folder.split('/')[1] + "/")

        print("Folder:", folder)

        if not os.path.exists(get_savepath()):
            os.makedirs(get_savepath())

        spiketimes = []

        # `ran` flags whether the data file yielded any blocks at all.
        ran = False
        for metadata, key, data in dl.iload(filepath):
            ran = True
            # First column holds the spike timestamps — presumably in ms;
            # TODO confirm against the .dat format.
            spikes = data[:, 0]
            spiketimes.append(spikes)  # save for calculation of vector strength
            metadata = metadata[0]
            #print(metadata)
            # print('firing frequency1:', metadata['firing frequency1'])
            # print(mean_firing_rate(spikes))

            # print('Coefficient of Variation (CV):', metadata['CV1'])
            # print(calculate_coefficient_of_variation(spikes))

        if not ran:
            print("------------ DIDN'T RUN")

        isi_histogram(spiketimes)

        # Trace 2 = EOD, trace 1 = V1 — assumption based on usage below;
        # TODO confirm against helperFunctions.get_traces.
        times, eods = hf.get_traces(folder, 2, 'BaselineActivity')
        times, v1s = hf.get_traces(folder, 1, 'BaselineActivity')

        # NOTE(review): `vs` is computed but unused — printing is
        # commented out below.
        vs = calculate_vector_strength(times, eods, spiketimes, v1s)

        # print("Calculated vector strength:", vs)
|
||||
|
||||
|
||||
def mean_firing_rate(spiketimes):
    """Return the mean firing rate in Hz.

    Parameters
    ----------
    spiketimes : sequence of float
        Spike timestamps in milliseconds, sorted ascending.

    Returns
    -------
    float
        Spike count divided by the time of the last spike, converted
        from 1/ms to Hz. Returns 0.0 for an empty train (the original
        raised IndexError) or when the only information is a spike at
        t == 0 (avoids ZeroDivisionError).
    """
    # mean firing rate (number of spikes per time)
    if len(spiketimes) == 0 or spiketimes[-1] == 0:
        return 0.0
    return len(spiketimes) / spiketimes[-1] * 1000
|
||||
|
||||
|
||||
def calculate_coefficient_of_variation(spiketimes):
    """Return the coefficient of variation (CV) of the inter-spike
    intervals: std(ISI) / mean(ISI)."""
    intervals = np.diff(spiketimes)
    return np.std(intervals) / np.mean(intervals)
|
||||
|
||||
|
||||
def isi_histogram(spiketimes):
    """Pool inter-spike intervals over all recordings and save a
    histogram to get_savepath() + 'phase_locking_without_stimulus.png'.

    spiketimes: list of per-recording spike-time arrays (in ms, judging
    by the axis label).
    """
    # ISI histogram (play around with binsize! < 1ms)

    isi = []
    for spike_list in spiketimes:
        isi.extend(np.diff(spike_list))
    # 0.1 ms bins up to just past the largest interval.
    # NOTE(review): max() raises ValueError if no recording contributed
    # at least two spikes — confirm callers guarantee non-empty data.
    maximum = max(isi)
    bins = np.arange(0, maximum*1.01, 0.1)

    plt.title('Phase locking of ISI without stimulus')
    plt.xlabel('ISI in ms')
    plt.ylabel('Count')
    plt.hist(isi, bins=bins)
    plt.savefig(get_savepath() + 'phase_locking_without_stimulus.png')
    plt.close()
|
||||
|
||||
|
||||
def calculate_vector_strength(times, eods, spiketimes, v1s):
    """Compute the vector strength of spiking relative to the EOD cycle.

    For each recording, spike times relative to their enclosing EOD
    cycle and the cycle durations are collected (eods_around_spikes);
    per-recording diagnostics (polar phase histogram, phase-locking test
    figure) are plotted along the way. Returns the vector strength
    pooled over all recordings.
    """
    # Vectorstaerke (use EOD frequency from header (metadata)) VS > 0.8
    # dl.iload_traces(repro='BaselineActivity')

    relative_spike_times = []
    eod_durations = []

    if len(times) == 0:
        print("-----LENGTH OF TIMES = 0")

    for recording in range(len(times)):

        rel_spikes, eod_durs = eods_around_spikes(times[recording], eods[recording], spiketimes[recording])
        relative_spike_times.extend(rel_spikes)
        eod_durations.extend(eod_durs)

        # Per-recording diagnostics (uses only this recording's data).
        vs = __vector_strength__(rel_spikes, eod_durs)
        phases = calculate_phases(rel_spikes, eod_durs)
        plot_polar(phases, "test_phase_locking_" + str(recording) + "_with_vs:" + str(round(vs, 3)) + ".png")

        print("VS of recording", recording, ":", vs)

        plot_phaselocking_testfigures(times[recording], eods[recording], spiketimes[recording], v1s[recording])

    # Pooled vector strength across all recordings.
    return __vector_strength__(relative_spike_times, eod_durations)
|
||||
|
||||
|
||||
def eods_around_spikes(time, eod, spiketimes):
    """For every spike, locate the EOD cycle that contains it.

    Returns two parallel lists: the spike time relative to its cycle's
    start (seconds) and the duration of that cycle. Spikes whose
    timestamp does not fall exactly on a 20 kHz sample are skipped with
    a warning print.
    """
    eod_durations = []
    relative_spike_times = []

    for spike in spiketimes:
        index = spike * 20  # time in s given timestamp of spike in ms - recorded at 20kHz -> timestamp/1000*20000 = idx

        if index != np.round(index):
            print("INDEX NOT AN INTEGER in eods_around_spikes! index:", index)
            continue
        index = int(index)

        start_time, end_time = search_eod_start_and_end_times(time, eod, index)

        eod_durations.append(end_time-start_time)
        # Spike timestamps are in ms, trace times in s — hence /1000.
        relative_spike_times.append(spike/1000 - start_time)

    return relative_spike_times, eod_durations
|
||||
|
||||
|
||||
def search_eod_start_and_end_times(time, eod, index):
    """Scan outward from `index` for the EOD zero crossings
    (negative -> positive) immediately before and after that sample and
    return their times, linearly interpolated between samples.

    NOTE(review): if no crossing exists before/after `index`, the scan
    runs off the array — negative indices silently wrap to the array's
    other end in Python; confirm inputs always contain a crossing.
    """
    # TODO might break if a spike is in the cut off first or last eod!

    # search start_time:
    previous = index
    working_idx = index-1
    while True:
        if eod[working_idx] < 0 < eod[previous]:
            first_value = eod[working_idx]
            second_value = eod[previous]

            # Fraction of the sample interval at which the sign flips
            # (linear interpolation between the two samples).
            dif = second_value - first_value
            part = np.abs(first_value/dif)

            time_dif = np.abs(time[previous] - time[working_idx])
            start_time = time[working_idx] + time_dif*part

            break

        previous = working_idx
        working_idx -= 1

    # search end_time
    previous = index
    working_idx = index + 1
    while True:
        if eod[previous] < 0 < eod[working_idx]:
            first_value = eod[previous]
            second_value = eod[working_idx]

            dif = second_value - first_value
            part = np.abs(first_value / dif)

            time_dif = np.abs(time[previous] - time[working_idx])
            # NOTE(review): this interpolates forward from the *later*
            # sample (working_idx > previous), unlike the start-time
            # branch — looks asymmetric; confirm intended.
            end_time = time[working_idx] + time_dif * part

            break

        previous = working_idx
        working_idx += 1

    return start_time, end_time
|
||||
|
||||
|
||||
def search_closest_index(array, value, start=0, end=-1):
    """Binary-search a sorted array for the index of the element closest
    to *value*.

    `start`/`end` bound the search (inclusive); `end == -1` means the
    last element. `start` has to be smaller than `end`.
    """
    if end == -1:
        end = len(array) - 1

    # Halve the [start, end] window until only two candidates remain.
    while end - start > 1:
        middle = (end - start) // 2 + start
        if array[middle] == value:
            return middle
        if array[middle] > value:
            end = middle
        else:
            start = middle

    # Two candidates left: pick whichever is nearer to `value`.
    if np.abs(array[end] - value) < np.abs(array[start] - value):
        return end
    return start
|
||||
|
||||
|
||||
def __vector_strength__(relative_spike_times, eod_durations):
    """Return the vector strength of spikes relative to the EOD cycle.

    Each spike's phase is its time within its cycle mapped onto
    [0, 2*pi); the vector strength is the length of the mean resultant
    vector of those phases (1 = perfect locking, 0 = uniform phases).
    Adapted from Ramona.

    Returns 0 for empty input.
    """
    n = len(relative_spike_times)
    if n == 0:
        return 0

    # Vectorized replacement of the original element-wise loop;
    # np.mean(...) is 1/n * sum(...).
    phases = 2 * np.pi * np.asarray(relative_spike_times) / np.asarray(eod_durations)
    return np.sqrt(np.mean(np.cos(phases)) ** 2 + np.mean(np.sin(phases)) ** 2)
|
||||
|
||||
|
||||
def calculate_phases(relative_spike_times, eod_durations):
    """Map each spike's position within its EOD cycle onto a phase in
    [0, 2*pi).

    Vectorized replacement of the original element-wise loop; returns a
    float numpy array (same as the original's filled np.zeros array),
    empty input yields an empty array.
    """
    return 2 * np.pi * np.asarray(relative_spike_times, dtype=float) \
        / np.asarray(eod_durations, dtype=float)
|
||||
|
||||
|
||||
def plot_polar(phases, name=""):
    """Histogram spike phases (radians) on a polar axis.

    Shows the figure interactively when `name` is empty, otherwise
    saves it to get_savepath() + name. The figure is always closed.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, polar=True)
    # r = np.arange(0, 1, 0.001)
    # theta = 2 * 2 * np.pi * r
    # line, = ax.plot(theta, r, color='#ee8d18', lw=3)
    # 0.05 rad bins over one full cycle.
    bins = np.arange(0, np.pi*2, 0.05)
    ax.hist(phases, bins=bins)
    if name == "":
        plt.show()
    else:
        plt.savefig(get_savepath() + name)
    plt.close()
|
||||
|
||||
|
||||
def plot_phaselocking_testfigures(time, eod, spiketimes, v1):
    """Debug plot: show the first seconds of the V1 trace with spike
    markers and the detected EOD cycle boundaries around each spike.

    Displays interactively (plt.show()); nothing is saved or returned.
    """
    eod_start_times = []
    eod_end_times = []

    for spike in spiketimes:
        index = spike * 20  # time in s given timestamp of spike in ms - recorded at 20kHz -> timestamp/1000*20000 = idx

        if index != np.round(index):
            print("INDEX NOT AN INTEGER in eods_around_spikes! index:", index)
            continue
        index = int(index)

        start_time, end_time = search_eod_start_and_end_times(time, eod, index)

        eod_start_times.append(start_time)
        eod_end_times.append(end_time)

    # Only plot the first `cutoff_in_sec` seconds of the recording.
    cutoff_in_sec = 2
    sampling = 20000
    max_idx = cutoff_in_sec*sampling
    # Spike timestamps are in ms; convert to seconds for the time axis.
    spikes_part = [x/1000 for x in spiketimes if x/1000 < cutoff_in_sec]
    count_spikes = len(spikes_part)
    print(spiketimes)
    print(len(spikes_part))

    x_axis = time[0:max_idx]
    # Spikes drawn as dots at y=-20, cycle boundaries as dots at y=0.
    plt.plot(spikes_part, np.ones(len(spikes_part))*-20, 'o')
    plt.plot(x_axis, v1[0:max_idx])
    # NOTE(review): assumes the first `count_spikes` boundary entries
    # correspond to the spikes before the cutoff — holds only if no
    # spike was skipped above; confirm.
    plt.plot(eod_start_times[: count_spikes], np.zeros(count_spikes), 'o')
    plt.plot(eod_end_times[: count_spikes], np.zeros(count_spikes), 'o')

    plt.show()
    plt.close()
|
||||
|
||||
|
||||
# Script entry point: run the baseline analysis over all data folders.
if __name__ == '__main__':
    main()
|
||||
298
introduction/introductionFICurve.py
Normal file
298
introduction/introductionFICurve.py
Normal file
@@ -0,0 +1,298 @@
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import pyrelacs.DataLoader as dl
|
||||
import os
|
||||
import helperFunctions as hf
|
||||
from IPython import embed
|
||||
from scipy.optimize import curve_fit
|
||||
import warnings
|
||||
|
||||
# Recording/protocol constants (20 kHz sampling).
SAMPLING_INTERVAL = 1/20000  # seconds per sample
STIMULUS_START = 0           # stimulus onset in s, relative to end of the pre-stimulus window — TODO confirm
STIMULUS_DURATION = 0.400    # stimulus length in s
PRE_DURATION = 0.250         # pre-stimulus recording window in s
TOTAL_DURATION = 1.25        # trial length after stimulus onset in s — TODO confirm whether PRE_DURATION is included
|
||||
|
||||
|
||||
def main():
    """Build FI-curve data for every recording folder under data/:
    group spike trains by stimulus intensity, compute mean rate traces,
    fit exponentials to the onset, and plot the frequency curves.

    Figures go to figures/<recording>/ via the module save path.
    """
    for folder in hf.get_subfolder_paths("data/"):
        filepath = folder + "/fispikes1.dat"
        set_savepath("figures/" + folder.split('/')[1] + "/")
        print("Folder:", folder)

        if not os.path.exists(get_savepath()):
            os.makedirs(get_savepath())

        # spiketimes[i] collects all trials recorded at intensities[i];
        # `index` tracks the current intensity bucket.
        spiketimes = []
        intensities = []
        index = -1
        for metadata, key, data in dl.iload(filepath):
            # embed()
            # A non-empty metadata block starts a new intensity.
            if len(metadata) != 0:

                # Some files carry an extra leading "Control" section;
                # the intensity then lives in the second dict.
                metadata_index = 0
                if '----- Control --------------------------------------------------------' in metadata[0].keys():
                    metadata_index = 1

                print(metadata)
                # Strip the trailing unit (e.g. "mV") before parsing.
                i = float(metadata[metadata_index]['intensity'][:-2])
                intensities.append(i)
                spiketimes.append([])
                index += 1

            # Spike timestamps converted from ms to s.
            spiketimes[index].append(data[:, 0]/1000)

        intensities, spiketimes = hf.merge_similar_intensities(intensities, spiketimes)

        # Sort the lists so that intensities are increasing
        x = [list(x) for x in zip(*sorted(zip(intensities, spiketimes), key=lambda pair: pair[0]))]
        intensities = x[0]
        spiketimes = x[1]

        mean_frequencies = calculate_mean_frequencies(intensities, spiketimes)
        popt, pcov = fit_exponential(intensities, mean_frequencies)
        plot_frequency_curve(intensities, mean_frequencies)

        f_baseline = calculate_f_baseline(mean_frequencies)
        f_infinity = calculate_f_infinity(mean_frequencies)
        f_zero = calculate_f_zero(mean_frequencies)

        # plot_fi_curve(intensities, f_baseline, f_zero, f_infinity)
|
||||
|
||||
|
||||
# TODO !!
def fit_exponential(intensities, mean_frequencies):
    """Fit a decaying exponential to the onset (5–100 ms after stimulus
    start) of each intensity's mean rate trace and save a diagnostic
    plot per fit under <savepath>/exponential_fits/.

    Returns (popts, pcovs): the curve_fit results for each trace that
    converged (failed fits are skipped with a message).
    """
    start_idx = int((PRE_DURATION + STIMULUS_START+0.005) / SAMPLING_INTERVAL)
    end_idx = int((PRE_DURATION + STIMULUS_START + 0.1) / SAMPLING_INTERVAL)
    # NOTE(review): `time_constants` is never filled or returned.
    time_constants = []
    #print(start_idx, end_idx)

    popts = []
    pcovs = []
    for i in range(len(mean_frequencies)):
        freq = mean_frequencies[i]
        # NOTE(review): y_values has end_idx-start_idx+1 samples but the
        # arange for x_values nominally yields end_idx-start_idx —
        # curve_fit errors on a length mismatch; confirm the slicing.
        y_values = freq[start_idx:end_idx+1]
        x_values = np.arange(start_idx*SAMPLING_INTERVAL, end_idx*SAMPLING_INTERVAL, SAMPLING_INTERVAL)
        try:
            popt, pcov = curve_fit(exponential_function, x_values, y_values, p0=(1/(np.power(1, 10)), .5, 50, 180), maxfev=10000)
        except RuntimeError:
            print("RuntimeError happened in fit_exponential.")
            continue
        #print(popt)
        #print(pcov)
        #print()

        popts.append(popt)
        pcovs.append(pcov)

        # Diagnostic figure: full trace plus the fitted curve over the
        # fit window (times shifted so 0 = stimulus onset).
        plt.plot(np.arange(-PRE_DURATION, TOTAL_DURATION, SAMPLING_INTERVAL), freq)
        plt.plot(x_values-PRE_DURATION, [exponential_function(x, popt[0], popt[1], popt[2], popt[3]) for x in x_values])
        # plt.show()
        save_path = get_savepath() + "exponential_fits/"
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        plt.savefig(save_path + "fit_intensity:" + str(round(intensities[i], 4)) + ".png")
        plt.close()

    return popts, pcovs
|
||||
|
||||
|
||||
def calculate_mean_frequency(freqs):
    """Average several equal-length rate traces sample-by-sample.

    freqs: list of per-trial rate vectors. Returns one vector whose
    i-th entry is the mean of the trials' i-th entries (empty input
    yields an empty list).
    """
    averaged = []
    for samples in zip(*freqs):
        averaged.append(sum(samples) / len(samples))
    return averaged
|
||||
|
||||
|
||||
def gaussian_kernel(sigma, dt):
    """Sampled unit-area Gaussian on [-4*sigma, 4*sigma) with step dt."""
    support = np.arange(-4. * sigma, 4. * sigma, dt)
    norm = np.sqrt(2. * np.pi) * sigma
    return np.exp(-0.5 * (support / sigma) ** 2) / norm
|
||||
|
||||
|
||||
def calculate_kernel_frequency(spiketimes, time, sampling_interval):
    """Estimate an instantaneous firing rate by convolving a binary
    spike train with a Gaussian kernel (sigma = 10 ms).

    spiketimes and time share the same unit (seconds, presumably —
    TODO confirm); returns a rate array aligned with `time`.
    """
    sp = spiketimes
    t = time  # Probably goes from -200ms to some amount of ms in the positive ~1200?
    dt = sampling_interval
    kernel_width = 0.01  # kernel width is a time in seconds how sharp the frequency should be counted

    # Binary spike train sampled on `t`; spikes outside the time axis
    # are dropped by the boolean mask.
    binary = np.zeros(t.shape)
    spike_indices = ((sp - t[0]) / dt).astype(int)
    binary[spike_indices[(spike_indices >= 0) & (spike_indices < len(binary))]] = 1
    g = gaussian_kernel(kernel_width, dt)

    # Convolving the 0/1 train with a unit-area kernel yields a rate.
    rate = np.convolve(binary, g, mode='same')

    return rate
|
||||
|
||||
|
||||
def calculate_isi_frequency(spiketimes, time):
    """Build a sample-by-sample rate trace (1/ISI in Hz) covering the
    whole trial, padding before the first and after the last spike.

    spiketimes are in seconds with the trial starting at -PRE_DURATION
    and ending at TOTAL_DURATION — assumption from the padding below;
    TODO confirm against the caller. Requires at least one spike.
    """
    first_isi = spiketimes[0] - (-PRE_DURATION)  # diff to the start at 0
    last_isi = TOTAL_DURATION - spiketimes[-1]  # diff from the last spike to the end of time :D
    isis = [first_isi]
    isis.extend(np.diff(spiketimes))
    isis.append(last_isi)

    # Debug trap for NaN timestamps — terminates the whole process.
    if np.isnan(first_isi):
        print(spiketimes[:10])
        print(isis[0:10])
        quit()

    # Each interval contributes round(isi/dt) samples at rate 1/isi.
    rate = []
    for isi in isis:
        if isi == 0:
            print("probably a problem")
            # Avoid division by zero with a tiny stand-in interval.
            isi = 0.0000000001
        freq = 1/isi
        frequency_step = int(round(isi*(1/SAMPLING_INTERVAL)))*[freq]
        rate.extend(frequency_step)


    #plt.plot((np.arange(len(rate))-PRE_DURATION)/(1/SAMPLING_INTERVAL), rate)
    #plt.plot([sum(isis[:i+1]) for i in range(len(isis))], [200 for i in isis], 'o')
    #plt.plot(time, [100 for t in time])
    #plt.show()

    # Per-interval rounding can leave `rate` a few samples off `time`'s
    # length; one known-bad dataset gets a dummy trace, everything else
    # aborts the process.
    if len(rate) != len(time):
        if "12-13-af" in get_savepath():
            warnings.warn("preStimulus duration > 0 still not supported")
            return [1]*len(time)
        else:
            print(len(rate), len(time), len(rate) - len(time))
            print(rate)
            print(isis)
            print("Quitting because time and rate aren't the same length")
            quit()

    return rate
|
||||
|
||||
|
||||
def calculate_mean_frequencies(intensities, spiketimes):
    """For each intensity, average the per-trial ISI-based rate traces.

    Trials with fewer than two spikes are skipped.
    NOTE(review): if every trial of an intensity is skipped, that
    intensity's entry becomes [] — downstream indexing may break.
    """
    time = np.arange(-PRE_DURATION, TOTAL_DURATION, SAMPLING_INTERVAL)

    mean_frequencies = []
    for i in range(len(intensities)):
        freqs = []
        for spikes in spiketimes[i]:
            if len(spikes) < 2:
                continue
            freq = calculate_isi_frequency(spikes, time)
            freqs.append(freq)

        mf = calculate_mean_frequency(freqs)
        mean_frequencies.append(mf)

    return mean_frequencies
|
||||
|
||||
|
||||
def calculate_f_baseline(mean_frequencies):
    """Baseline rate per intensity: the mean of each rate trace from
    50 ms after recording start up to `buffer_time` before stimulus
    onset."""
    buffer_time = 0.05
    start_idx = int(0.05/SAMPLING_INTERVAL)
    end_idx = int((PRE_DURATION - STIMULUS_START - buffer_time)/SAMPLING_INTERVAL)

    return [np.mean(freq[start_idx:end_idx]) for freq in mean_frequencies]
|
||||
|
||||
|
||||
def calculate_f_infinity(mean_frequencies):
    """Steady-state rate per intensity: the mean of each rate trace over
    the 150 ms window ending `buffer_time` before stimulus offset."""
    buffer_time = 0.05
    start_idx = int((PRE_DURATION + STIMULUS_START + STIMULUS_DURATION - 0.15 - buffer_time) / SAMPLING_INTERVAL)
    end_idx = int((PRE_DURATION + STIMULUS_START + STIMULUS_DURATION - buffer_time) / SAMPLING_INTERVAL)

    return [np.mean(freq[start_idx:end_idx]) for freq in mean_frequencies]
|
||||
|
||||
|
||||
def calculate_f_zero(mean_frequencies):
    """Onset response f_0 per intensity: the rate sample within
    +-buffer_time of stimulus onset that deviates most from the rate at
    the window's first sample (captures peaks in either direction)."""
    buffer_time = 0.1
    start_idx = int((PRE_DURATION + STIMULUS_START - buffer_time) / SAMPLING_INTERVAL)
    end_idx = int((PRE_DURATION + STIMULUS_START + buffer_time) / SAMPLING_INTERVAL)
    f_peaks = []
    for freq in mean_frequencies:
        # Seed with the mean of the 500 samples (25 ms at 20 kHz)
        # preceding the window.
        fp = np.mean(freq[start_idx-500:start_idx])
        for i in range(start_idx+1, end_idx):
            # Keep the sample farthest from the window-start rate.
            if abs(freq[i] - freq[start_idx]) > abs(fp - freq[start_idx]):
                fp = freq[i]
        f_peaks.append(fp)
    return f_peaks
|
||||
|
||||
|
||||
def plot_fi_curve(intensities, f_baseline, f_zero, f_infinity):
    """Plot baseline, onset (f_zero, as points) and steady-state
    (f_infinity) rates against stimulus intensity, overlay a Boltzmann
    fit to f_zero, and save the figure as fi_curve.png."""
    plt.plot(intensities, f_baseline, label="f_baseline")
    plt.plot(intensities, f_zero, 'o', label="f_zero")
    plt.plot(intensities, f_infinity, label="f_infinity")

    # Initial guesses for the fit: maximum rate, a slope estimate from
    # the endpoints, and the mean intensity as the midpoint.
    max_f0 = float(max(f_zero))
    mean_int = float(np.mean(intensities))
    start_k = float(((f_zero[-1] - f_zero[0]) / (intensities[-1] - intensities[0])*4)/f_zero[-1])

    popt, pcov = curve_fit(fill_boltzmann, intensities, f_zero, p0=(max_f0, start_k, mean_int), maxfev=10000)
    print(popt)
    # Evaluate the fitted sigmoid on a dense grid for a smooth curve.
    min_x = min(intensities)
    max_x = max(intensities)
    step = (max_x - min_x) / 5000
    x_values_boltzmann_fit = np.arange(min_x, max_x, step)
    plt.plot(x_values_boltzmann_fit, [fill_boltzmann(i, popt[0], popt[1], popt[2]) for i in x_values_boltzmann_fit], label='fit')

    plt.title("FI-Curve")
    plt.ylabel("Frequency in Hz")
    plt.xlabel("Intensity in mV")
    plt.legend()
    # plt.show()
    plt.savefig(get_savepath() + "fi_curve.png")
    plt.close()
|
||||
|
||||
|
||||
def plot_frequency_curve(intensities, mean_frequencies):
    """Plot each intensity's mean rate trace over time, mark the
    stimulus window with vertical lines at 0 s and 0.4 s, and save the
    figure as mean_frequency_curves.png."""
    colors = ["red", "green", "blue", "violet", "orange", "grey"]

    time = np.arange(-PRE_DURATION, TOTAL_DURATION, SAMPLING_INTERVAL)

    for i in range(len(intensities)):
        # Colors repeat every six intensities.
        plt.plot(time, mean_frequencies[i], color=colors[i % 6], label=str(intensities[i]))

    # Vertical markers for stimulus on/off.
    plt.plot((0, 0), (0, 500), color="black")
    plt.plot((0.4, 0.4), (0, 500), color="black")
    plt.legend()
    plt.xlabel("Time in seconds")
    plt.ylabel("Frequency in Hz")
    plt.title("Frequency curve")

    plt.savefig(get_savepath() + "mean_frequency_curves.png")
    plt.close()
|
||||
|
||||
|
||||
def exponential_function(x, a, b, c, d):
    """Shifted decaying exponential: a * exp(-c * (x - b)) + d."""
    shifted = x - b
    return a * np.exp(-c * shifted) + d
|
||||
|
||||
|
||||
def upper_boltzmann(x, f_max, k, x_zero):
    """Upper half of a Boltzmann sigmoid: rescaled so it passes through
    0 at x_zero, with negative values clipped to 0, scaled by f_max."""
    sigmoid = 2 / (1 + np.exp(-k * (x - x_zero))) - 1
    return f_max * np.clip(sigmoid, 0, None)
|
||||
|
||||
|
||||
def fill_boltzmann(x, f_max, k, x_zero):
    """Standard Boltzmann sigmoid rising from 0 to f_max, with midpoint
    f_max/2 at x_zero and steepness k."""
    return f_max / (1 + np.exp(-k * (x - x_zero)))
|
||||
|
||||
|
||||
# Module-level output directory for saved figures; read/written through
# get_savepath()/set_savepath().
SAVEPATH = ""
|
||||
|
||||
|
||||
def get_savepath():
    """Return the current module-wide figure output directory."""
    # Reading a module-level name needs no `global` declaration.
    return SAVEPATH
|
||||
|
||||
|
||||
def set_savepath(new_path):
    """Point the module-wide figure output directory at *new_path*."""
    # Rebinding the module-level name requires `global`.
    global SAVEPATH
    SAVEPATH = new_path
|
||||
|
||||
|
||||
# Script entry point: run the FI-curve analysis over all data folders.
if __name__ == '__main__':
    main()
|
||||
20
introduction/janExample.py
Normal file
20
introduction/janExample.py
Normal file
@@ -0,0 +1,20 @@
|
||||
import pyrelacs.DataLoader as dl
|
||||
|
||||
# Minimal pyrelacs example: load the first spike block of one recording
# and print its shape, then stop.
for metadata, key, data in dl.iload('2012-06-27-ah-invivo-1/basespikes1.dat'):
    print(data.shape)
    break

# Analysis ideas for the introduction project:
# mean firing rate (number of spikes per time)
# CV (stdev of ISI divided by mean ISI (np.diff(spiketimes))
# ISI histogram (play around with binsize! < 1ms)
# Vectorstaerke (use EOD frequency from header (metadata)) VS > 0.8
# dl.iload_traces(repro='BaselineActivity')
|
||||
|
||||
def test():
    """Dump the shape and all metadata key/value pairs for the first
    block of one example recording, then stop."""
    for metadata, key, data in dl.iload('data/2012-06-27-ah-invivo-1/basespikes1.dat'):
        print(data.shape)
        for i in metadata:
            # NOTE(review): the inner loop shadows the outer `key`
            # variable — harmless here only because we break below.
            for key in i.keys():
                print(key, ":", i[key])

        break
|
||||
Reference in New Issue
Block a user