still mistakes when normalizing
parent 15e7fbc51f
commit 54a78ec296
@@ -4,14 +4,21 @@ from read_chirp_data import *
 from utility import *
 from IPython import embed
 
+# define sampling rate and data path
 sampling_rate = 40 #kHz
 data_dir = "../data"
 dataset = "2018-11-09-ad-invivo-1"
+# parameters for binning, smoothing and plotting
+num_bin = 12
+window = sampling_rate
+time_axis = np.arange(-50, 50, 1/sampling_rate)
 
+# read data from files
 spikes = read_chirp_spikes(os.path.join(data_dir, dataset))
 eod = read_chirp_eod(os.path.join(data_dir, dataset))
 chirp_times = read_chirp_times(os.path.join(data_dir, dataset))
 
+# make a delta f map for the more complicated keys
 df_map = {}
 for k in spikes.keys():
     df = k[1]
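
The grouping that this hunk begins (and the next hunk completes with its `else:` branch) collects all recording keys that share a delta f. A minimal self-contained sketch of the same idea, with made-up toy keys standing in for the ones `read_chirp_spikes` returns:

from collections import defaultdict

# toy keys mimicking the dataset: (repetition, delta f label)
spikes = {('rep1', '-50Hz'): {}, ('rep2', '-50Hz'): {}, ('rep1', '100Hz'): {}}

df_map = defaultdict(list)
for k in spikes.keys():
    df_map[k[1]].append(k)  # k[1] is the delta f, as in the hunk above

print(df_map['-50Hz'])  # [('rep1', '-50Hz'), ('rep2', '-50Hz')]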
@@ -20,56 +27,55 @@ for k in spikes.keys():
     else:
         df_map[df] = [k]
 
-# make phases together, 12 phases
-phase_vec = np.arange(0, 1+1/12, 1/12)
+# differentiate between phases
+phase_vec = np.arange(0, 1+1/num_bin, 1/num_bin)
 cut_range = np.arange(-50*sampling_rate, 50*sampling_rate, 1)
 
+# make dictionaries for spike times
 df_phase_time = {}
 df_phase_binary = {}
 
+# iterate over delta f, repetition, phases and a single chirp
 for deltaf in df_map.keys():
     df_phase_time[deltaf] = {}
     df_phase_binary[deltaf] = {}
     for rep in df_map[deltaf]:
         for phase in spikes[rep]:
-            #print(phase)
-            for idx in range(len(phase_vec)-1):
+            for idx in np.arange(num_bin):
+                # check the phase
                 if phase[1] > phase_vec[idx] and phase[1] < phase_vec[idx+1]:
 
+                    # get spikes between 50 ms before and after the chirp
                     spikes_to_cut = np.asarray(spikes[rep][phase])
                     spikes_cut = spikes_to_cut[(spikes_to_cut > -50) & (spikes_to_cut < 50)]
                     spikes_idx = np.round(spikes_cut*sampling_rate)
+                    # also save as binary, 0 no spike, 1 spike
                     binary_spikes = np.isin(cut_range, spikes_idx)*1
 
-                    if phase_vec[idx] in df_phase_time[deltaf].keys():
-                        df_phase_time[deltaf][phase_vec[idx]].append(spikes_cut)
-                        df_phase_binary[deltaf][phase_vec[idx]] = np.vstack((df_phase_binary[deltaf][phase_vec[idx]], binary_spikes))
+                    # add the spikes to the dictionaries with the correct df and phase
+                    if idx in df_phase_time[deltaf].keys():
+                        df_phase_time[deltaf][idx].append(spikes_cut)
+                        df_phase_binary[deltaf][idx] = np.vstack((df_phase_binary[deltaf][idx], binary_spikes))
                     else:
-                        df_phase_time[deltaf][phase_vec[idx]] = [spikes_cut]
-                        df_phase_binary[deltaf][phase_vec[idx]] = binary_spikes
+                        df_phase_time[deltaf][idx] = [spikes_cut]
+                        df_phase_binary[deltaf][idx] = binary_spikes
 
 
-plot_trials = df_phase_time['-50Hz'][0.0]
-plot_trials_binary = np.mean(df_phase_binary['-50Hz'][0.0], axis=0)
-
-window = 100
-smoothed_spikes = smooth(plot_trials_binary, window)
-time_axis = np.arange(-50, 50, 1/sampling_rate)
-
-fig, ax = plt.subplots()
-for i, trial in enumerate(plot_trials):
-    ax.scatter(trial, np.ones(len(trial))+i, marker='|', color='k')
-ax.plot(time_axis, smoothed_spikes)
-plt.show()
-
-#window = np.mean(np.diff(plot_spikes))
-#time_vec = np.arange(plot_spikes[0], plot_spikes[-1]+window, window)
-
-#ax.plot(time_vec, smoothed_spikes)
-
-#embed()
-#exit()
-#hist_data = plt.hist(plot_spikes, bins=np.arange(-200, 400, 20))
-#ax.plot(hist_data[1][:-1], hist_data[0])
+# for plotting iterate over delta f and phases
+for df in df_phase_time.keys():
+    for phase in df_phase_time[df].keys():
+        plot_trials = df_phase_time[df][phase]
+        plot_trials_binary = np.mean(df_phase_binary[df][phase], axis=0)
+
+        smoothed_spikes = smooth(plot_trials_binary, window)
+
+        fig, ax = plt.subplots(2, 1)
+        for i, trial in enumerate(plot_trials):
+            ax[0].scatter(trial, np.ones(len(trial))+i, marker='|', color='k')
+        ax[1].plot(time_axis, smoothed_spikes)
+
+        ax[0].set_title(df)
+        ax[0].set_ylabel('repetition', fontsize=12)
+
+        ax[1].set_xlabel('time [ms]', fontsize=12)
+        ax[1].set_ylabel('firing rate [?]', fontsize=12)
+        plt.show()
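
Two caveats about the binning in this hunk. First, the strict comparison `phase[1] > phase_vec[idx] and phase[1] < phase_vec[idx+1]` silently drops a phase that lands exactly on a bin edge (0.0, for instance). A hedged sketch, assuming phases lie in [0, 1), of the same binning done with numpy's half-open bins:

import numpy as np

num_bin = 12
phase_vec = np.arange(0, 1 + 1/num_bin, 1/num_bin)

# np.digitize assigns x to the half-open bin [edge, next_edge),
# so an edge value like 0.0 gets bin 0 instead of being skipped
phase = 0.0
idx = int(np.digitize(phase, phase_vec)) - 1  # 1-based -> 0-based
print(idx)  # 0; the strict >/< test above matches no bin here

Second, `np.round(spikes_cut*sampling_rate)` can map two spikes closer than one sample to the same index, which `np.isin` then records as a single 1 in the binary trace. The remaining hunks are against the helper module the script pulls `smooth` from via `from utility import *`.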
@@ -1,4 +1,5 @@
 import numpy as np
+from IPython import embed
 
 
 def zero_crossing(eod, time):
@@ -29,7 +30,8 @@ def smooth(data, window):
     sigma = window
     time_gauss = np.arange(-4 * sigma, 4 * sigma, 1)
     gauss = gaussian(time_gauss, mu, sigma)
-    smoothed_data = np.convolve(data, gauss, 'same')
+    gauss_norm = gauss/(np.sum(gauss)/len(gauss))
+    smoothed_data = np.convolve(data, gauss_norm, 'same')
     return smoothed_data
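
The commit title says there are still mistakes when normalizing, and the new line bears that out: `gauss/(np.sum(gauss)/len(gauss))` scales the kernel to sum to `len(gauss)` rather than to 1, so the smoothed trace is too large by the kernel length. A hedged sketch of a normalization that turns the 0/1 trace into a firing rate in Hz; the name `smooth_rate` and the unit handling are assumptions, not part of this commit:

import numpy as np

def gaussian(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma)**2)

def smooth_rate(binary, window, sampling_rate):
    # kernel with unit sum: convolution preserves the spike count
    sigma = window
    time_gauss = np.arange(-4 * sigma, 4 * sigma, 1)
    gauss = gaussian(time_gauss, 0, sigma)
    gauss /= np.sum(gauss)
    # per-bin spike probability divided by the bin width (1/sampling_rate ms)
    # gives a rate in kHz; times 1000 for Hz
    return np.convolve(binary, gauss, 'same') * sampling_rate * 1000.0

# usage sketch: 10 spikes in 100 ms at 40 kHz sampling -> about 100 Hz
binary = np.zeros(4000)
binary[::400] = 1
print(smooth_rate(binary, 40, 40).mean())  # close to 100, minus edge loss

With a scaling like this, the plotting loop's second panel could label its y-axis 'firing rate [Hz]' instead of 'firing rate [?]'.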