Ramona 2018-11-28 17:45:45 +01:00
parent 5cd62554fa
commit 85b359cdb6
3 changed files with 156 additions and 150 deletions

View File

@@ -9,7 +9,7 @@ from IPython import embed
 inch_factor = 2.54
 data_dir = '../data'
 #dataset = '2018-11-09-ad-invivo-1'
-dataset = '2018-11-14-al-invivo-1'
+dataset = '2018-11-13-aa-invivo-1'
 # read eod and time of baseline
 time, eod = read_baseline_eod(os.path.join(data_dir, dataset))
@@ -29,7 +29,6 @@ ax.spines["top"].set_visible(False)
 ax.spines["right"].set_visible(False)
 fig.tight_layout()
 plt.show()
-plt.show()
 #plt.savefig('isis.pdf')
 exit()
 plt.savefig('isis.png')
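Note on this hunk: the duplicated plt.show() is dropped, but exit() still precedes plt.savefig('isis.png'), so that save is never reached. A minimal sketch, with stand-in data rather than the values the script builds from read_baseline_eod(), of an ordering that writes the figure to disk before the blocking show() call:

import matplotlib.pyplot as plt
import numpy as np

# stand-in data; the script derives its figure from the baseline recording
isis = np.random.exponential(0.01, 1000)

fig, ax = plt.subplots()
ax.hist(isis, bins=50)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
fig.tight_layout()

# save first: plt.show() blocks until the window is closed, and anything
# placed after exit() never runs
fig.savefig('isis.png')
plt.show()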

View File

@@ -13,7 +13,7 @@ cut_range = np.arange(-cut_window * sampling_rate, 0, 1)
 window = 1
 # norm: -150, 150, 300 aa, #ac, aj??
-data = ["2018-11-13-al-invivo-1"]#, "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1",
+data = ["2018-11-13-aa-invivo-1"]#, "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1",
 #"2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1"]
 '''
@@ -67,10 +67,10 @@ for dataset in data:
 binary_spikes = np.isin(cut_range, spikes_idx) * 1
 smoothed_data = smooth(binary_spikes, window, 1 / sampling_rate)
 train = smoothed_data[window:beat_window+window]
-norm_train = train*1000#/spikerate
+norm_train = train*1000/spikerate
 rep_rates.append(np.std(norm_train))#/spikerate)
 break
-df_rate = np.median(rep_rates)/spikerate
+df_rate = np.mean(rep_rates)
 #embed()
 #exit()
 if df in rates.keys():
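The two changed lines move the normalization by the cell's baseline spike rate into the per-repetition loop (each smoothed train is scaled and divided by spikerate before its standard deviation is stored) and replace the median of those values with a mean. A rough numpy sketch of the new computation; smooth() is a stand-in for the project's helper, and the spike indices and spikerate value are placeholders:

import numpy as np

sampling_rate = 40                     # kHz, as in the script
cut_window = 20                        # ms of data kept before each chirp (placeholder)
cut_range = np.arange(-cut_window * sampling_rate, 0, 1)
window = 1
spikerate = 250.0                      # placeholder baseline firing rate in Hz

def smooth(binary, width, dt):
    # stand-in for the project's smooth(): boxcar firing-rate estimate
    kernel = np.ones(int(width))
    return np.convolve(binary, kernel, mode='same') / (int(width) * dt)

rep_rates = []
for _ in range(5):                     # a few fake repetitions
    spikes_idx = np.random.choice(cut_range, 30, replace=False)
    binary_spikes = np.isin(cut_range, spikes_idx) * 1
    smoothed_data = smooth(binary_spikes, window, 1 / sampling_rate)
    train = smoothed_data[window:]     # the script slices up to beat_window + window
    norm_train = train * 1000 / spikerate   # scale and normalize per repetition
    rep_rates.append(np.std(norm_train))

df_rate = np.mean(rep_rates)           # mean across repetitions instead of the median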

View File

@@ -8,7 +8,8 @@ from IPython import embed
 # define sampling rate and data path
 sampling_rate = 40 #kHz
 data_dir = "../data"
-#dataset = "2018-11-13-al-invivo-1"
+dataset = "2018-11-13-ah-invivo-1"
 '''
 data = ["2018-11-09-ad-invivo-1", "2018-11-09-ae-invivo-1", "2018-11-09-ag-invivo-1", "2018-11-13-aa-invivo-1",
 "2018-11-13-ac-invivo-1", "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1",
@@ -18,9 +19,10 @@ data = ["2018-11-09-ad-invivo-1", "2018-11-09-ae-invivo-1", "2018-11-09-ag-inviv
 "2018-11-20-aa-invivo-1", "2018-11-20-ab-invivo-1", "2018-11-20-ac-invivo-1", "2018-11-20-ad-invivo-1",
 "2018-11-20-ae-invivo-1", "2018-11-20-af-invivo-1", "2018-11-20-ag-invivo-1", "2018-11-20-ah-invivo-1",
 "2018-11-20-ai-invivo-1"]
-'''
 data = ["2018-11-13-aa-invivo-1", "2018-11-13-ac-invivo-1", "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1",
 "2018-11-13-ai-invivo-1", "2018-11-13-aj-invivo-1", "2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1"]
+'''
 # parameters for binning, smoothing and plotting
 cut_window = 20
@@ -53,12 +55,12 @@ df_phase_binary = {}
 #embed()
 #exit()
-for dataset in data:
+#for dataset in data:
 spikes = read_chirp_spikes(os.path.join(data_dir, dataset))
 df_map = map_keys(spikes)
 print(dataset)
 # iterate over delta f, repetition, phases and a single chirp
 for deltaf in df_map.keys():
 df_phase_time[deltaf] = {}
 df_phase_binary[deltaf] = {}
 for rep in df_map[deltaf]:
@@ -87,12 +89,12 @@ for dataset in data:
 df_phase_binary[deltaf][idx] = binary_spikes
 # make dictionaries for csi and beat
 csi_trains = {}
 csi_rates = {}
 beat = {}
 # for plotting and calculating iterate over delta f and phases
 for df in df_phase_time.keys():
 csi_trains[df] = []
 csi_rates[df] = []
 beat[df] = []
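The dictionaries seeded in this unchanged context (csi_trains, csi_rates, beat) all share the same shape: one list of values per delta f. A collections.defaultdict gives that shape without the explicit seeding loop; a small sketch with made-up placeholder values, not part of the commit:

from collections import defaultdict

# one list of values per delta f, filled while iterating over the responses
csi_rates = defaultdict(list)
csi_rates[-150].append(0.42)
csi_rates[-150].append(0.37)
csi_rates[300].append(-0.11)

print(sorted(csi_rates.keys()))   # [-150, 300]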
@@ -168,32 +170,37 @@ for dataset in data:
 plt.show()
 '''
-'''
-fig, ax = plt.subplots()
-for i, k in enumerate(sorted(csi_rates.keys())):
-ax.scatter(np.ones(len(csi_rates[k]))*i, csi_rates[k], s=20)
+upper_limit = np.max(sorted(csi_rates.keys()))+30
+lower_limit = np.min(sorted(csi_rates.keys()))-30
+
+fig, ax = plt.subplots()
+for i, k in enumerate(sorted(csi_rates.keys())):
+ax.scatter(np.ones(len(csi_rates[k]))*k, csi_rates[k], s=20)
 #ax.plot(i, np.mean(csi_rates[k]), 'o', markersize=15)
-ax.legend(sorted(csi_rates.keys()), loc='upper left', bbox_to_anchor=(1.04, 1))
-ax.plot(np.arange(-1, len(csi_rates.keys())+1), np.zeros(len(csi_rates.keys())+2), 'silver', linewidth=2, linestyle='--')
+#ax.legend(sorted(csi_rates.keys()), loc='upper left', bbox_to_anchor=(1.04, 1))
+ax.plot([lower_limit, upper_limit], np.zeros(2), 'silver', linewidth=2, linestyle='--')
 #ax.set_xticklabels(sorted(csi_rates.keys()))
 fig.tight_layout()
 plt.show()
-fig, ax = plt.subplots()
-for i, k in enumerate(sorted(csi_trains.keys())):
+'''
+fig, ax = plt.subplots()
+for i, k in enumerate(sorted(csi_trains.keys())):
 ax.plot(np.ones(len(csi_trains[k]))*i, csi_trains[k], 'o')
 #ax.plot(i, np.mean(csi_trains[k]), 'o', markersize=15)
 ax.legend(sorted(csi_trains.keys()), loc='upper left', bbox_to_anchor=(1.04, 1))
 ax.plot(np.arange(-1, len(csi_trains.keys())+1), np.zeros(len(csi_trains.keys())+2), 'silver', linewidth=2, linestyle='--')
 #ax.set_xticklabels(sorted(csi_trains.keys()))
 fig.tight_layout()
 plt.show()
 '''
-fig, ax = plt.subplots()
-for i, k in enumerate(sorted(beat.keys())):
+'''
+fig, ax = plt.subplots()
+for i, k in enumerate(sorted(beat.keys())):
 ax.plot(np.ones(len(beat[k]))*i, beat[k], 'o')
 ax.legend(sorted(beat.keys()), loc='upper left', bbox_to_anchor=(1.04, 1))
 #ax.set_xticklabels(sorted(csi_trains.keys()))
 fig.tight_layout()
 plt.show()
+'''
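The rewritten scatter block plots each CSI value at its actual delta f instead of at the enumeration index, so the x axis keeps the real spacing between stimulus conditions, and the dashed zero line now spans limits derived from the smallest and largest delta f. A self-contained sketch with placeholder values (the script fills csi_rates from the recorded spike trains):

import numpy as np
import matplotlib.pyplot as plt

# placeholder CSI values per delta f in Hz
csi_rates = {-150: [0.2, 0.35, 0.1], -50: [0.05, -0.02], 50: [-0.1, 0.0], 300: [0.4, 0.5]}

upper_limit = np.max(sorted(csi_rates.keys())) + 30
lower_limit = np.min(sorted(csi_rates.keys())) - 30

fig, ax = plt.subplots()
for i, k in enumerate(sorted(csi_rates.keys())):
    # x position is the actual delta f of the condition, not its index
    ax.scatter(np.ones(len(csi_rates[k])) * k, csi_rates[k], s=20)
# dashed zero line across the plotted delta f range
ax.plot([lower_limit, upper_limit], np.zeros(2), 'silver', linewidth=2, linestyle='--')
fig.tight_layout()
plt.show()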