Ramona 2018-11-28 16:32:15 +01:00
parent 99274191f8
commit 7bb027ba22
3 changed files with 35 additions and 13 deletions

View File

@@ -5,7 +5,13 @@ import numpy as np
data_dir = '../data'
#dataset = '2018-11-09-ad-invivo-1'
data = ["2018-11-09-ad-invivo-1", "2018-11-13-aa-invivo-1", "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1", "2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1", "2018-11-14-ac-invivo-1", "2018-11-14-af-invivo-1", "2018-11-14-ag-invivo-1", "2018-11-14-ak-invivo-1", "2018-11-14-al-invivo-1", "2018-11-14-am-invivo-1", "2018-11-14-an-invivo-1", "2018-11-20-ab-invivo-1", "2018-11-20-ac-invivo-1", "2018-11-20-ad-invivo-1", "2018-11-20-af-invivo-1", "2018-11-20-ag-invivo-1", "2018-11-20-ah-invivo-1", "2018-11-20-ai-invivo-1"]
data = ["2018-11-09-ad-invivo-1",
"2018-11-13-aa-invivo-1", "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1",
"2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1",
"2018-11-14-ac-invivo-1", "2018-11-14-af-invivo-1", "2018-11-14-ag-invivo-1", "2018-11-14-ak-invivo-1",
"2018-11-14-al-invivo-1", "2018-11-14-am-invivo-1", "2018-11-14-an-invivo-1",
"2018-11-20-ab-invivo-1", "2018-11-20-ac-invivo-1", "2018-11-20-ad-invivo-1", "2018-11-20-af-invivo-1",
"2018-11-20-ag-invivo-1", "2018-11-20-ah-invivo-1", "2018-11-20-ai-invivo-1"]
for dataset in data:
# read eod and time of baseline
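
This hunk reflows the hard-coded list of baseline recordings over several lines; the script then loops over all datasets and reads the baseline EOD of each. A minimal sketch of that pattern, assuming `read_baseline_eod` (used here and in the second file below) returns the time axis and EOD trace of one recording; the frequency estimate at the end is purely illustrative:

    import os
    import numpy as np
    from read_baseline_data import *  # assumed to provide read_baseline_eod

    data_dir = '../data'
    data = ["2018-11-09-ad-invivo-1", "2018-11-13-aa-invivo-1"]  # shortened example list

    for dataset in data:
        # read EOD trace and matching time axis of the baseline recording
        time, eod = read_baseline_eod(os.path.join(data_dir, dataset))
        # rough EOD frequency estimate from upward zero crossings (illustration only)
        upward = np.nonzero(np.diff(np.sign(eod - np.mean(eod))) > 0)[0]
        print(dataset, len(upward) / (time[-1] - time[0]), 'Hz')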

View File

@@ -8,7 +8,8 @@ from IPython import embed
# plot and data values
inch_factor = 2.54
data_dir = '../data'
dataset = '2018-11-09-ad-invivo-1'
#dataset = '2018-11-09-ad-invivo-1'
dataset = '2018-11-14-al-invivo-1'
# read eod and time of baseline
time, eod = read_baseline_eod(os.path.join(data_dir, dataset))
@@ -29,8 +30,9 @@ plt.yticks(fontsize = 18)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
fig.tight_layout()
plt.show()
#plt.show()
plt.savefig('isis.pdf')
#plt.savefig('isis.pdf')
exit()
# calculate coefficient of variation
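
This hunk toggles the `plt.show()` / `plt.savefig('isis.pdf')` calls; the `exit()` stops the script before the coefficient of variation is computed. For reference, the CV of a spike train is the standard deviation of the inter-spike intervals divided by their mean; a minimal sketch, assuming `spike_times` is a 1-D array of spike times:

    import numpy as np

    def isi_cv(spike_times):
        """Coefficient of variation of the inter-spike intervals."""
        isis = np.diff(spike_times)          # inter-spike intervals
        return np.std(isis) / np.mean(isis)  # CV = std(ISI) / mean(ISI)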

View File

@@ -1,6 +1,7 @@
import matplotlib.pyplot as plt
import numpy as np
from read_chirp_data import *
from read_baseline_data import *
from utility import *
from IPython import embed
@@ -11,27 +12,37 @@ cut_window = 40
cut_range = np.arange(-cut_window * sampling_rate, 0, 1)
window = 1
'''
# norm: -150, 150, 300
data = ["2018-11-13-aa-invivo-1", "2018-11-13-ac-invivo-1","2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1",
"2018-11-13-ai-invivo-1", "2018-11-13-aj-invivo-1", "2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1"]
# norm: -150, 150, 300 aa, #ac, aj??
data = ["2018-11-13-al-invivo-1"]#, "2018-11-13-ad-invivo-1", "2018-11-13-ah-invivo-1", "2018-11-13-ai-invivo-1",
#"2018-11-13-ak-invivo-1", "2018-11-13-al-invivo-1"]
'''
# norm: -50
data = ["2018-11-20-aa-invivo-1", "2018-11-20-ab-invivo-1", "2018-11-20-ac-invivo-1","2018-11-20-ad-invivo-1",
"2018-11-20-ae-invivo-1", "2018-11-20-af-invivo-1", "2018-11-20-ag-invivo-1", "2018-11-20-ah-invivo-1",
"2018-11-20-ai-invivo-1"]
'''
data = ["2018-11-14-aa-invivo-1", "2018-11-14-ac-invivo-1", "2018-11-14-ad-invivo-1", "2018-11-14-af-invivo-1",
"2018-11-14-ag-invivo-1", "2018-11-14-ah-invivo-1", "2018-11-14-ai-invivo-1", "2018-11-14-ak-invivo-1",
"2018-11-14-al-invivo-1", "2018-11-14-am-invivo-1", "2018-11-14-an-invivo-1"]
'''
#data = ["2018-11-09-ad-invivo-1", "2018-11-14-af-invivo-1"]
rates = {}
for dataset in data:
print(dataset)
# read baseline spikes
base_spikes = read_baseline_spikes(os.path.join(data_dir, dataset))
base_spikes = base_spikes[1000:2000]
spikerate = len(base_spikes)/base_spikes[-1]
print(spikerate)
# read spikes during chirp stimulation
spikes = read_chirp_spikes(os.path.join(data_dir, dataset))
df_map = map_keys(spikes)
print(dataset)
# iterate over df
for df in df_map.keys():
'''
if df == 50:
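
This hunk estimates a baseline spike rate (`spikerate`) that the later hunks use for normalization. A sketch of that estimate, assuming `read_baseline_spikes` returns spike times in seconds as a 1-D array; note that when the slice does not start at the beginning of the recording, dividing the spike count by the time of the last spike (as written in the hunk) underestimates the rate, so the sketch divides by the interval the slice actually spans:

    import os
    from read_baseline_data import *  # assumed to provide read_baseline_spikes

    # hypothetical dataset path for illustration
    base_spikes = read_baseline_spikes(os.path.join('../data', '2018-11-13-al-invivo-1'))
    base_spikes = base_spikes[1000:2000]  # restrict to a stretch of the baseline recording
    # spikes per second over the interval actually spanned by the slice
    spikerate = len(base_spikes) / (base_spikes[-1] - base_spikes[0])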
@@ -39,8 +50,8 @@ for dataset in data:
else:
continue
'''
print(df)
#print(df)
rep_rates = []
beat_duration = int(abs(1 / df) * 1000)
beat_window = 0
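
`beat_duration` converts the difference frequency df (in Hz) into the length of one beat cycle in milliseconds. A one-line check with a hypothetical value:

    df = -50                                 # difference frequency in Hz (hypothetical)
    beat_duration = int(abs(1 / df) * 1000)  # one beat period in ms -> 20
    assert beat_duration == 20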
@@ -56,9 +67,12 @@ for dataset in data:
binary_spikes = np.isin(cut_range, spikes_idx) * 1
smoothed_data = smooth(binary_spikes, window, 1 / sampling_rate)
train = smoothed_data[window:beat_window+window]
rep_rates.append(np.std(train))
norm_train = train*1000#/spikerate
rep_rates.append(np.std(norm_train))#/spikerate)
break
df_rate = np.mean(rep_rates)
df_rate = np.median(rep_rates)/spikerate
#embed()
#exit()
if df in rates.keys():
rates[df].append(df_rate)
else:
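
The last hunk swaps `np.mean(rep_rates)` for `np.median(rep_rates)/spikerate`, normalizing the per-df summary by the baseline spike rate, and collects one value per difference frequency in the `rates` dictionary. A sketch of that accumulation step with hypothetical numbers; `defaultdict(list)` is shown as an alternative to the explicit `if df in rates.keys()` branch:

    import numpy as np
    from collections import defaultdict

    # rates maps each difference frequency (df) to one value per dataset;
    # defaultdict(list) replaces the explicit if/else branch used in the diff
    rates = defaultdict(list)

    # hypothetical numbers for illustration only
    df = -50                      # difference frequency in Hz
    rep_rates = [0.8, 1.1, 0.9]   # std of the smoothed spike train, one value per beat
    spikerate = 120.0             # baseline firing rate in Hz

    df_rate = np.median(rep_rates) / spikerate  # modulation normalized by baseline rate
    rates[df].append(df_rate)

Using `defaultdict` keeps the accumulation branch-free; the original `if df in rates.keys(): ... else: ...` pattern behaves the same way.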