# movingcomb/icr_analysis.py
import numpy as np
import matplotlib.mlab as mlab
from scipy.signal import convolve


def avg_nested_lists(nested_vals):
    """
    Column-wise average of a ragged list of lists: returns a 1-D list whose
    i-th entry is the nanmean of element i across every sublist long enough
    to contain it.
    """
    output = []
    maximum = max(len(lst) for lst in nested_vals)  # length of the longest sublist
    for index in range(maximum):  # go through each index of the longest list
        temp = []
        for lst in nested_vals:  # collect this column from every sublist that has it
            if index < len(lst):
                temp.append(lst[index])
        output.append(np.nanmean(temp))
    return output
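

# A minimal usage sketch for avg_nested_lists (illustrative, not part of the
# original analysis): ragged per-trial values are averaged column-wise for as
# long as at least one sublist still has samples.
def _demo_avg_nested_lists():
    ragged = [[1.0, 2.0, 3.0], [3.0, 4.0], [5.0]]
    print(avg_nested_lists(ragged))  # -> [3.0, 3.0, 3.0]

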
def fourier_psd(avg_convolve_spikes, sampling_rate):
    """
    Power spectral density of the averaged convolved spike train
    (3 s windows, 50 % overlap, mean-detrended).
    """
    p, freq = mlab.psd(avg_convolve_spikes, NFFT=int(sampling_rate * 3),
                       noverlap=int(sampling_rate * 1.5), Fs=sampling_rate,
                       detrend=mlab.detrend_mean)
    std_four = np.std(p[5:])  # std of the power, skipping the lowest-frequency bins
    mn_four = np.mean(p)
    return p, freq, std_four, mn_four
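

# A minimal sketch of fourier_psd on synthetic data (all values illustrative):
# a 5 Hz modulation in the input should appear as the peak of the spectrum.
def _demo_fourier_psd():
    sampling_rate = 1000
    t = np.arange(0, 10, 1 / sampling_rate)
    rate = np.sin(2 * np.pi * 5 * t)
    p, freq, std_four, mn_four = fourier_psd(rate, sampling_rate)
    print(freq[np.argmax(p)])  # ~5.0 Hz

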
def kernel_estimation_mise(all_spike_trains, sampling_rate):
    """
    Chooses a kernel width per spike train by minimising the MISE-based
    histogram cost (2*mean - var) / bin**2 over candidate bin sizes,
    averaged over all bin alignments (the method of Shimazaki & Shinomoto,
    2007, reconstructed here from the cost formula sketched in the original
    comments). Returns a list with one sigma, in seconds, per spike train.
    """
    sigmas = []
    for spike_train in all_spike_trains:
        spike_train = spike_train[1]
        spike_train = spike_train - spike_train[0]  # shift spike times to start at 0
        # Boolean array over the trial: 1 where a spike happened, 0 where not
        trial_length = int((spike_train[-1] - spike_train[0]) * sampling_rate)
        trial_bool = np.zeros(trial_length + 1)
        spike_indx = (spike_train * sampling_rate).astype(int)
        trial_bool[spike_indx] = 1
        bin_sizes = np.arange(2, len(trial_bool) / 2, dtype=int)
        cost_averages = []
        for bin_size in bin_sizes:
            cost_per_bin = []
            for slide in range(bin_size):  # average the cost over all bin alignments
                # spike counts in consecutive full windows of width bin_size
                start_win = slide
                stop_win = slide + bin_size
                counts = []
                while stop_win <= len(trial_bool):
                    counts.append(np.sum(trial_bool[start_win:stop_win]))
                    start_win += bin_size
                    stop_win += bin_size
                mean_bin = np.mean(counts)
                var_bin = np.var(counts)
                cost = (2 * mean_bin - var_bin) / bin_size ** 2
                cost_per_bin.append(cost)
            cost_averages.append(np.mean(cost_per_bin))
        best_bin = bin_sizes[np.argmin(cost_averages)]
        sigma = best_bin / sampling_rate / 2
        sigmas.append(sigma)
    return sigmas
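

# A sketch of the expected input layout for kernel_estimation_mise (an
# assumption read off the indexing above: each entry carries its spike-time
# array in position 1). Kept tiny because the full slide average is O(n^2).
def _demo_kernel_estimation_mise():
    sampling_rate = 100
    rng = np.random.default_rng(0)
    spike_times = np.sort(rng.uniform(0, 1, 30))
    print(kernel_estimation_mise([(0, spike_times)], sampling_rate))

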
def gaussian_convolve(all_spike_trains, fxn, sampling_rate, time):
    """
    Takes an array of spike trains of different lengths, convolves each with
    a Gaussian kernel, and returns the trial-averaged convolved spike train.
    """
    all_convolve_spikes = []
    all_pos = []  # trial start times (collected but currently unused)
    for spike_train in all_spike_trains:
        spike_train = spike_train[1]
        all_pos.append(spike_train[0])
        spike_train = spike_train - spike_train[0]  # shift spike times to start at 0
        # Boolean array over the trial: 1 where a spike happened, 0 where not
        trial_length = int((spike_train[-1] - spike_train[0]) * sampling_rate)
        trial_bool = np.zeros(trial_length + 1)
        spike_indx = (spike_train * sampling_rate).astype(int)
        trial_bool[spike_indx] = 1
        # convolve the Gaussian kernel with the boolean spike list
        time_cutoff = int(time * sampling_rate)  # number of samples the trial should run
        convolve_spikes = np.asarray(convolve(trial_bool, fxn, mode='valid'))
        all_convolve_spikes.append(convolve_spikes[0:time_cutoff])
    # truncate every trace to the shortest one, so trials shorter than the
    # trial time can still be stacked and averaged
    cutoff = min([len(i) for i in all_convolve_spikes])
    for ix, convolved in enumerate(all_convolve_spikes):
        all_convolve_spikes[ix] = convolved[:cutoff]
    avg_convolve_spikes = np.mean(all_convolve_spikes, 0)
    return avg_convolve_spikes
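

# An end-to-end sketch (all parameters illustrative, not from the original
# analysis): build a Gaussian kernel, average the convolved synthetic spike
# trains, and inspect the PSD peak of the result.
if __name__ == '__main__':
    sampling_rate = 1000
    sigma = 0.01  # assumed kernel width in seconds
    kernel_t = np.arange(-4 * sigma, 4 * sigma, 1 / sampling_rate)
    fxn = np.exp(-0.5 * (kernel_t / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    rng = np.random.default_rng(1)
    trains = [(0, np.sort(rng.uniform(0, 5, 200))) for _ in range(3)]
    avg = gaussian_convolve(trains, fxn, sampling_rate, time=4)
    p, freq, std_four, mn_four = fourier_psd(avg, sampling_rate)
    print(len(avg), freq[np.argmax(p)])
    _demo_avg_nested_lists()
    _demo_fourier_psd()
    _demo_kernel_estimation_mise()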