diff --git a/icr_analysis.py b/icr_analysis.py
index 940063c..a537fe5 100644
--- a/icr_analysis.py
+++ b/icr_analysis.py
@@ -4,7 +4,7 @@ from scipy.signal import convolve
 import matplotlib.mlab as mlab
 
 
-def avgNestedLists(nested_vals):
+def avg_nested_lists(nested_vals):
     """
     Averages a 2-D array and returns a 1-D array of all of the columns
     averaged together, regardless of their dimensions.
@@ -22,39 +22,88 @@ def avgNestedLists(nested_vals):
         output.append(np.nanmean(temp))
     return output
 
+def fourier_psd(avg_convolve_spikes, sampling_rate):
+
+    p, freq = mlab.psd(avg_convolve_spikes, NFFT=int(sampling_rate * 3), noverlap=int(sampling_rate * 1.5),
+                       Fs=sampling_rate,
+                       detrend=mlab.detrend_mean)
+    std_four = np.std(p[5:])  # spread of the PSD (skipping DC-adjacent bins); was np.std(freq[5:]) — freq is the data-independent frequency axis
+    mn_four = np.mean(p)  # mean power; was np.mean(freq), which is fixed by Fs/NFFT and carries no signal information
+
+    return p, freq, std_four, mn_four
+
+
+def kernel_estimation_mise(all_spike_trains, sampling_rate):
+
+    for spike_train in all_spike_trains:
+
+        spike_train = spike_train[1]
+        spike_train = spike_train - spike_train[0]  # changing spike train to start at 0 (subtracting baseline)
+
+        # Boolean list in length of trial length, where 1 means spike happened, 0 means no spike
+        trial_length = int((spike_train[-1] - spike_train[0]) * sampling_rate)
+        trial_bool = np.zeros(trial_length + 1)
+        spike_indx = (spike_train * sampling_rate).astype(int)  # np.int was deprecated in NumPy 1.20 and removed in 1.24
+        trial_bool[spike_indx] = 1
+
+        # candidate bin widths in samples, up to half the trial length
+        # (deduplicated: bin_sizes was assigned twice, first with start=2 then start=1; keeping the effective one)
+        cost_averages = []
+        bin_sizes = np.arange(1, (len(trial_bool)/2), dtype=int)
+        for bin_width in bin_sizes:  # renamed from `bin` to avoid shadowing the builtin
+
+            cost_per_bin = []
+            start_win = 0
+            stop_win = int(bin_width)
+
+            bin_slides = np.arange(bin_width)
+            for slid in bin_slides:
+                # TODO: implement the windowed MISE cost computation (see commented sketch below)
+                pass  # embed()/quit() debug residue removed — `embed` was never imported, so this raised NameError
+        #         #spike_count = np.sum(trial_bool[start_win:stop_win])
+        #         #spike_var = np.var(trial_bool[start_win:stop_win])
+        #
+        #         start_win = start_win + bin
+        #         stop_win = stop_win + bin
+        #         cost = (2*mean_bin - var_bin)/(bin**2)
+        #     cost_per_bin.append(cost)
+        #     cost_averages.append(np.mean(cost_per_bin))
+
+
+        #sigma = best_bin/sampling_rate/2
+
+
 
 def gaussian_convolve(all_spike_trains, fxn, sampling_rate, time):
     """
     Takes an array of spike trains of different sizes,
     convolves it with a gaussian, returns the average gaussian convolve spikes
     """
+
     all_convolve_spikes = []
+    all_pos = []
+
     for spike_train in all_spike_trains:
-        time_cutoff = time * sampling_rate
-        trial_length = int((spike_train[-1] - spike_train[0]) * sampling_rate)
+        spike_train = spike_train[1]
+        all_pos.append(spike_train[0])
         spike_train = spike_train - spike_train[0]  # changing spike train to start at 0 (subtracting baseline)
-        trial_time = np.arange(0, (trial_length + 1), 1)
-        trial_bool = np.zeros(len(trial_time))
-        #Boolean list in length of trial length, where 1 means spike happened, 0 means no spike
+
+        # Boolean list in length of trial length, where 1 means spike happened, 0 means no spike
+        trial_length = int((spike_train[-1] - spike_train[0]) * sampling_rate)
+        trial_bool = np.zeros(trial_length + 1)
         spike_indx = (spike_train * sampling_rate).astype(np.int)
         trial_bool[spike_indx] = 1
 
-        # trial_bool = trial_bool[30000:(len(trial_bool)-30000)]
-        convolve_spikes = np.asarray([convolve(trial_bool, fxn, mode='valid')])  # convolve gaussian with boolean spike list
-        all_convolve_spikes.append(convolve_spikes[0, :][:time_cutoff])
-    #
-    # cutoff = min([len(i) for i in all_convolve_spikes])
-    # for ix, convolved in enumerate(all_convolve_spikes):
-    #     all_convolve_spikes[ix] = all_convolve_spikes[ix][:cutoff]
-    #avg_convolve_spikes = avgNestedLists(all_convolve_spikes)
-    avg_convolve_spikes = np.mean(all_convolve_spikes, 0)
-    return avg_convolve_spikes
+        # convolve gaussian with boolean spike list
+        time_cutoff = int(time * sampling_rate)  # time for which trial runs
+        convolve_spikes = np.asarray(convolve(trial_bool, fxn, mode='valid'))
+        all_convolve_spikes.append(convolve_spikes[0:time_cutoff])
 
+    # for trials which are shorter than the trial time
+    cutoff = min([len(i) for i in all_convolve_spikes])
+    for ix, convolved in enumerate(all_convolve_spikes):
+        all_convolve_spikes[ix] = all_convolve_spikes[ix][:cutoff]
 
-def fourier_psd(avg_convolve_spikes, sampling_rate):
-    p, freq = mlab.psd(avg_convolve_spikes, NFFT=sampling_rate * 3, noverlap=sampling_rate * 1.5,
-                       Fs=sampling_rate,
-                       detrend=mlab.detrend_mean)
-    std_four = np.std(freq[5:])
-    mn_four = np.mean(freq)
-    return p, freq, std_four, mn_four
+    avg_convolve_spikes = np.mean(all_convolve_spikes, 0)
+
+    return avg_convolve_spikes