[frontend] import filestim data from nix files, add several methods

Jan Grewe 2020-08-05 19:19:24 +02:00
parent 32f4f23527
commit 07b60c6622
3 changed files with 291 additions and 82 deletions


@@ -420,7 +420,7 @@ class RePro:
     @property
     def dataset(self):
         datasets = (Cells & "cell_id = '%s'" % self.cell_id) * CellDatasetMap * Datasets
-        d = datasets.proj('dataset_id', 'data_source', 'experimenter', 'setup', 'recording_date',
+        d = datasets.proj('dataset_id', 'data_source', 'data_host', 'experimenter', 'setup', 'recording_date',
                           'quality', 'comment', 'duration', 'has_nix').fetch(limit=1, as_dict=True)[0]
         del d["cell_id"]
         return Dataset(tuple=d)
@@ -447,7 +447,8 @@ class RePro:
         return [Stimulus(tuple=s) for s in stims]
 
     @staticmethod
-    def find(name=None, cell_id=None, cell_type=None, species=None, settings=[], quality=None, test=False):
+    def find(name=None, cell_id=None, cell_type=None, species=None, settings=[], quality=None,
+             min_date=None, max_date=None, test=False):
         """ Finds Repro runs in the database. When called without arguments, all RePro runs are
         found and returned.
         Search can be narrowed by providing further search hints.
@@ -461,6 +462,8 @@ class RePro:
             species (str, optional): The species name, or parts of it. Defaults to None.
             settings (list of string, optional): List of repro settings e.g. ["contrast: 20", "am: false"]. An AND connection is assumed. Defaults to None.
             quality (str, optional): The quality assessment. Defaults to None.
+            min_date (datetime, optional): the minimum recording date. Dates may be given as datetime objects or strings of the format "2010.01.01" or "2010-01-01". Defaults to None.
+            max_date (datetime, optional): the maximum recording date. Defaults to None.
             test (bool, optional): defines whether or not the database matches should be fetched from the database.
         Returns:
             list of fishbook.RePro: list of results or empty list if test == True
@@ -481,11 +484,16 @@ class RePro:
             cells = cells & "cell_type like '%{0:s}%'".format(cell_type)
         if species:
             cells = cells & "species like '%{0:s}%'".format(species)
+        if min_date and isinstance(min_date, (dt.date, str)):
+            cells = cells & "recording_date >= '%s'" % min_date
+        if max_date and isinstance(max_date, (dt.date, str)):
+            cells = cells & "recording_date < '%s'" % max_date
         if quality:
             cells = cells & "quality like '{0:s}'".format(quality)
 
         p = cells.proj("quality", "experimenter")
         repros = repros & p
         results = []
         total = len(repros)
         if not test:
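With the new date filters in place, a query can be narrowed to a recording period. A minimal usage sketch (repro name, cell type, and dates are examples, not taken from the commit):

    # Hypothetical query using the extended signature; the arguments are examples.
    repros, total = RePro.find("BaselineActivity", cell_type="p-unit",
                               min_date="2018-01-01", max_date="2019-01-01")
    print("found %i matching repro runs" % total)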
@@ -500,7 +508,7 @@ class RePro:
         return self.__tuple.copy()
 
     def __str__(self):
-        str = "RePro: %s\t id: %s\n" % (self.name, self.id)
+        str = "RePro id: %s\t repro name: %s\n" % (self.id, self.name)
         str += "run: %i\t on cell: %s\n" % (self.run, self.cell_id)
         str += "start time: %s\t duration: %s\n" % (self.start, self.duration)
         return str
@@ -537,11 +545,13 @@ class Stimulus:
     @property
     def cell(self):
-        return Cells & "cell_id = %s" % self.__tuple["cell_id"]
+        embed()  # FIXME
+        return Cells & ("cell_id = %s" % self.__tuple["cell_id"])
 
     @property
     def repro(self):
-        return Repros & "repro_id = %s" % self.__tuple["repro_id"]
+        embed()  # FIXME
+        return Repros & ("repro_id = %s" % self.__tuple["repro_id"])
 
     @property
     def start_time(self):


@@ -1,5 +1,5 @@
 from .frontend_classes import Dataset, RePro, Stimulus
-from .util import BoltzmannFit, unzip_if_needed, gaussian_kernel, zero_crossings
+from .util import BoltzmannFit, unzip_if_needed, gaussian_kernel, zero_crossings, spike_times_to_rate
 import numpy as np
 import nixio as nix
 from scipy.stats import circstd
@@ -7,6 +7,7 @@ from scipy.stats import circstd
 import os
 import subprocess
 from tqdm import tqdm
+import yaml
 from IPython import embed
@@ -14,6 +15,8 @@ from IPython import embed
 class BaselineData:
     """
     Class representing the Baseline data that has been recorded within a given Dataset.
+    This class provides access to basic measures estimated from the baseline activity.
     """
     def __init__(self, dataset: Dataset):
         self.__spike_data = []
@@ -66,8 +69,11 @@ class BaselineData:
     def serial_correlation(self, max_lags=50):
         """
         Returns the serial correlation of the interspike intervals.
-        @param max_lags: The number of lags to take into account
-        @return: the serial correlation as a function of the lag
+        Args:
+            max_lags (int, optional): The number of lags to take into account
+        Returns:
+            list of float: the serial correlation as a function of the lag
         """
         scs = []
         for sd in self.__spike_data:
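For readers unfamiliar with the measure: the serial correlation at lag k is the Pearson correlation between interspike intervals that lie k intervals apart. A minimal stand-alone sketch of that computation (not the class internals, which are only partially shown in this hunk):

    import numpy as np

    def isi_serial_correlation(spike_times, max_lags=50):
        isis = np.diff(spike_times)  # interspike intervals
        return [np.corrcoef(isis[:-lag], isis[lag:])[0, 1] for lag in range(1, max_lags)]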
@@ -79,13 +85,24 @@ class BaselineData:
         return scs
 
     def circular_std(self):
+        """The circular standard deviation of the baseline spikes. The circular std. is given in radians.
+        Returns:
+            list of float: for each run of the baseline RePro there will be one entry.
+        """
         circular_stds = []
         for i in range(self.size):
             phases = self.__spike_phases(index=i)
             circular_stds.append(circstd(phases))
         return circular_stds
 
+    @property
     def eod_frequency(self):
+        """The average baseline EOD frequency in Hz.
+        Returns:
+            float: the EOD frequency averaged across runs. Given in Hz.
+        """
         eod_frequencies = []
         for i in range(self.size):
             eod, time = self.eod(i)
@@ -105,6 +122,15 @@ class BaselineData:
         return phases
 
     def eod_times(self, index=0, interpolate=True):
+        """The times of the detected EODs.
+        Args:
+            index (int, optional): The run of the BaselineActivity RePro. Defaults to 0.
+            interpolate (bool, optional): Defines whether a simple threshold mechanism is used or times are interpolated. Defaults to True.
+        Returns:
+            numpy.ndarray: the eod times.
+        """
         if index >= self.size:
             return None
         if len(self.__eod_times) < len(self.__eod_data):
@@ -135,11 +161,22 @@ class BaselineData:
             index (int, optional): If the baseline activity has been recorded several times, the index can be given. Defaults to 0.
 
         Returns:
-            : [description]
+            numpy.ndarray: the spike times
         """
         return self.__spike_data[index] if len(self.__spike_data) >= index else None
 
     def membrane_voltage(self, index: int=0):
+        """The membrane voltage recorded in a given run of the BaselineActivity RePro.
+        Args:
+            index (int, optional): The run index. Defaults to 0.
+        Raises:
+            IndexError: if the index exceeds the number of runs.
+        Returns:
+            numpy.ndarray: the time axis.
+            numpy.ndarray: the membrane voltage.
+        """
         if index >= self.size:
             raise IndexError("Index %i out of bounds for size %i!" % (index, self.size))
         if not self.__dataset.has_nix:
@@ -158,18 +195,33 @@ class BaselineData:
             data = t.retrieve_data("V-1")[:]
             time = np.asarray(t.references["V-1"].dimensions[0].axis(len(data)))
         except:
-            data = np.empty()
-            time = np.empty()
+            data = np.empty(0)
+            time = np.empty(0)
         f.close()
         return time, data
 
     def eod(self, index: int=0):
+        """Returns the EOD data for a given run of the BaselineActivity RePro.
+        Args:
+            index (int, optional): The run index. Defaults to 0.
+        Returns:
+            numpy.ndarray: The eod trace.
+            numpy.ndarray: A matching time axis starting at time zero.
+        """
         eod = self.__eod_data[index] if len(self.__eod_data) >= index else None
         time = np.arange(len(eod)) / self.__dataset.samplerate
         return eod, time
 
     @property
     def burst_index(self):
+        """Fraction of spikes that occur in intervals of less than 1.5 times the EOD period.
+        Returns:
+            list of float: burst indices for each repro run.
+        """
         bi = []
         for i, sd in enumerate(self.__spike_data):
             if len(sd) < 2:
@@ -182,6 +234,11 @@ class BaselineData:
     @property
     def coefficient_of_variation(self):
+        """Coefficient of variation of the interspike intervals.
+        Returns:
+            list of float: for each baseline repro run a single value of the CV.
+        """
         cvs = []
         for d in self.__spike_data:
             isis = np.diff(d)
@@ -190,6 +247,12 @@ class BaselineData:
     @property
     def vector_strength(self):
+        """The vector strength with which the spikes lock to the fish's own EOD.
+        Returns:
+            list of float: the vector strength calculated separately for each repro run.
+            list of numpy.ndarray: the spike phases within the EOD period (in radians).
+        """
         vss = []
         spike_phases = []
         for i, sd in enumerate(self.__spike_data):
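The vector strength is the length of the mean vector of the spike phases: values near 0 indicate no locking to the EOD, values near 1 perfect locking. A minimal sketch of the standard formula (the class implementation is not fully shown in this hunk):

    import numpy as np

    def vector_strength(phases):
        # phases: spike phases within the EOD period, in radians
        return np.abs(np.mean(np.exp(1j * np.asarray(phases))))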
@@ -203,6 +266,11 @@ class BaselineData:
     @property
     def size(self):
+        """The number of times the BaselineActivity RePro was run.
+        Returns:
+            int: the number of baseline repro runs.
+        """
         return len(self.__spike_data)
 
     def __str__(self):
@@ -221,7 +289,7 @@ class BaselineData:
         try:
             data = t.retrieve_data("EOD")[:]
         except:
-            data = np.empty()
+            data = np.empty(0)
         f.close()
         return data
@@ -244,6 +312,8 @@ class BaselineData:
             print("Tag not found!")
         try:
             data = t.retrieve_data("Spikes-1")[:]
+            if data[0] < 0:
+                data = data[1:]  # this is related to a nix::RangeDimension bug, should be fixed beyond 1.4.9
         except:
             data = None
@@ -284,19 +354,17 @@ class BaselineData:
         return np.asarray(data)
 
 
 class FIData:
-    """
-    Class representing the data recorded with the relacs FI-Curve repro. The instance will load the data upon
+    """Class representing the data recorded with the relacs FI-Curve repro. The instance will load the data upon
     construction which may take a while.
     FIData offers convenient access to the spike and local EOD data as well as convenience methods to get the
     firing rate and to fit a Boltzmann function to the FI curve.
     """
     def __init__(self, dataset: Dataset):
-        """
-        Constructor.
+        """Constructor.
 
-        :param dataset: The dataset entity for which the fi curve repro data should be loaded.
+        Args:
+            dataset (fishbook.Dataset): The dataset entity for which the fi curve repro data should be loaded.
         """
         self.__spike_data = []
         self.__contrasts = []
@@ -306,7 +374,8 @@ class FIData:
         self.__repros = None
         self.__cell = dataset.cells[0]  # Beware: Assumption that there is only a single cell
         self._get_data()
-        pass
+        if self.size < 1:
+            print("No FICurve data was recorded in dataset %s" % self.__dataset.id)
 
     def _get_data(self):
         if not self.__dataset:
@@ -443,16 +512,19 @@ class FIData:
         """
         The number of recorded trials
 
-        :return: An integer with the number of trials.
+        Returns:
+            int: the number of trials.
         """
         return len(self.__spike_data)
 
     def spikes(self, index=-1):
-        """
-        The spike times recorded in the specified trial(s)
+        """The spike times recorded in the specified trial(s).
 
-        :param index: the index of the trial. Default of -1 indicates that all data should be returned.
-        :return:
+        Args:
+            index (int, optional): the index of the trial. Default of -1 indicates that all data should be returned.
+        Returns:
+            list of numpy.ndarray: the spike trains.
         """
         if 0 <= index < self.size:
             return self.__spike_data[index]
@@ -460,11 +532,13 @@ class FIData:
         return self.__spike_data
 
     def eod(self, index=-1):
-        """
-        The local eod (including the stimulus) measurement of the selected trial(s).
+        """The local eod (including the stimulus) measurement of the selected trial(s).
 
-        :param index: the index of the trial. Default of -1 indicates that all data should be returned.
-        :return: Either two vectors representing time and the local eod or two lists of such vectors
+        Args:
+            index (int, optional): the index of the trial. Default of -1 indicates that all data should be returned.
+        Returns:
+            Either two vectors representing time and the local eod or two lists of such vectors.
         """
         if len(self.__eod_data) == 0:
             print("EOD data not available for old-style relacs data.")
@@ -475,11 +549,13 @@ class FIData:
         return self.__eod_times, self.__eod_data
 
     def contrast(self, index=-1):
-        """
-        The stimulus contrast used in the respective trial(s).
+        """The stimulus contrast used in the respective trial(s).
 
-        :param index: the index of the trial. Default of -1 indicates that all data should be returned.
-        :return: Either a single scalar representing the contrast, or a list of such scalars, one entry for each trial.
+        Args:
+            index (int, optional): the index of the trial. Default of -1 indicates that all data should be returned.
+        Returns:
+            Either a single scalar representing the contrast, or a list of such scalars, one entry for each trial.
         """
         if 0 <= index < self.size:
             return self.__contrasts[index]
@@ -499,22 +575,20 @@ class FIData:
         return self.__eod_times
 
     def rate(self, index=0, kernel_width=0.005):
-        """
-        Returns the firing rate for a single trial.
+        """Returns the firing rate for a single trial. Firing rate estimation using the kernel convolution method.
 
-        :param index: The index of the trial. 0 <= index < size
-        :param kernel_width: The width of the gaussian kernel in seconds
-        :return: tuple of time and rate
+        Args:
+            index (int, optional): The index of the trial. 0 <= index < size
+            kernel_width (float, optional): The width of the gaussian kernel in seconds. Defaults to 0.005 s.
+        Returns:
+            numpy.ndarray: a vector representing time.
+            numpy.ndarray: a vector containing the firing rate.
         """
         t = self.time_axis(index)
-        dt = np.mean(np.diff(t))
         sp = self.spikes(index)
-        binary = np.zeros(t.shape)
-        spike_indices = ((sp - t[0]) / dt).astype(int)
-        binary[spike_indices[(spike_indices >= 0) & (spike_indices < len(binary))]] = 1
-        g = gaussian_kernel(kernel_width, dt)
-        rate = np.convolve(binary, g, mode='same')
-        return t, rate
+        r = spike_times_to_rate(sp, t, kernel_width)
+        return t, r
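The rate estimation is thus delegated to the shared spike_times_to_rate helper added to the .util module (third file below). A usage sketch, reusing the dataset id from the __main__ block at the end of this file:

    dataset = Dataset(dataset_id="2018-09-13-ac-invivo-1")
    fi = FIData(dataset)
    if fi.size > 0:
        time, rate = fi.rate(index=0, kernel_width=0.005)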
     def boltzmann_fit(self, start_time=0.01, end_time=0.05, kernel_width=0.005):
         """
@@ -527,6 +601,9 @@ class FIData:
         :param kernel_width: standard deviation of the Gaussian kernel used for firing rate estimation.
         :return: object of type BoltzmannFit
         """
+        if self.size < 1:
+            print("No FICurve data recorded in dataset %s" % self.__dataset.id)
+            return None
         contrasts = np.zeros(self.size)
         rates = np.zeros(self.size)
         for i in range(self.size):
@@ -555,9 +632,12 @@ class FileStimulusData:
         self.__spike_data = []
         self.__contrasts = []
         self.__stimuli = []
+        self.__delays = []
+        self.__durations = []
         self.__dataset = dataset
         self.__repros = None
         self.__cell = dataset.cells[0]  # Beware: Assumption that there is only a single cell
+        self.__all_spikes = None
         self._get_data()
 
     def _get_data(self):
@@ -565,52 +645,64 @@ class FileStimulusData:
             return
         self.__repros, _ = RePro.find("FileStimulus", cell_id=self.__cell.id)
         for r in self.__repros:
-            sd, c, stims = self.__read_spike_data_from_nix(r) if self.__dataset.has_nix else self.__read_spike_data_from_directory(r)
-            if sd is not None and len(sd) > 1:
-                self.__spike_data.extend(sd)
-                self.__contrasts.extend(c)
+            if self.__dataset.has_nix:
+                spikes, contrasts, stims, delays, durations = self.__read_spike_data_from_nix(r)
+            else:
+                spikes, contrasts, stims, delays, durations = self.__read_spike_data_from_directory(r)  # TODO
+            if spikes is not None and len(spikes) > 1:
+                self.__spike_data.extend(spikes)
+                self.__contrasts.extend(contrasts)
                 self.__stimuli.extend(stims)
+                self.__delays.extend(delays)
+                self.__durations.extend(durations)
             else:
                 continue
 
+    def __find_contrast(self, repro_settings, stimulus_settings, has_nix=True):
+        contrast = 0.0
+        for k in repro_settings.keys():
+            if k.lower() == "contrast":
+                contrast = float(repro_settings[k].split("+")[0]) * (100 if has_nix else 1)
+        if contrast < 0.00001:
+            for k in stimulus_settings.keys():
+                if k.lower() == "contrast":
+                    contrast = float(stimulus_settings[k].split("+")[0]) * (100 if has_nix else 1)
+        return contrast
     def __do_read_spike_data_from_nix(self, mt: nix.pycore.MultiTag, stimulus: Stimulus, repro: RePro):
         spikes = None
         contrast = 0.0
-        stim_file = ""
-        r_settings = repro.settings.split("\n")
-        s_settings = stimulus.settings.split("\n")
+        r_settings = yaml.safe_load(repro.settings.replace("\t", ""))
+        s_settings = yaml.safe_load(stimulus.settings.replace("\t", ""))
+        stim_file = r_settings["file"]
         delay = 0.0
-        for s in r_settings:
-            if "delay:" in s:
-                delay = float(s.split(":")[-1])
-                break
+        if "delay" in map(str.lower, r_settings.keys()):
+            delay = float(r_settings["delay"])
         start_time = stimulus.start_time - delay
         end_time = stimulus.start_time + mt.extents[stimulus.index]
-        contrast = 0.0  # this is a quick fix!!!
-        embed()
-        for s in s_settings:
-            if "Contrast:" in s and "PreContrast" not in s and "\t\t" not in s and "+-" not in s:
-                contrast = float(s.split(":")[-1])
-                break
-        return spikes, contrast, stim_file
+        duration = mt.extents[stimulus.index]
+        contrast = self.__find_contrast(r_settings, s_settings, True)
+        spikes = self.__all_spikes[(self.__all_spikes >= start_time) & (self.__all_spikes < end_time)] - start_time - delay
+        return spikes, contrast, stim_file, delay, duration
+        """
         local_eod = eod_da[start_index_eod:end_index_eod]
+        spikes = self.__all_spikes[(self.__all_spikes >= start_time) & (self.__all_spikes < end_time)] - start_time - delay
         time = np.asarray(eod_da.dimensions[0].axis(end_index_eod - start_index_eod)) - delay
         return spikes, local_eod, time, contrast
         return spikes, contrast, stim_file
+        """
     def __read_spike_data_from_nix(self, repro: RePro):
         spikes = []
         contrasts = []
         stim_files = []
-        stimuli = Stimulus.find(cell_id=repro.cell_id, repro_id=repro.id)
+        delays = []
+        durations = []
+        stimuli, _ = Stimulus.find(cell_id=repro.cell_id, repro_id=repro.id)
         if len(stimuli) == 0:
-            return spikes, contrasts, stim_files
+            return spikes, contrasts, stim_files, delays, durations
         data_source = os.path.join(self.__dataset.data_source, self.__dataset.id + ".nix")
@@ -625,26 +717,100 @@ class FileStimulusData:
             s = stimuli[i]
             if not mt or mt.id != s.multi_tag_id:
                 mt = b.multi_tags[s.multi_tag_id]
-            sp, c, stim = self.__do_read_spike_data_from_nix(mt, s, repro)
+            sp, c, stim, delay, duration = self.__do_read_spike_data_from_nix(mt, s, repro)
             spikes.append(sp)
             contrasts.append(c)
             stim_files.append(stim)
+            delays.append(delay)
+            durations.append(duration)
         f.close()
-        return spikes, contrasts, stim_files
+        return spikes, contrasts, stim_files, delays, durations
 
     def __read_spike_data_from_directory(self, repro: RePro):
-        print("not yet my friend!")
+        print("not yet, my friend!")
         spikes = []
-        contrast = 0.0
-        stim = None
-        return spikes, contrast, stim
+        contrasts = []
+        stim_files = []
+        delays = []
+        durations = []
+        return spikes, contrasts, stim_files, delays, durations
 
     def read_stimulus(self, index=0):
         pass
+    @property
+    def size(self):
+        return len(self.__spike_data)
+
+    def spikes(self, index=-1):
+        if index == -1:
+            return self.__spike_data
+        elif 0 <= index < self.size:
+            return self.__spike_data[index]
+        else:
+            raise IndexError("FileStimulusData: index %i out of bounds for spike data of size %i" % (index, self.size))
+
+    def contrast(self, index=-1):
+        if index == -1:
+            return self.__contrasts
+        elif 0 <= index < self.size:
+            return self.__contrasts[index]
+        else:
+            raise IndexError("FileStimulusData: index %i out of bounds for contrast data of size %i" % (index, self.size))
+
+    def stimulus_files(self, index=-1):
+        if index == -1:
+            return self.__stimuli
+        elif 0 <= index < self.size:
+            return self.__stimuli[index]
+        else:
+            raise IndexError("FileStimulusData: index %i out of bounds for stimuli data of size %i" % (index, self.size))
+
+    def time_axis(self, index=-1):
+        """
+        Get the time axis of a single trial or a list of time-vectors for all trials.
+
+        :param index: the index of the trial. Default of -1 indicates that all data should be returned.
+        :return: Either a single vector representing time, or a list of such vectors, one for each trial.
+        """
+        if 0 <= index < self.size:
+            delay = self.__delays[index]
+            duration = self.__durations[index]
+            return np.arange(delay, duration, 1. / self.__dataset.samplerate)
+        elif index == -1:
+            axes = []
+            for i in range(self.size):
+                delay = self.__delays[i]
+                duration = self.__durations[i]
+                axes.append(np.arange(delay, duration, 1. / self.__dataset.samplerate))
+            return axes
+        else:
+            raise IndexError("FileStimulusData: index %i out of bounds for time axes of size %i" % (index, self.size))
+
+    def rate(self, index=-1, kernel_width=0.005):
+        if index == -1:
+            time_axes = []
+            rates = []
+            for i in range(self.size):
+                t = self.time_axis(i)
+                spikes = self.spikes(i)
+                r = spike_times_to_rate(spikes, t, kernel_width)
+                time_axes.append(t)
+                rates.append(r)
+            return time_axes, rates
+        elif 0 <= index < self.size:
+            t = self.time_axis(index)
+            spikes = self.spikes(index)
+            r = spike_times_to_rate(spikes, t, kernel_width)
+            return t, r
+        else:
+            raise IndexError("FileStimulusData: index %i out of bounds for time axes of size %i" % (index, self.size))
 
 if __name__ == "__main__":
     # dataset = Dataset(dataset_id='2011-06-14-ag')
     dataset = Dataset(dataset_id="2018-09-13-ac-invivo-1")
     # dataset = Dataset(dataset_id='2013-04-18-ac')
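The new FileStimulusData accessors mirror the FIData interface. A usage sketch, assuming a dataset that contains nix data (the id is the one from the __main__ block above):

    dataset = Dataset(dataset_id="2018-09-13-ac-invivo-1")
    fs = FileStimulusData(dataset)
    for i in range(fs.size):
        t, r = fs.rate(i, kernel_width=0.005)
        print(fs.stimulus_files(i), fs.contrast(i), np.mean(r))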


@@ -1,6 +1,25 @@
 import numpy as np
 from scipy.optimize import curve_fit
 
+def spike_times_to_rate(spike_times, time_axis, kernel_width=0.005):
+    """Convert spike times to a rate by means of kernel convolution. A Gaussian kernel of the desired width is used.
+    Args:
+        spike_times (numpy.ndarray): the spike times in seconds.
+        time_axis (numpy.ndarray): the time axis with a proper resolution and extent (in seconds).
+        kernel_width (float, optional): the standard deviation of the Gaussian kernel. Defaults to 0.005.
+    Returns:
+        numpy.ndarray: the firing rate in Hz.
+    """
+    dt = np.mean(np.diff(time_axis))
+    binary = np.zeros(time_axis.shape)
+    spike_indices = ((spike_times - time_axis[0]) / dt).astype(int)
+    binary[spike_indices[(spike_indices >= 0) & (spike_indices < len(binary))]] = 1
+    g = gaussian_kernel(kernel_width, dt)
+    rate = np.convolve(binary, g, mode='same')
+    return rate
 def safe_get_val(dictionary: dict, key, default=None):
     return dictionary[key] if key in dictionary.keys() else default
@@ -45,6 +64,12 @@ def zero_crossings(x, t, interpolate=False):
 def unzip_if_needed(dataset, tracename='trace-1.raw'):
+    """Unzips the given raw trace file if it has not been unzipped yet.
+
+    Args:
+        dataset (str): the path of the dataset folder.
+        tracename (str, optional): the name of the raw trace file. Defaults to 'trace-1.raw'.
+    """
     file_name = os.path.join(dataset, tracename)
     if os.path.exists(file_name):
         return
@@ -54,12 +79,20 @@ def unzip_if_needed(dataset, tracename='trace-1.raw'):
 def gaussian_kernel(sigma, dt):
+    """Creates a Gaussian kernel with an integral of one.
+
+    Args:
+        sigma (float): the standard deviation of the kernel in seconds.
+        dt (float): the temporal resolution, i.e. the sampling interval, in seconds.
+
+    Returns:
+        numpy.ndarray: the kernel.
+    """
     x = np.arange(-4. * sigma, 4. * sigma, dt)
     y = np.exp(-0.5 * (x / sigma) ** 2) / np.sqrt(2. * np.pi) / sigma
     return y
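Because sum(y) approximates 1/dt, the kernel's integral sum(y) * dt is one, which is why convolving a binary spike train with it yields a rate in Hz. A quick check (the sampling parameters are arbitrary):

    dt = 1. / 20000.
    g = gaussian_kernel(sigma=0.005, dt=dt)
    print(np.sum(g) * dt)   # approximately 1.0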
 class BoltzmannFit:
     """
     Class representing a fit of a Boltzmann function to some data.