Forked from jgrewe/fishbook.
The baseline class should be able to read old (non-nix) data.
This commit is contained in:
parent
ffbc4eccce
commit
11cab20371
@ -1,16 +1,79 @@
|
||||
import datajoint as dj
|
||||
import os
|
||||
import numpy as np
|
||||
from database import Dataset, Repro
|
||||
schema = dj.schema("fish_book", locals())
|
||||
|
||||
|
||||
def get_baseline_data(cell_id=None, restriction=None):
    """Fetch the datasets that contain baseline recordings.

    Parameters
    ----------
    cell_id : str, optional
        SQL LIKE pattern matched against ``dataset_id``; if omitted, all
        datasets are considered.
    restriction : dict, optional
        Additional datajoint restriction applied to the Dataset query.

    Returns
    -------
    The restricted Dataset query, or None if nothing matches.
    """
    # BUGFIX: a mutable default ({}) is shared across calls; use None sentinel.
    if restriction is None:
        restriction = {}
    # BUGFIX: the original referenced the undefined name `dataset_name`
    # (NameError at runtime); the parameter is called `cell_id`.
    if cell_id:
        dsets = (Dataset & ("dataset_id like'%s'" % cell_id) & restriction)
    else:
        dsets = (Dataset & restriction)
    # BUGFIX: the original computed `dsets` but never returned it.
    if len(dsets) == 0:
        return None
    return dsets
|
||||
class BaselineData(object):
    """Accessor for the spike data recorded by the BaselineActivity repro
    runs of a single dataset.

    Depending on the dataset's ``has_nix`` flag the data is read either from
    a nix file (not implemented yet) or from the classic relacs directory
    structure (``basespikes1.dat``).
    """

    def __init__(self, dataset: "Dataset"):
        """
        Parameters
        ----------
        dataset : Dataset
            The Dataset entry whose baseline activity should be loaded.
        """
        self.__data = []
        self.__dataset = dataset
        self._get_data()

    def _get_data(self):
        """Read the spike data of every BaselineActivity run of the dataset."""
        self.__data = []
        # BUGFIX: the original fell through and still queried Repro with an
        # empty dataset; bail out early instead.
        if not self.__dataset:
            return
        repros = (Repro & self.__dataset & "repro_name like 'BaselineActivity%'")
        for r in repros:
            self.__data.append(self.__read_data(r))

    def __read_data(self, r: "Repro"):
        # Dispatch on the storage backend of the dataset.
        if self.__dataset["has_nix"]:
            return self.__read_data_from_nix(r)
        return self.__read_data_from_directory(r)

    @property
    def dataset(self):
        """The Dataset entry this instance was created for."""
        return self.__dataset

    @property
    def data(self, index: int = 0):
        """Spike data of run ``index`` (always 0 — see note), or None.

        NOTE(review): because this is a property, callers can never supply
        ``index``; it always defaults to 0. Consider making this a method.
        """
        # BUGFIX: the original condition (len(...) >= index) was always true
        # for index 0, so empty data raised IndexError; it also hard-coded
        # element 0 instead of using `index`.
        return self.__data[index] if index < len(self.__data) else None

    @property
    def size(self):
        """Number of BaselineActivity runs that were read."""
        return len(self.__data)

    def __str__(self):
        # BUGFIX: the original assigned the message to a local named `str`
        # (shadowing the builtin) and returned None, so str(obj) raised
        # "TypeError: __str__ returned non-string".
        return "Baseline data of %s " % self.__dataset

    def __read_data_from_nix(self, r) -> np.ndarray:
        # FIXME: reading from nix files is not implemented yet.
        return np.asarray([])

    def __read_data_from_directory(self, r) -> np.ndarray:
        """Read the spike times of run ``r`` from ``basespikes1.dat``."""
        data = []
        data_source = os.path.join(self.__dataset["data_source"], "basespikes1.dat")
        if os.path.exists(data_source):
            found_run = False
            with open(data_source, 'r') as f:
                l = f.readline()
                while l:
                    # Each run is announced by an "index: <n>" comment line.
                    if "index" in l:
                        index = int(l.strip("#").strip().split(":")[-1])
                        found_run = index == r["run"]
                    # The data table of the matching run starts after "#Key".
                    if l.startswith("#Key") and found_run:
                        data = self.__do_read(f)
                        break
                    l = f.readline()
        # BUGFIX: always honor the annotated return type; the original
        # returned a plain list when the file or run was not found.
        return np.asarray(data)

    def __do_read(self, f) -> np.ndarray:
        """Read one column of floats from ``f`` until the next comment or
        blank line; the two header lines following "#Key" are skipped."""
        data = []
        f.readline()
        f.readline()
        l = f.readline()
        while l and "#" not in l and len(l.strip()) > 0:
            data.append(float(l.strip()))
            l = f.readline()
        return np.asarray(data)
|
||||
|
||||
# Simple smoke test; only runs when the module is executed as a script.
if __name__ == "__main__":
    print("Test")
|
17
database.py
17
database.py
@ -6,7 +6,6 @@ import glob
|
||||
import util as ut
|
||||
import uuid
|
||||
import yaml
|
||||
import pyrelacs
|
||||
|
||||
from IPython import embed
|
||||
|
||||
@ -170,14 +169,14 @@ class CellDatasetMap(dj.Manual):
|
||||
@schema
|
||||
class Repro(dj.Manual):
|
||||
definition = """
|
||||
repro_id : varchar(512)
|
||||
run : smallint
|
||||
repro_id : varchar(512) # The name that was given to the RePro run by relacs
|
||||
run : smallint # A counter counting the runs of the ReProp in this dataset
|
||||
-> Dataset # this is wrong! should be cell!?! In our case it is the same...
|
||||
----
|
||||
repro_name : varchar(512)
|
||||
settings : varchar(3000)
|
||||
start : float
|
||||
duration : float
|
||||
repro_name : varchar(512) # The original name of the RePro itself, not any given name by user or relacs
|
||||
settings : varchar(3000) # Yaml formatted string containing the repro settings (tag.metadata in case of a nix file)
|
||||
start : float # The start time of the repro
|
||||
duration : float # The duration of the repro
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
@ -518,7 +517,7 @@ if __name__ == "__main__":
|
||||
data_dir = "/data/apteronotus"
|
||||
# data_dir = "../high_freq_chirps/data"
|
||||
# drop_tables()
|
||||
|
||||
datasets = glob.glob(os.path.join(data_dir, '/data/eigenmannia/201*'))
|
||||
datasets = glob.glob("/Users/jan/zwischenlager/2012-*")
|
||||
# datasets = glob.glob(os.path.join(data_dir, '/data/eigenmannia/201*'))
|
||||
populate(datasets, update=False)
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user