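"""Helper classes for read-only browsing of NIX files via nixio.

Defines light-weight descriptors (ItemDescriptor, FileDescriptor), a NodeType
enumeration, an EntityBuffer cache, and a singleton FileHandler that opens a
file and serves requests for its blocks, data arrays, tags, sources, and
metadata.
"""
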
import os
import datetime as dt
from enum import Enum

import nixio as nix


class ItemDescriptor:
    """Light-weight description of a single NIX entity."""

    def __init__(self, name=None, id=None, type=None, value=None, definition=None, block_id=None,
                 entity_type=None, shape=None, metadata=None, data_type=None) -> None:
        super().__init__()
        self.name = name
        self.type = type
        self.id = id
        self.block_id = block_id
        self.definition = definition
        self.value = value
        self.entity_type = entity_type
        self.data_type = data_type
        self.shape = shape
        self.metadata_id = metadata

class FileDescriptor:
    """Summary information about an opened NIX file."""

    def __init__(self, filename, format, version, created_at, updated_at, size) -> None:
        super().__init__()
        self.name = filename
        self.format = format
        self.version = version
        self.created_at = created_at
        self.updated_at = updated_at
        self.size = size
        self.block_count = 0
        self.data_array_count = 0
        self.tag_count = 0
        self.group_count = 0
        self.data_frame_count = 0

    def toHtml(self):
        def name_and_path(filename):
            parts = filename.split(os.sep)
            name = parts[-1]
            path = ""
            if len(parts) > 1:
                path = os.sep.join(parts[:-1])
            return name, path

        name, path = name_and_path(self.name)
        descr = "<html><h4>%s</h4>" % name
        descr += "<ol style='list-style-type:none'>"
        descr += "<li><small><b>location:</b> %s</small></li>" % (path if len(path) > 1 else ".")
        descr += "<li><small><b>format:</b> %s</small></li>" % (self.format)
        descr += "<li><small><b>nix format version:</b> %s</small></li>" % (str(self.version))
        descr += "<li><small><b>file size:</b> %.2f MB</small></li>" % (self.size)
        descr += "<hr>"
        descr += "<li>File contents</li>"
        descr += "<li><small><b>blocks:</b> %i</small></li>" % self.block_count
        descr += "<li><small><b>groups:</b> %i</small></li>" % self.group_count
        descr += "<li><small><b>data arrays:</b> %i</small></li>" % self.data_array_count
        descr += "<li><small><b>data frames:</b> %i</small></li>" % self.data_frame_count
        descr += "<li><small><b>tags:</b> %i</small></li>" % self.tag_count
        descr += "<hr>"
        descr += "<li><small><b>created at:</b> %s</small></li>" % (str(dt.datetime.fromtimestamp(self.created_at)))
        descr += "<li><small><b>updated at:</b> %s</small></li>" % (str(dt.datetime.fromtimestamp(self.updated_at)))
        descr += "</ol>"
        descr += "</html>"
        return descr

class NodeType(Enum):
    Root = "root"
    Section = "section"
    Block = "block"
    DataArray = "data_array"
    DataFrame = "data_frame"
    Property = "property"
    Dimension = "dimension"
    Source = "source"
    Tag = "tag"
    MultiTag = "multi_tag"
    Group = "group"
    Feature = "feature"

class Singleton(type):
    """Metaclass that makes sure only a single instance of a class exists."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
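
# Note: FileHandler below is declared with ``metaclass=Singleton``, so repeated
# calls such as FileHandler() always return the same shared instance.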


class EntityBuffer:
    """Caches NIX entities by their id to avoid repeated file lookups."""

    def __init__(self) -> None:
        super().__init__()
        self._buffer = {}

    def put(self, entity):
        if not hasattr(entity, "id"):
            return
        id = entity.id
        if id not in self._buffer:
            self._buffer[id] = entity

    def has(self, id):
        return id in self._buffer

    def get(self, id):
        if self.has(id):
            return self._buffer[id]
        else:
            return None

    def clear(self):
        self._buffer.clear()

class FileHandler(metaclass=Singleton):
    """Opens a NIX file read-only and answers requests for its entities."""

    def __init__(self) -> None:
        super().__init__()
        self._filename = None
        self._nix_file = None
        self._file_requests = []
        self._entity_buffer = EntityBuffer()
        self._file_descriptor = None
    def open(self, filename):
        self.close()

        if not os.path.exists(filename):
            return False, "File %s could not be found!" % filename
        try:
            self._nix_file = nix.File.open(filename, nix.FileMode.ReadOnly)
            self._filename = filename
            self._file_descriptor = FileDescriptor(self.filename, self._nix_file.format, self._nix_file.version,
                                                   self._nix_file.created_at, self._nix_file.updated_at,
                                                   os.path.getsize(self.filename) / 1e6)
            self.file_descriptor.block_count = len(self._nix_file.blocks)
            for b in self._nix_file.blocks:
                self.file_descriptor.data_array_count += len(b.data_arrays)
                self.file_descriptor.group_count += len(b.groups)
                self.file_descriptor.tag_count += len(b.tags)
                self.file_descriptor.tag_count += len(b.multi_tags)
                if hasattr(b, "data_frames"):
                    self.file_descriptor.data_frame_count += len(b.data_frames)
            return True, "Successfully opened file %s." % filename.split(os.sep)[-1]
        except RuntimeError as e:
            return False, "Failed to open file %s! \n Error message is: %s" % (filename, e)
        except OSError as e:
            return False, "Failed to open file %s! \n Error message is: %s\n Probably no nix file?!" % (filename, e)
    def close(self):
        # TODO check if there are any pending file requests!
        if self._nix_file is not None and self._nix_file.is_open():
            self._nix_file.close()
        self._nix_file = None
        self._file_requests = []
        self._entity_buffer.clear()

    @property
    def file_descriptor(self):
        return self._file_descriptor

    @property
    def is_valid(self):
        return self._nix_file is not None and self._nix_file.is_open()

    @property
    def filename(self):
        return self._filename
    def request_metadata(self, root_id=None, depth=1):
        """Collect the metadata sections and properties below a given root section.

        Args:
            root_id (str, optional): id of the section whose children are requested.
                If None, the root sections of the file are returned. Defaults to None.
            depth (int, optional): currently unused. Defaults to 1.

        Returns:
            tuple: a list of section ItemDescriptors and a list of property ItemDescriptors.
        """
        def get_subsections(section):
            sub_sections = []
            for s in section.sections:
                self._entity_buffer.put(s)
                sub_sections.append(ItemDescriptor(s.name, s.id, s.type, definition=s.definition,
                                                   entity_type=NodeType.Section))
            return sub_sections

        def get_properties(section):
            props = []
            for p in section.props:
                value = "unset"
                props.append(ItemDescriptor(p.name, p.id, value=value, entity_type=NodeType.Property))
            return props

        sections = []
        properties = []
        if root_id is None:
            sections = get_subsections(self._nix_file)
        else:
            fs = self._entity_buffer.get(root_id)
            if fs is None:
                found_section = self._nix_file.find_sections(lambda s: s.id == root_id)
                fs = found_section[0] if len(found_section) > 0 else None
            if fs is None:
                return sections, properties
            sections.extend(get_subsections(fs))
            properties.extend(get_properties(fs))
        return sections, properties
    def _entity_info(self, entities, block_id, entity_type):
        infos = []
        for e in entities:
            self._entity_buffer.put(e)
            itd = ItemDescriptor(e.name, e.id, e.type, definition=e.definition, entity_type=entity_type,
                                 block_id=block_id)
            itd.metadata_id = e.metadata if hasattr(e, "metadata") else None
            itd.data_type = e.data_type if hasattr(e, "data_type") else None
            itd.shape = e.shape if hasattr(e, "shape") else None
            infos.append(itd)
        # TODO set the value to something meaningful for the various entity types
        return infos

    def request_blocks(self):
        return self._entity_info(self._nix_file.blocks, None, NodeType.Block)

    def get_block(self, id):
        b = self._entity_buffer.get(id)
        if not b:
            b = self._nix_file.blocks[id]
        return b
    def request_data_arrays(self, block_id):
        b = self.get_block(block_id)
        return self._entity_info(b.data_arrays, block_id, NodeType.DataArray)

    def request_tags(self, block_id):
        b = self.get_block(block_id)
        tags = self._entity_info(b.tags, block_id, NodeType.Tag)
        tags.extend(self._entity_info(b.multi_tags, block_id, NodeType.MultiTag))
        return tags

    def request_references(self, block_id, tag_id, is_mtag):
        b = self.get_block(block_id)
        t = self._entity_buffer.get(tag_id)
        if t is None:
            if is_mtag:
                t = b.multi_tags[tag_id]
            else:
                t = b.tags[tag_id]
        return self._entity_info(t.references, block_id, NodeType.DataArray)
    def request_features(self, block_id, tag_id, is_mtag):
        b = self.get_block(block_id)
        t = self._entity_buffer.get(tag_id)
        if t is None:
            if is_mtag:
                t = b.multi_tags[tag_id]
            else:
                t = b.tags[tag_id]
        feats = []
        for f in t.features:
            itd = ItemDescriptor(f.data.name, f.id, f.link_type, definition=f.data.definition,
                                 block_id=block_id, entity_type=NodeType.Feature)
            feats.append(itd)
        return feats

    def request_dimensions(self, block_id, array_id):
        da = self._entity_buffer.get(array_id)
        if da is None:
            b = self.get_block(block_id)
            da = b.data_arrays[array_id]
        dimensions = []
        for i, d in enumerate(da.dimensions):
            dim_name = "%i. dim: %s" % (i + 1, d.label if hasattr(d, "label") else "")
            dim_type = "%s %s" % (d.dimension_type, "dimension")
            dimensions.append(ItemDescriptor(dim_name, type=dim_type, entity_type=NodeType.Dimension,
                                             block_id=block_id))
        return dimensions
    def request_data_frames(self, block_id):
        # data frames only exist in newer NIX files (format minor version >= 2)
        if self._nix_file.version[1] >= 2:
            b = self.get_block(block_id)
            return self._entity_info(b.data_frames, block_id, NodeType.DataFrame)
        return []

    def request_groups(self, block_id):
        b = self.get_block(block_id)
        return self._entity_info(b.groups, block_id, NodeType.Group)

    def request_sources(self, block_id, parent_source_id=None):
        def get_subsources(src):
            sub_sources = []
            for s in src.sources:
                self._entity_buffer.put(s)
                sub_sources.append(ItemDescriptor(s.name, s.id, s.type, definition=s.definition,
                                                  entity_type=NodeType.Source))
            return sub_sources

        b = self.get_block(block_id)
        if parent_source_id is None:
            return self._entity_info(b.sources, block_id, NodeType.Source)
        else:
            srcs = b.find_sources(lambda s: s.id == parent_source_id)
            sources = []
            for src in srcs:
                sources.extend(get_subsources(src))
            return sources
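

# A minimal usage sketch (illustrative only, not part of the original module;
# "recording.nix" is a placeholder file name):
#
#     handler = FileHandler()
#     ok, message = handler.open("recording.nix")
#     if ok:
#         for block in handler.request_blocks():
#             arrays = handler.request_data_arrays(block.id)
#     handler.close()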