[timeline] add better detection timeline

Jan Grewe 2025-01-26 23:06:54 +01:00
parent 30f23163e3
commit 7f7fb56839


@@ -0,0 +1,195 @@
import logging
import pathlib
import numpy as np

from PySide6.QtCore import Qt, QPointF, QRectF
from PySide6.QtWidgets import QWidget, QVBoxLayout, QSizePolicy, QGraphicsView, QGraphicsScene, QGraphicsItem, QGraphicsRectItem, QGraphicsLineItem
from PySide6.QtGui import QPixmap, QBrush, QColor, QImage, QPen, QFont, QPainter

from fixtracks.utils.signals import DetectionTimelineSignals
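

# Draggable selection window drawn on top of the timeline. The item can only be
# moved horizontally; on mouse release it emits the windowMoved signal with its
# horizontal position and the window width.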
class Window(QGraphicsRectItem):
    signals = DetectionTimelineSignals()

    def __init__(self, x, y, width, height, pen, brush):
        super().__init__(x, y, width, height)
        self._width = width
        self.setPen(pen)
        self.setBrush(brush)
        self.setZValue(1.0)
        self.setAcceptHoverEvents(True)  # Enable hover events if needed
        self.setFlags(
            QGraphicsItem.ItemIsMovable |   # Enables item dragging
            QGraphicsItem.ItemIsSelectable  # Enables item selection
        )
        self._y = y

    def mouseMoveEvent(self, event):
        # print(event.scenePos())
        super().mouseMoveEvent(event)

    def mousePressEvent(self, event):
        self.setCursor(Qt.ClosedHandCursor)
        super().mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        self.setCursor(Qt.OpenHandCursor)
        # Clamp the window to the horizontal extent of the scene and pin its
        # vertical position so it can only be dragged along the timeline.
        r = self.scenePos()
        if r.x() < -1000:
            self.setPos(-1000., self._y)
        if r.x() > 1000 - self._width:
            self.setPos(1000 - self._width, self._y)
        if r.y() != self._y:
            self.setPos(self.scenePos().x(), self._y)
        self.signals.windowMoved.emit(self.scenePos().x() + self.rect().width() / 2, self._width)
        super().mouseReleaseEvent(event)

    def hoverEnterEvent(self, event):
        # self.signals.hover.emit(self.data(0), QPointF(event.scenePos().x(), event.scenePos().y()))
        super().hoverEnterEvent(event)
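

# Timeline widget that visualizes in which frame ranges the two selected tracks
# and all other detections are present. A draggable Window item selects a frame
# range; its position is re-emitted as relative start/end fractions via the
# windowMoved signal.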
class DetectionTimeline(QWidget):
    signals = DetectionTimelineSignals()

    def __init__(self, detectiondata=None, trackone_id=1, tracktwo_id=2, parent=None):
        super().__init__(parent)
        self._trackone = trackone_id
        self._tracktwo = tracktwo_id
        self._data = detectiondata
        self._width = 2000
        self._stepCount = 200

        self._bg_brush = QBrush(QColor(20, 20, 20, 255))
        transparent_brush = QBrush(QColor(200, 200, 200, 64))
        self._white_pen = QPen(QColor.fromString("white"))
        self._white_pen.setWidthF(0.1)
        self._t1_pen = QPen(QColor.fromString("white"))
        self._t1_pen.setWidth(2)
        self._t2_pen = QPen(QColor(0, 255, 0, 255))
        self._t2_pen.setWidth(2)
        self._other_pen = QPen(QColor.fromString("red"))
        self._other_pen.setWidth(2)
        axis_pen = QPen(QColor.fromString("white"))
        axis_pen.setWidth(2)

        font = QFont()
        font.setPointSize(15)
        font.setBold(False)

        self._scene = QGraphicsScene(QRectF(0, 0, self._width, 50.))
        self._scene.setBackgroundBrush(self._bg_brush)

        self._view = QGraphicsView()
        self._view.setRenderHints(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.SmoothPixmapTransform)
        self._view.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)

        t1_label = self._scene.addText("track 1", font)
        t1_label.setDefaultTextColor(self._t1_pen.color())
        t1_label.setPos(0, 0)
        t2_label = self._scene.addText("track 2", font)
        t2_label.setFont(font)
        t2_label.setDefaultTextColor(self._t2_pen.color())
        t2_label.setPos(0, 17)
        other_label = self._scene.addText("other", font)
        other_label.setFont(font)
        other_label.setDefaultTextColor(self._other_pen.color())
        other_label.setPos(0, 30)

        line = self._scene.addLine(0, 50, self._width, 50)
        line.setPen(axis_pen)

        self._window = Window(self._width // 2, -5, 100, 60, axis_pen, transparent_brush)
        self._window.signals.windowMoved.connect(self.on_windowMoved)
        self._scene.addItem(self._window)

        self._view.setScene(self._scene)
        self._view.fitInView(self._scene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)

        layout = QVBoxLayout()
        layout.addWidget(self._view)
        self.setLayout(layout)

        if self._data is not None:
            self.draw_coverage()
    def setDetectionData(self, data):
        self._data = data
        self.draw_coverage()
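
    # Bin the detection frames into self._stepCount bins and draw a short
    # vertical tick for every bin that contains at least one detection,
    # one row per category (track 1, track 2, other).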
    def draw_coverage(self):
        maxframe = np.max(self._data.frame.values)
        bins = np.linspace(0, maxframe, self._stepCount)
        pos = np.linspace(0, self._scene.width(), self._stepCount)
        track1_frames = self._data.frame.values[self._data.track == self._trackone]
        track2_frames = self._data.frame.values[self._data.track == self._tracktwo]
        other_frames = self._data.frame.values[(self._data.track != self._trackone) & (self._data.track != self._tracktwo)]
        t1_coverage, _ = np.histogram(track1_frames, bins=bins)
        t1_coverage = t1_coverage > 0
        t2_coverage, _ = np.histogram(track2_frames, bins=bins)
        t2_coverage = t2_coverage > 0
        other_coverage, _ = np.histogram(other_frames, bins=bins)
        other_coverage = other_coverage > 0
        for i in range(len(t1_coverage) - 1):
            if t1_coverage[i]:
                self._scene.addLine(pos[i], 0, pos[i], 15., pen=self._t1_pen)
            if t2_coverage[i]:
                self._scene.addLine(pos[i], 17, pos[i], 32., pen=self._t2_pen)
            if other_coverage[i]:
                self._scene.addLine(pos[i], 34, pos[i], 49., pen=self._other_pen)
    def fit_scene_to_view(self):
        """Scale the scene to fit the QGraphicsView."""
        self._view.fitInView(self._scene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)

    def resizeEvent(self, event):
        """Handle widget resizing and refit the scene."""
        super().resizeEvent(event)
        self.fit_scene_to_view()
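
    # Translate the window position reported by the Window item into relative
    # start/end fractions of the timeline and re-emit them.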
    def on_windowMoved(self, x, w):
        scene_width = self._scene.width()
        start_pos = x - w / 2 + scene_width / 2
        end_pos = x + w / 2 + scene_width / 2
        range_start = np.round(start_pos / scene_width, 3)
        range_end = np.round(end_pos / scene_width, 3)
        self.signals.windowMoved.emit(range_start, range_end)

    # TODO add method to change window size
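

# Standalone demo: load a pickled detection table and display the timeline.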
def main():
    import pickle
    import numpy as np
    from IPython import embed
    from PySide6.QtWidgets import QApplication

    from fixtracks.info import PACKAGE_ROOT

    datafile = PACKAGE_ROOT / "data/merged_small.pkl"
    with open(datafile, "rb") as f:
        df = pickle.load(f)
    bg_coords = np.stack(df.keypoints[(df.track != 1) & (df.track != 2)].values).astype(np.float32)[:, 0, :]
    bg_tracks = df.track[(df.track != 1) & (df.track != 2)].values
    bg_ids = df.track[(df.track != 1) & (df.track != 2)].index.values
    scnd_coords = np.stack(df.keypoints[(df.track == 2)].values).astype(np.float32)[:, 0, :]
    scnd_tracks = df.track[df.track == 2].values
    scnd_ids = df.track[(df.track == 2)].index.values
    focus_coords = np.stack(df.keypoints[df.track == 1].values).astype(np.float32)[:, 0, :]
    focus_tracks = df.track[df.track == 1].values
    focus_ids = df.track[(df.track == 1)].index.values

    app = QApplication([])
    window = QWidget()
    window.setMinimumSize(200, 75)
    layout = QVBoxLayout()
    view = DetectionTimeline(df)
    layout.addWidget(view)
    # view.setImage(img)
    # view.addDetections(bg_coords, bg_tracks, bg_ids, background_brush)
    # view.addDetections(focus_coords, focus_tracks, focus_ids, focus_brush)
    # view.addDetections(scnd_coords, scnd_tracks, scnd_ids, second_brush)
    window.setLayout(layout)
    window.show()
    app.exec()


if __name__ == "__main__":
    main()