Compare commits

...

30 Commits

Author SHA1 Message Date
00b6b54db9 [tracks] react on move request and move window 2025-03-01 17:11:37 +01:00
e3b26c3da4 [Timeline] send move request signal when user clicks onto the scene 2025-03-01 17:11:01 +01:00
30741be200 [tracks] changing window size immediately updates the view 2025-03-01 16:51:39 +01:00
50d982c93b [detectionview] fix bug, when no detection is in the current selection 2025-03-01 16:50:27 +01:00
6c54a86cde fix bug in tracks 2025-02-28 17:20:21 +01:00
03ebb6485a [some improvements] 2025-02-28 08:46:19 +01:00
116e0ce5de [wip] add score to items, and ignore them 2025-02-28 08:12:04 +01:00
d1b5776e69 [controls] remove size limit 2025-02-27 19:59:50 +01:00
4a76655766 [detections] add score to items 2025-02-27 19:59:46 +01:00
ae24463be2 [classifier] make sure, we always start with user labeled detections 2025-02-27 17:41:07 +01:00
15264dbe48 [tracks] allow jumping to a given frame 2025-02-27 16:14:48 +01:00
9d38421e02 [timeline] disable mouse dragging for now, trigger resize on setData 2025-02-26 11:23:20 +01:00
1c2f84b236 [enums] add userlabeled flag to detectionItems 2025-02-26 11:16:25 +01:00
ff3e0841a6 [classifier] better messaging 2025-02-26 11:16:04 +01:00
9e2c6f343a [trackingdata] fixes of selection handling, ...
something is still off with the deletion...
2025-02-26 11:15:49 +01:00
c0a7631acd [detectionview] simplify indexing 2025-02-26 11:15:15 +01:00
5758cf61c6 [selection] add shortcut, disable deletion for now 2025-02-26 11:14:49 +01:00
faf095a2a1 [tracks] add warnings around dangerzone actions 2025-02-26 11:14:18 +01:00
0c5e5629b7 [tracks] add way to flag many detections in one go 2025-02-26 09:24:20 +01:00
4ef6143d14 [tracks] layout tweaks 2025-02-26 08:32:02 +01:00
d6b91c25d2 [classifier] kind of handling multiple detections in one frame 2025-02-26 08:19:59 +01:00
430ee4fac7 [classifier] working but not really... 2025-02-25 18:45:37 +01:00
f1a4f4dc84 [tracks] disentanglement 2025-02-25 09:15:10 +01:00
461f3aadfe [wip] not properly working 2025-02-25 08:14:53 +01:00
3bc938cda7 [timeline] renaming stuff 2025-02-24 16:04:29 +01:00
6fbbb52370 [trackingdata] implement better selection handling ...
allow deletion of entries
2025-02-24 16:03:42 +01:00
d176925796 [trackingdata] change selections, constructor ...
renaming of some functions
2025-02-24 16:03:05 +01:00
35be41282a [trackingdata] scores returns None, if no detections in range 2025-02-24 11:45:24 +01:00
d300f72949 [main] fix setting of loglevel 2025-02-24 11:44:20 +01:00
765d381c5d [trackingdata] avoid div by zero on center of gravity estimation 2025-02-24 10:39:18 +01:00
10 changed files with 550 additions and 275 deletions

View File

@@ -32,7 +32,7 @@ def set_logging(loglevel):
logging.basicConfig(level=loglevel, force=True)
def main(args):
set_logging(logging.DEBUG)
set_logging(args.loglevel)
if platform.system() == "Windows":
# from PySide6.QtWinExtras import QtWin
myappid = f"{info.organization_name}.{info.application_version}"
@@ -75,6 +75,6 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="FixTracks. Tools for fixing animal tracking")
parser.add_argument("-ll", "--loglevel", type=str, default="INFO", help=f"The log level that should be used. Valid levels are {[str(k) for k in levels.keys()]}")
args = parser.parse_args()
args.loglevel = levels[args.loglevel if args.loglevel.lower() in levels else "info"]
args.loglevel = levels[args.loglevel.lower() if args.loglevel.lower() in levels else "info"]
main(args)

View File

@@ -7,6 +7,8 @@ class DetectionData(Enum):
FRAME = 1
COORDINATES = 2
TRACK_ID = 3
USERLABELED = 4
SCORE = 5
class Tracks(Enum):
TRACKONE = 1

View File

@@ -22,6 +22,8 @@ class DetectionSceneSignals(QObject):
class DetectionTimelineSignals(QObject):
windowMoved = Signal()
manualMove = Signal()
moveRequest = Signal(float)
class DetectionSignals(QObject):
hover = Signal((int, QPointF))

View File

@@ -7,21 +7,16 @@ from PySide6.QtCore import QObject
class TrackingData(QObject):
def __init__(self, parent=None):
def __init__(self, datadict, parent=None):
super().__init__(parent)
self._data = None
self._columns = []
self._start = 0
self._stop = 0
self._indices = None
self._selection_column = None
self._user_selections = None
def setData(self, datadict):
assert isinstance(datadict, dict)
self._data = datadict
self._data["userlabeled"] = np.zeros_like(self["frame"], dtype=bool)
if "userlabeled" not in self._data.keys():
self._data["userlabeled"] = np.zeros_like(self["frame"], dtype=bool)
self._columns = [k for k in self._data.keys()]
self._indices = np.arange(len(self["index"]), dtype=int)
self._selection_indices = np.asarray([])
self._selected_ids = None
@property
def data(self):
@@ -42,62 +37,91 @@ class TrackingData(QObject):
def numDetections(self):
return self._data["track"].shape[0]
@property
def selectionRange(self):
return self._start, self._stop
@property
def selectionRangeColumn(self):
return self._selection_column
def _find(self, ids):
if len(ids) < 1:
return np.array([])
ids = np.sort(ids)
indexes = np.ones_like(ids, dtype=int) * -1
j = 0
for idx in self._indices:
if self["index"][idx] == ids[j]:
indexes[j] = idx
j += 1
if j == len(indexes):
break
indexes = indexes[indexes >= 0]
return indexes
@property
def selectionIndices(self):
return self._indices
return self._selection_indices
@property
def selectionIDs(self):
return self._selected_ids
def setSelectionRange(self, col, start, stop):
logging.debug("Trackingdata: set selection range based on column %s to %.2f - %.2f", col, start, stop)
self._start = start
self._stop = stop
self._selection_column = col
col_indices = np.where((self._data[col] >= self._start) & (self._data[col] < self._stop))[0]
self._indices = self["index"][col_indices]
logging.info("Trackingdata: set selection range based on column %s to %.2f - %.2f", col, start, stop)
col_indices = np.where((self[col] >= start) & (self[col] < stop))[0]
self._selection_indices = self._indices[col_indices]
if len(col_indices) < 1:
logging.warning("TrackingData: Selection range is empty!")
def selectedData(self, col:str):
if col not in self.columns:
logging.error("TrackingData:selectedData: Invalid column name! %s", col)
return self[col][self._indices]
return self[col][self._selection_indices]
def setUserSelection(self, ids):
def setSelection(self, ids):
"""
Set the user selections. That is, e.g. when the user selected a number of detection ids (aka the index of the original data frame entries).
Set the selection based on the detection IDs.
Parameters
----------
ids : array-like
An array-like object containing the IDs to be set as user selections.
The IDs will be converted to integers.
"""
self._user_selections = ids.astype(int)
logging.debug("TrackingData.setSelection: %i number of ids", len(ids))
self._selection_indices = self._find(ids)
self._selected_ids = ids
# print(self._selected_ids, self._selection_indices)
def assignUserSelection(self, track_id:int, userFlag:bool=True)-> None:
def setTrack(self, track_id:int, setUserLabeled:bool=True)-> None:
"""Assign a new track_id to the user-selected detections
Parameters
----------
track_id : int
The new track id for the user-selected detections
userFlag : bool
Should the "userlabeled" state of the detections be set to True or False?
setUserLabeled : bool
Should the "userlabeled" state of the detections be set to True? Otherwise they will be left untouched.
"""
self["track"][self._user_selections] = track_id
self.setAssignmentStatus(userFlag)
logging.info("TrackingData: set track id %i for selection, set user-labeled status %s", track_id, str(setUserLabeled))
# print(self._selected_ids, self._selection_indices)
# print("before: ", self["track"][self._selection_indices], self["frame"][self._selection_indices])
self["track"][self._selection_indices] = track_id
if setUserLabeled:
self.setUserLabeledStatus(True, True)
# print("after: ", self["track"][self._selection_indices], self["frame"][self._selection_indices])
def setAssignmentStatus(self, isTrue: bool):
logging.debug("TrackingData:Re-setting assignment status of user selected data to %s", str(isTrue))
self["userlabeled"][self._user_selections] = isTrue
def setUserLabeledStatus(self, new_status: bool, selection=True):
"""Sets the status of the "userlabeled" column to a given value (True|False). This can done for ALL data in one go, or only for the UserSelection.
def revertAssignmentStatus(self):
Parameters
----------
new_status : bool
The new status, TRUE, if the detections are confirmed by the user (human observer) and can be treated as correct
selection : bool, optional
Whether the new status should be set for the selection only (True, default) ore not (False)
"""
logging.debug("TrackingData: (Re-)setting assignment status of %s to %s",
"user selected data" if selection else " ALL", str(new_status))
if selection:
self["userlabeled"][self._selection_indices] = new_status
else:
self["userlabeled"][:] = new_status
def revertUserLabeledStatus(self):
logging.debug("TrackingData:Un-setting assignment status of all data!")
self["userlabeled"][:] = False
@@ -105,14 +129,17 @@ class TrackingData(QObject):
logging.debug("TrackingData: Reverting all track assignments!")
self["track"][:] = -1
def deleteDetections(self):
# from IPython import embed
# if self._user_selections is not None:
# ids = self._user_selections
# for c in self.columns:
# pass
# embed()
pass
def deleteDetections(self, ids=None):
if ids is not None:
logging.debug("TrackingData.deleteDetections of %i detections", len(ids))
del_indices = self._find(ids)
else:
logging.debug("TrackingData.deleteDetections of all selected detections (%i)", len(self._selected_ids))
del_indices = self._selected_ids
for c in self._columns:
self._data[c] = np.delete(self._data[c], del_indices, axis=0)
self._indices = self._indices[:-len(del_indices)]
self._selected_ids = np.setdiff1d(self._selected_ids, del_indices)
def assignTracks(self, tracks:np.ndarray):
"""assigns the given tracks to the user-selected detections. If the sizes of
@@ -154,11 +181,11 @@ class TrackingData(QObject):
and M is number of keypoints
"""
if selection:
if len(self._indices) < 1:
if len(self._selection_indices) < 1:
logging.info("TrackingData.coordinates returns empty array, not detections in range!")
return np.ndarray([])
return np.stack(self._data["keypoints"][self._indices]).astype(np.float32)
return np.stack(self._data["keypoints"]).astype(np.float32)
return np.stack(self["keypoints"][self._selection_indices]).astype(np.float32)
return np.stack(self["keypoints"]).astype(np.float32)
def keypointScores(self, selection=False):
"""
@@ -169,13 +196,13 @@ class TrackingData(QObject):
numpy.ndarray
A NumPy array of type float32 containing the keypoint scores of the shape (N, M)
with N the number of detections and M the number of keypoints.
"""
"""
if selection:
if len(self._indices) < 1:
if len(self._selection_indices) < 1:
logging.info("TrackingData.scores returns empty array, not detections in range!")
return np.ndarray([])
return np.stack(self._data["keypoint_score"][self._indices]).astype(np.float32)
return np.stack(self._data["keypoint_score"]).astype(np.float32)
return None
return np.stack(self["keypoint_score"][self._selection_indices]).astype(np.float32)
return np.stack(self["keypoint_score"]).astype(np.float32)
def centerOfGravity(self, selection=False, threshold=0.8, nodes=[0,1,2]):
"""
@@ -195,12 +222,17 @@ class TrackingData(QObject):
A NumPy array of shape (N, 2) containing the center of gravity for each detection.
"""
scores = self.keypointScores(selection)
if scores is None:
return None
scores[scores < threshold] = 0.0
scores[:, np.setdiff1d(np.arange(scores.shape[1]), nodes)] = 0.0
weighted_coords = self.coordinates(selection=selection) * scores[:, :, np.newaxis]
weighted_coords = self.coordinates(selection) * scores[:, :, np.newaxis]
sum_scores = np.sum(scores, axis=1, keepdims=True)
center_of_gravity = np.sum(weighted_coords, axis=1) / sum_scores
return center_of_gravity
cogs = np.zeros((weighted_coords.shape[0], 2))
val_ids = np.where(sum_scores > 0.0)[0]
cogs[val_ids] = np.sum(weighted_coords[val_ids], axis=1) / sum_scores[val_ids]
return cogs
def animalLength(self, bodyaxis=None):
if bodyaxis is None:
@@ -288,22 +320,26 @@ def main():
plt.plot([positions[si, 0], positions[ei, 0]],
[positions[si, 1], positions[ei, 1]], color="tab:green")
datafile = PACKAGE_ROOT / "data/merged_small.pkl"
datafile = PACKAGE_ROOT / "data/merged_small_tracked.pkl"
with open(datafile, "rb") as f:
df = pickle.load(f)
data = TrackingData()
data.setData(as_dict(df))
data = TrackingData(as_dict(df))
test_indices = [32, 88, 99, 2593]
data.deleteDetections(test_indices)
embed()
data.deleteDetections(test_indices)
data.setSelection(test_indices)
all_cogs = data.centerOfGravity()
orientations = data.orientation()
lengths = data.animalLength()
frames = data["frame"]
tracks = data["track"]
bendedness = data.bendedness()
indices = data._indices
# positions = data.coordinates()[[160388, 160389]]
embed()
tracks = data["track"]
cogs = all_cogs[tracks==1]
all_dists = neighborDistances(cogs, 2, False)

View File

@@ -1,8 +1,8 @@
import logging
import numpy as np
from PySide6.QtWidgets import QWidget, QVBoxLayout, QTabWidget, QPushButton, QGraphicsView
from PySide6.QtWidgets import QSpinBox, QProgressBar, QGridLayout, QLabel, QCheckBox, QProgressDialog
from PySide6.QtWidgets import QWidget, QVBoxLayout, QTabWidget, QPushButton, QGraphicsView, QTextEdit
from PySide6.QtWidgets import QSpinBox, QProgressBar, QGridLayout, QLabel, QCheckBox, QDoubleSpinBox
from PySide6.QtCore import Qt, Signal, Slot, QRunnable, QObject, QThreadPool
from PySide6.QtGui import QBrush, QColor
@@ -12,23 +12,37 @@ from fixtracks.utils.trackingdata import TrackingData
from IPython import embed
class Detection():
def __init__(self, id, frame, track, position, orientation, length, userlabeled, confidence):
self.id = id
self.frame = frame
self.track = track
self.position = position
self.confidence = confidence
self.angle = orientation
self.length = length
self.userlabeled = userlabeled
class WorkerSignals(QObject):
error = Signal(str)
message = Signal(str)
running = Signal(bool)
progress = Signal(int, int, int)
currentframe = Signal(int)
stopped = Signal(int)
class ConsitencyDataLoader(QRunnable):
class ConsistencyDataLoader(QRunnable):
def __init__(self, data):
super().__init__()
self.signals = WorkerSignals()
self.data = data
self.bendedness = self.positions = None
self.bendedness = None
self.positions = None
self.lengths = None
self.orientations = None
self.userlabeled = None
self.scores = None
self.confidence = None
self.frames = None
self.tracks = None
@@ -40,17 +54,18 @@ class ConsitencyDataLoader(QRunnable):
self.positions = self.data.centerOfGravity()
self.orientations = self.data.orientation()
self.lengths = self.data.animalLength()
self.bendedness = self.data.bendedness()
# self.bendedness = self.data.bendedness()
self.userlabeled = self.data["userlabeled"]
self.scores = self.data["confidence"] # ignore for now, let's see how far this carries.
self.confidence = self.data["confidence"] # ignore for now, let's see how far this carries.
self.frames = self.data["frame"]
self.tracks = self.data["track"]
self.signals.stopped.emit(0)
class ConsistencyWorker(QRunnable):
def __init__(self, positions, orientations, lengths, bendedness, frames, tracks,
userlabeled, startframe=0, stoponerror=False) -> None:
userlabeled, confidence, startframe=0, stoponerror=False, min_confidence=0.0) -> None:
super().__init__()
self.signals = WorkerSignals()
self.positions = positions
@@ -58,6 +73,8 @@ class ConsistencyWorker(QRunnable):
self.lengths = lengths
self.bendedness = bendedness
self.userlabeled = userlabeled
self.confidence = confidence
self._min_confidence = min_confidence
self.frames = frames
self.tracks = tracks
self._startframe = startframe
@@ -70,124 +87,195 @@ class ConsistencyWorker(QRunnable):
@Slot()
def run(self):
def needs_checking(original, new):
res = False
for n, o in zip(new, original):
res = (o == 1 or o == 2) and n != o
if res:
print("inverted assignment, needs cross-checking?")
if not res:
res = len(new) > 1 and (np.all(new == 1) or np.all(new == 2))
if res:
print("all detections would be assigned to one track!")
return res
def get_detections(frame, indices):
detections = []
for i in indices:
if np.any(self.positions[i] < 0.1):
logging.debug("Encountered probably invalid position %s", str(self.positions[i]))
continue
if self._min_confidence > 0.0 and self.confidence[i] < self._min_confidence:
self.tracks[i] = -1
continue
d = Detection(i, frame, self.tracks[i], self.positions[i],
self.orientations[i], self.lengths[i],
self.userlabeled[i], self.confidence[i])
detections.append(d)
return detections
def assign_by_distance(f, p):
t1_step = f - last_frame[0]
t2_step = f - last_frame[1]
def assign_by_distance(d):
t1_step = d.frame - last_detections[1].frame
t2_step = d.frame - last_detections[2].frame
if t1_step == 0 or t2_step == 0:
print(f"framecount is zero! current frame {f}, last frame {last_frame[0]} and {last_frame[1]}")
distance_to_trackone = np.linalg.norm(p - last_pos[0])/t1_step
distance_to_tracktwo = np.linalg.norm(p - last_pos[1])/t2_step
print(f"framecount is zero! current frame {f}, last frame {last_detections[1].frame} and {last_detections[2].frame}")
distance_to_trackone = np.linalg.norm(d.position - last_detections[1].position) /t1_step
distance_to_tracktwo = np.linalg.norm(d.position - last_detections[2].position) /t2_step
most_likely_track = np.argmin([distance_to_trackone, distance_to_tracktwo]) + 1
distances = np.zeros(2)
distances[0] = distance_to_trackone
distances[1] = distance_to_tracktwo
return most_likely_track, distances
def assign_by_orientation(f, o):
t1_step = f - last_frame[0]
t2_step = f - last_frame[1]
orientationchange = (last_angle - o)
orientationchange[orientationchange > 180] = 360 - orientationchange[orientationchange > 180]
orientationchange /= np.array([t1_step, t2_step])
# orientationchange = np.abs(np.unwrap((last_angle - o)/np.array([t1_step, t2_step])))
most_likely_track = np.argmin(np.abs(orientationchange)) + 1
return most_likely_track, orientationchange
def assign_by_orientation(d):
t1_step = d.frame - last_detections[1].frame
t2_step = d.frame - last_detections[2].frame
orientationchanges = np.zeros(2)
for i in [1, 2]:
orientationchanges[i-1] = (last_detections[i].angle - d.angle)
def assign_by_length(o):
length_difference = np.abs((last_length - o))
most_likely_track = np.argmin(length_difference) + 1
return most_likely_track, length_difference
orientationchanges[orientationchanges > 180] = 360 - orientationchanges[orientationchanges > 180]
orientationchanges /= np.array([t1_step, t2_step])
most_likely_track = np.argmin(np.abs(orientationchanges)) + 1
return most_likely_track, orientationchanges
def do_assignment(f, indices, assignments):
for i, idx in enumerate(indices):
self.tracks[idx] = assignments[i]
last_pos[assignments[i]-1] = pp[i]
last_frame[assignments[i]-1] = f
last_angle[assignments[i]-1] = self.orientations[idx]
last_length[assignments[i]-1] += ((self.lengths[idx] - last_length[assignments[i]-1])/processed)
def assign_by_length(d):
length_differences = np.zeros(2)
length_differences[0] = np.abs((last_detections[1].length - d.length))
length_differences[1] = np.abs((last_detections[2].length - d.length))
most_likely_track = np.argmin(length_differences) + 1
return most_likely_track, length_differences
last_pos = [self.positions[(self.tracks == 1) & (self.frames <= self._startframe)][-1],
self.positions[(self.tracks == 2) & (self.frames <= self._startframe)][-1]]
last_frame = [self.frames[(self.tracks == 1) & (self.frames <= self._startframe)][-1],
self.frames[(self.tracks == 2) & (self.frames <= self._startframe)][-1]]
last_angle = [self.orientations[(self.tracks == 1) & (self.frames <= self._startframe)][-1],
self.orientations[(self.tracks == 2) & (self.frames <= self._startframe)][-1]]
last_length = [self.lengths[(self.tracks == 1) & (self.frames <= self._startframe)][-1],
self.lengths[(self.tracks == 2) & (self.frames <= self._startframe)][-1]]
def check_multiple_detections(detections):
if self._min_confidence > 0.0:
for i, d in enumerate(detections):
if d.confidence < self._min_confidence:
del detections[i]
distances = np.zeros((len(detections), len(detections)))
for i, d1 in enumerate(detections):
for j, d2 in enumerate(detections):
distances[i, j] = np.abs(np.linalg.norm(d2.position - d1.position))
lowest_dist = np.argmin(np.sum(distances, axis=1))
del detections[lowest_dist]
return detections
def find_last_userlabeled(startframe):
t1index = np.where((self.frames < startframe) & (self.userlabeled) & (self.tracks == 1))[0][-1]
t2index = np.where((self.frames < startframe) & (self.userlabeled) & (self.tracks == 2))[0][-1]
d1 = Detection(t1index, self.frames[t1index], self.tracks[t1index], self.positions[t1index],
self.orientations[t1index], self.lengths[t1index], self.userlabeled[t1index],
self.confidence[t1index])
d2 = Detection(t1index, self.frames[t2index], self.tracks[t2index], self.positions[t2index],
self.orientations[t2index], self.lengths[t2index], self.userlabeled[t2index],
self.confidence[t1index])
last_detections[1] = d1
last_detections[2] = d2
unique_frames = np.unique(self.frames)
steps = int((len(unique_frames) - self._startframe) // 100)
errors = 0
processed = 1
progress = 0
self._stoprequest = False
maxframes = np.max(self.frames)
startframe = np.max(last_frame)
steps = int((maxframes - startframe) // 200)
last_detections = {1: None, 2: None, -1: None}
find_last_userlabeled(self._startframe)
for f in np.unique(self.frames[self.frames > startframe]):
processed += 1
self.signals.currentframe.emit(f)
for f in unique_frames[unique_frames >= self._startframe]:
if self._stoprequest:
break
error = False
message = ""
self.signals.currentframe.emit(f)
indices = np.where(self.frames == f)[0]
pp = self.positions[indices]
originaltracks = self.tracks[indices]
dist_assignments = np.zeros_like(originaltracks)
angle_assignments = np.zeros_like(originaltracks)
length_assignments = np.zeros_like(originaltracks)
userlabeled = np.zeros_like(originaltracks)
distances = np.zeros((len(originaltracks), 2))
detections = get_detections(f, indices)
done = [False, False]
if len(detections) == 0:
continue
if len(detections) > 2:
message = f"Frame {f}: More than 2 detections ({len(detections)}) in the same frame!"
logging.info("ConsistencyTracker: %s", message)
self.signals.message.emit(message)
while len(detections) > 2:
detections = check_multiple_detections(detections)
if len(detections) > 1 and np.any([detections[0].userlabeled, detections[1].userlabeled]):
# more than one detection
if detections[0].userlabeled and detections[1].userlabeled:
if detections[0].track == detections[1].track:
error = True
message = f"Frame {f}: Classification error both detections in the same frame are assigned to the same track!"
logging.info("ConsistencyTracker: %s", message)
self.signals.message.emit(message)
elif detections[0].userlabeled and not detections[1].userlabeled:
detections[1].track = 1 if detections[0].track == 2 else 2
elif not detections[0].userlabeled and detections[1].userlabeled:
detections[0].track = 1 if detections[1].track == 2 else 2
if not error:
last_detections[detections[0].track] = detections[0]
last_detections[detections[1].track] = detections[1]
self.tracks[detections[0].id] = detections[0].track
self.tracks[detections[1].id] = detections[1].track
done[0] = True
done[1] = True
elif len(detections) == 1 and detections[0].userlabeled: # ony one detection and labeled
last_detections[detections[0].track] = detections[0]
done[0] = True
if np.sum(done) == len(detections):
continue
if error and self._stoponerror:
self.signals.message.emit(f"Tracking stopped at frame {f}.")
break
elif error:
continue
dist_assignments = np.zeros(2, dtype=int)
orientation_assignments = np.zeros_like(dist_assignments)
length_assignments = np.zeros_like(dist_assignments)
distances = np.zeros((2, 2))
orientations = np.zeros_like(distances)
lengths = np.zeros_like(distances)
assignments = np.zeros(2)
for i, d in enumerate(detections):
dist_assignments[i], distances[i, :] = assign_by_distance(d)
orientation_assignments[i], orientations[i,:] = assign_by_orientation(d)
length_assignments[i], lengths[i, :] = assign_by_length(d)
assignments = dist_assignments # (dist_assignments * 10 + orientation_assignments + length_assignments) / 3
for i, (idx, p) in enumerate(zip(indices, pp)):
if self.userlabeled[idx]:
print("user")
userlabeled[i] = True
last_pos[originaltracks[i]-1] = pp[i]
last_frame[originaltracks[i]-1] = f
last_angle[originaltracks[i]-1] = self.orientations[idx]
last_length[originaltracks[i]-1] += ((self.lengths[idx] - last_length[originaltracks[i]-1]) / processed)
continue
dist_assignments[i], distances[i, :] = assign_by_distance(f, p)
angle_assignments[i], orientations[i,:] = assign_by_orientation(f, self.orientations[idx])
length_assignments[i], lengths[i, :] = assign_by_length(self.lengths[idx])
if np.any(userlabeled):
continue
# check (re) assignment, update, and proceed
if not needs_checking(originaltracks, dist_assignments):
do_assignment(f, indices, dist_assignments)
else:
if not (np.all(length_assignments == 1) or np.all(length_assignments == 2)): # if I find a solution by body length
logging.debug("frame %i: Decision based on body length", f)
do_assignment(f, indices, length_assignments)
elif not (np.all(angle_assignments == 1) or np.all(angle_assignments == 2)): # else there is a solution based on orientation
logging.info("frame %i: Decision based on orientation", f)
do_assignment(f, indices, angle_assignments)
else:
logging.info("frame %i: Cannot decide who is who")
for idx in indices:
self.tracks[idx] = -1
error = False
temp = {}
message = ""
if len(detections) > 1:
if assignments[0] == assignments[1]:
d.track = -1
error = True
errors += 1
if self._stoponerror:
break
message = f"Frame {f}: Classification error: both detections in the same frame are assigned to the same track!"
break
elif assignments[0] != assignments[1]:
detections[0].track = assignments[0]
detections[1].track = assignments[1]
temp[detections[0].track] = detections[0]
temp[detections[1].track] = detections[1]
self.tracks[detections[0].id] = detections[0].track
self.tracks[detections[1].id] = detections[1].track
else:
if np.abs(np.diff(distances[0,:])) > 50: # maybe include the time difference into this?
detections[0].track = assignments[0]
temp[detections[0].track] = detections[0]
self.tracks[detections[0].id] = detections[0].track
else:
self.tracks[detections[0].id] = -1
message = f"Frame: {f}: Decision based on distance not safe. Track set to unassigned."
error = True
errors += 1
if not error:
for k in temp:
last_detections[temp[k].track] = temp[k]
else:
logging.info("frame %i: Cannot decide who is who! %s", f, message)
for idx in indices:
self.tracks[idx] = -1
errors += 1
if error and self._stoponerror:
self.signals.message.emit(message)
break
processed += 1
if steps > 0 and f % steps == 0:
progress += 1
self.signals.progress.emit(progress, processed, errors)
self.signals.message.emit(f"Tracking stopped at frame {f}.")
self.signals.stopped.emit(f)
@@ -266,6 +354,7 @@ class SizeClassifier(QWidget):
tracks[(self._sizes >= t2lower) & (self._sizes < t2upper)] = 2
return tracks
class NeighborhoodValidator(QWidget):
apply = Signal()
name = "Neighborhood Validator"
@@ -390,6 +479,7 @@ class ConsistencyClassifier(QWidget):
self._all_lengths = None
self._all_bendedness = None
self._all_scores = None
self._confidence = None
self._userlabeled = None
self._maxframes = 0
self._frames = None
@@ -435,25 +525,38 @@ class ConsistencyClassifier(QWidget):
self._stoponerror.setChecked(True)
self.threadpool = QThreadPool()
self._ignore_confidence = QCheckBox("Ignore detections widh confidence less than")
self._confidence_spinner = QDoubleSpinBox()
self._confidence_spinner.setRange(0.0, 1.0)
self._confidence_spinner.setSingleStep(0.01)
self._confidence_spinner.setDecimals(2)
self._confidence_spinner.setValue(0.5)
self._messagebox = QTextEdit()
self._messagebox.setFocusPolicy(Qt.NoFocus)
self._messagebox.setReadOnly(True)
lyt = QGridLayout()
lyt.addWidget(QLabel("Start frame:"), 0, 0 )
lyt.addWidget(self._startframe_spinner, 0, 1, 1, 2)
lyt.addWidget(QLabel("of"), 1, 1, 1, 1)
lyt.addWidget(self._maxframeslabel, 1, 2, 1, 1)
lyt.addWidget(self._stoponerror, 2, 0, 1, 3)
lyt.addWidget(QLabel("Current frame"), 3,0)
lyt.addWidget(self._framelabel, 3,1)
lyt.addWidget(QLabel("assigned"), 4, 0)
lyt.addWidget(self._assignedlabel, 4, 1)
lyt.addWidget(QLabel("errors/issues"), 5, 0)
lyt.addWidget(self._errorlabel, 5, 1)
lyt.addWidget(self._startframe_spinner, 0, 1, 1, 1)
lyt.addWidget(QLabel("of"), 0, 2, 1, 1)
lyt.addWidget(self._maxframeslabel, 0, 3, 1, 1)
lyt.addWidget(self._stoponerror, 1, 0, 1, 3)
lyt.addWidget(self._ignore_confidence, 3, 0, 1, 3)
lyt.addWidget(self._confidence_spinner, 3, 3, 1, 1)
lyt.addWidget(QLabel("Current frame"), 4, 0)
lyt.addWidget(self._framelabel, 4, 1)
lyt.addWidget(QLabel("(Re-)Assigned"), 5, 0)
lyt.addWidget(self._assignedlabel, 5, 1)
lyt.addWidget(QLabel("Errors/issues"), 5, 2)
lyt.addWidget(self._errorlabel, 5, 3, 1, 1)
lyt.addWidget(self._messagebox, 6, 0, 2, 4)
lyt.addWidget(self._startbtn, 6, 0)
lyt.addWidget(self._stopbtn, 6, 1)
lyt.addWidget(self._proceedbtn, 6, 2)
lyt.addWidget(self._apply_btn, 7, 0, 1, 2)
lyt.addWidget(self._refreshbtn, 7, 2, 1, 1)
lyt.addWidget(self._progressbar, 8, 0, 1, 3)
lyt.addWidget(self._startbtn, 8, 0, 1, 2)
lyt.addWidget(self._stopbtn, 8, 2)
# lyt.addWidget(self._proceedbtn, 8, 2)
lyt.addWidget(self._refreshbtn, 8, 3, 1, 1)
lyt.addWidget(self._apply_btn, 9, 0, 1, 4)
lyt.addWidget(self._progressbar, 10, 0, 1, 4)
self.setLayout(lyt)
def setData(self, data:TrackingData):
@@ -478,21 +581,38 @@ class ConsistencyClassifier(QWidget):
self._all_lengths = self._dataworker.lengths
self._all_bendedness = self._dataworker.bendedness
self._userlabeled = self._dataworker.userlabeled
self._all_scores = self._dataworker.scores
self._confidence = self._dataworker.confidence
self._frames = self._dataworker.frames
self._tracks = self._dataworker.tracks
self._dataworker = None
if np.sum(self._userlabeled) < 1:
msg = "ConsistencyTracker: I need at least 1 user-labeled frame to start with!"
logging.error(msg)
self._messagebox.append(msg)
self.setEnabled(False)
else:
t1_userlabeled = self._frames[self._userlabeled & (self._tracks == 1)]
t2_userlabeled = self._frames[self._userlabeled & (self._tracks == 2)]
if any([len(t1_userlabeled) == 0, len(t2_userlabeled)== 0]):
self._messagebox.append("Error preparing data! Make sure that the first user-labeled frames contain both tracks!")
self.setEnabled(False)
return
max_startframe = np.min([t1_userlabeled[-1], t2_userlabeled[-1]]) -1
first_guess = np.max([t1_userlabeled[0], t2_userlabeled[0]])
while first_guess not in t1_userlabeled or first_guess not in t2_userlabeled:
first_guess += 1
min_startframe = first_guess + 1
self._maxframes = np.max(self._frames)
# FIXME the following line causes an error when there are no detections in the range
min_frame = max([self._frames[self._tracks == 1][0], self._frames[self._tracks == 2][0]]) + 1
self._maxframeslabel.setText(str(self._maxframes))
self._startframe_spinner.setMinimum(min_frame)
self._startframe_spinner.setMaximum(self._frames[-1])
self._startframe_spinner.setValue(self._frames[0] + 1)
self._startframe_spinner.setMinimum(min_startframe)
self._startframe_spinner.setMaximum(max_startframe)
self._startframe_spinner.setValue(min_startframe)
self._startframe_spinner.setSingleStep(20)
self._startframe_spinner.setToolTip(f"Maximum possible start frame: {max_startframe}")
self._startbtn.setEnabled(True)
self._assignedlabel.setText("0")
self._errorlabel.setText("0")
self._dataworker = None
self.setEnabled(True)
self.setEnabled(True)
@Slot(float)
def on_progress(self, value):
@@ -502,32 +622,39 @@ class ConsistencyClassifier(QWidget):
def stop(self):
if self._worker is not None:
self._worker.stop()
self._startbtn.setEnabled(True)
self._proceedbtn.setEnabled(True)
self._stopbtn.setEnabled(False)
self._refreshbtn.setEnabled(True)
self._messagebox.append("Stopping tracking.")
def start(self):
confidence_level = self._confidence_spinner.value() if self._ignore_confidence.isChecked() else 0.0
self._startbtn.setEnabled(False)
self._refreshbtn.setEnabled(False)
self._stopbtn.setEnabled(True)
self._worker = ConsistencyWorker(self._all_pos, self._all_orientations, self._all_lengths,
self._all_bendedness, self._frames, self._tracks, self._userlabeled,
self._startframe_spinner.value(), self._stoponerror.isChecked())
self._confidence, self._startframe_spinner.value(), self._stoponerror.isChecked(),
min_confidence=confidence_level)
self._worker.signals.stopped.connect(self.worker_stopped)
self._worker.signals.progress.connect(self.worker_progress)
self._worker.signals.message.connect(self.worker_error)
self._worker.signals.currentframe.connect(self.worker_frame)
self._messagebox.append("Tracking in progress ...")
self.threadpool.start(self._worker)
def worker_frame(self, frame):
self._framelabel.setText(str(frame))
def worker_error(self, msg):
self._messagebox.append(msg)
def proceed(self):
self.start()
def refresh(self):
self._dataworker = ConsitencyDataLoader(self._data)
self.setEnabled(False)
self._dataworker = ConsistencyDataLoader(self._data)
self._dataworker.signals.stopped.connect(self.data_processed)
self._messagebox.clear()
self._messagebox.append("Refreshing...")
self.threadpool.start(self._dataworker)
def worker_progress(self, progress, processed, errors):
@@ -536,13 +663,15 @@ class ConsistencyClassifier(QWidget):
self._assignedlabel.setText(str(processed))
def worker_stopped(self, frame):
self._apply_btn.setEnabled(True)
self._startbtn.setEnabled(True)
self._proceedbtn.setEnabled(True)
self._stopbtn.setEnabled(False)
self._apply_btn.setEnabled(True)
self._refreshbtn.setEnabled(True)
self._startframe_spinner.setValue(frame-1)
self._proceedbtn.setEnabled(bool(frame < self._maxframes-1))
self._refreshbtn.setEnabled(True)
self._processed_frames = frame
self._messagebox.append("... done.")
def assignedTracks(self):
return self._tracks
@@ -607,14 +736,14 @@ def main():
import pickle
from fixtracks.info import PACKAGE_ROOT
datafile = PACKAGE_ROOT / "data/merged2.pkl"
datafile = PACKAGE_ROOT / "data/merged_small_beginning.pkl"
with open(datafile, "rb") as f:
df = pickle.load(f)
data = TrackingData()
data.setData(as_dict(df))
data = TrackingData(as_dict(df))
coords = data.coordinates()
cogs = data.centerOfGravity()
userlabeled = data["userlabeled"]
app = QApplication([])
window = QWidget()
window.setMinimumSize(200, 200)
@@ -624,7 +753,7 @@ def main():
# else:
w = ClassifierWidget()
w.setData(data)
w.size_classifier.setCoordinates(coords)
# w.size_classifier.setCoordinates(coords)
layout = QVBoxLayout()
layout.addWidget(w)

View File

@@ -21,16 +21,16 @@ class Window(QGraphicsRectItem):
self.setBrush(brush)
self.setZValue(1.0)
self.setAcceptHoverEvents(True) # Enable hover events if needed
self.setFlags(
QGraphicsItem.ItemIsMovable | # Enables item dragging
QGraphicsItem.ItemIsSelectable # Enables item selection
)
# self.setFlags(
# QGraphicsItem.ItemIsMovable | # Enables item dragging
# QGraphicsItem.ItemIsSelectable # Enables item selection
# )
self._y = y
def setWindowX(self, newx):
logging.debug("timeline.window: set position to %.3f", newx)
self.setX(newx)
self.signals.windowMoved.emit()
# self.signals.windowMoved.emit()
def setWindowWidth(self, newwidth):
logging.debug("timeline.window: update window width to %f", newwidth)
@@ -38,7 +38,7 @@ class Window(QGraphicsRectItem):
r = self.rect()
r.setWidth(newwidth)
self.setRect(r)
self.signals.windowMoved.emit()
# self.signals.windowMoved.emit()
def setWindow(self, newx:float, newwidth:float):
"""
@@ -58,13 +58,14 @@ class Window(QGraphicsRectItem):
r = self.rect()
self.setRect(newx, r.y(), self._width, r.height())
self.update()
self.signals.windowMoved.emit()
# self.signals.windowMoved.emit()
def mouseMoveEvent(self, event):
super().mouseMoveEvent(event)
def mousePressEvent(self, event):
self.setCursor(Qt.ClosedHandCursor)
# print(event.pos())
super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
@@ -77,7 +78,7 @@ class Window(QGraphicsRectItem):
if r.y() != self._y:
self.setY(self._y)
super().mouseReleaseEvent(event)
self.signals.windowMoved.emit()
self.signals.manualMove.emit()
def hoverEnterEvent(self, event):
super().hoverEnterEvent(event)
@@ -116,11 +117,12 @@ class DetectionTimeline(QWidget):
font.setBold(True)
self._window = Window(0, 0, 100, 60, window_pen, transparent_brush)
self._window.signals.windowMoved.connect(self.on_windowMoved)
self._window.signals.manualMove.connect(self.on_windowMoved)
self._scene = QGraphicsScene(QRectF(0, 0, self._total_width, 85.))
self._scene.setBackgroundBrush(self._bg_brush)
self._scene.addItem(self._window)
self._scene.mousePressEvent = self.on_sceneMousePress
self._view = QGraphicsView()
# self._view.setRenderHints(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.SmoothPixmapTransform)
@@ -151,12 +153,22 @@ class DetectionTimeline(QWidget):
self._position_label.setFont(f)
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(5, 2, 5, 2)
layout.addWidget(self._view)
layout.addWidget(self._position_label, Qt.AlignmentFlag.AlignRight)
self.setLayout(layout)
# self.setMaximumHeight(100)
# self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed)
def on_sceneMousePress(self, event):
scene_pos = event.scenePos()
relpos = scene_pos.x() / self._total_width
relpos = 0 if relpos < 0.0 else relpos
relpos = 2000/self._total_width if scene_pos.x() > self._total_width else relpos
self.signals.moveRequest.emit(relpos)
logging.debug("Timeline: Scene clicked at position: %.2f, %.2f --> rel x-pos %.3f", scene_pos.x(), scene_pos.y(), relpos)
def clear(self):
for i in self._scene.items():
if isinstance(i, (QGraphicsLineItem, QGraphicsEllipseItem)):
@@ -166,6 +178,7 @@ class DetectionTimeline(QWidget):
logging.debug("Timeline: setData!")
self._data = data
self.update()
self.resizeEvent(None)
def update(self):
self.clear()
@@ -309,10 +322,9 @@ def main():
datafile = PACKAGE_ROOT / "data/merged_small.pkl"
with open(datafile, "rb") as f:
df = pickle.load(f)
data = TrackingData()
data.setData(as_dict(df))
data.setUserSelection(np.arange(0,100, 1))
data.setAssignmentStatus(True)
data = TrackingData(as_dict(df))
data.setSelection(np.arange(0,100, 1))
data.setUserLabeledStatus(True)
start_x = 0.1
app = QApplication([])
window = QWidget()
@@ -329,12 +341,14 @@ def main():
backBtn.clicked.connect(lambda: back(0.2))
btnLyt = QHBoxLayout()
btnLyt.setSpacing(1)
btnLyt.addWidget(backBtn)
btnLyt.addWidget(zeroBtn)
btnLyt.addWidget(fwdBtn)
view.setWindowPos(start_x)
layout = QVBoxLayout()
layout.setSpacing(1)
layout.addWidget(view)
layout.addLayout(btnLyt)
window.setLayout(layout)

View File

@@ -10,6 +10,7 @@ from fixtracks.utils.signals import DetectionSignals, DetectionViewSignals, Dete
from fixtracks.utils.enums import DetectionData, Tracks
from fixtracks.utils.trackingdata import TrackingData
class Detection(QGraphicsEllipseItem):
signals = DetectionSignals()
@@ -128,20 +129,23 @@ class DetectionView(QWidget):
del it
def updateDetections(self, keypoint=-1):
logging.info("DetectionView.updateDetections!")
self.clearDetections()
if self._data is None:
return
frames = self._data.selectedData("frame")
tracks = self._data.selectedData("track")
ids = self._data.selectedData("index")
coordinates = self._data.coordinates(selection=True)
centercoordinates = self._data.centerOfGravity(selection=True)
userlabeled = self._data.selectedData("userlabeled")
indices = self._data.selectedData("index")
image_rect = self._pixmapitem.boundingRect() if self._pixmapitem is not None else QRectF(0,0,0,0)
scores = self._data.selectedData("confidence")
for i, idx in enumerate(indices):
t = tracks[i]
image_rect = self._pixmapitem.boundingRect() if self._pixmapitem is not None else QRectF(0,0,0,0)
num_detections = len(frames)
for i, (id, f, t, l, s) in enumerate(zip(ids, frames, tracks, userlabeled, scores)):
c = Tracks.fromValue(t).toColor()
c.setAlpha(int(i * 255 / num_detections))
if keypoint >= 0:
x = coordinates[i, keypoint, 0]
y = coordinates[i, keypoint, 1]
@@ -150,10 +154,12 @@ class DetectionView(QWidget):
y = centercoordinates[i, 1]
item = Detection(image_rect.left() + x, image_rect.top() + y, 20, 20, brush=QBrush(c))
item.setData(DetectionData.TRACK_ID.value, tracks[i])
item.setData(DetectionData.ID.value, idx)
item.setData(DetectionData.TRACK_ID.value, t)
item.setData(DetectionData.ID.value, id)
item.setData(DetectionData.COORDINATES.value, coordinates[i, :, :])
item.setData(DetectionData.FRAME.value, frames[i])
item.setData(DetectionData.FRAME.value, f)
item.setData(DetectionData.USERLABELED.value, l)
item.setData(DetectionData.SCORE.value, s)
item = self._scene.addItem(item)
def fit_image_to_view(self):
@@ -212,7 +218,7 @@ def main():
view.setImage(img)
view.addDetections(bg_coords, bg_tracks, bg_ids, background_brush)
view.addDetections(focus_coords, focus_tracks, focus_ids, focus_brush)
view.addDetections(scnd_coords, scnd_tracks, scnd_ids, second_brush)
view.addDetections(scnd_coords, scnd_tracks, scnd_ids, second_brush)
window.setLayout(layout)
window.show()
app.exec()

View File

@@ -4,7 +4,7 @@ import numpy as np
from PySide6.QtCore import Qt, Signal, QSize
from PySide6.QtGui import QFont
from PySide6.QtWidgets import QWidget, QLabel, QPushButton, QSizePolicy
from PySide6.QtWidgets import QGridLayout, QVBoxLayout
from PySide6.QtWidgets import QGridLayout, QVBoxLayout, QApplication
from fixtracks.utils.styles import pushBtnStyle
@@ -15,6 +15,7 @@ class SelectionControls(QWidget):
assignTwo = Signal()
assignOther = Signal()
accept = Signal()
accept_until = Signal()
unaccept = Signal()
delete = Signal()
revertall = Signal()
@@ -51,7 +52,6 @@ class SelectionControls(QWidget):
quarterstepBackBtn.setStyleSheet(pushBtnStyle("darkgray"))
quarterstepBackBtn.clicked.connect(lambda: self.on_Back(quarterstep))
fwdBtn = QPushButton(">>|")
fwdBtn.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
fwdBtn.setShortcut(Qt.Key.Key_Right)
@@ -102,7 +102,7 @@ class SelectionControls(QWidget):
acceptBtn.setFont(font)
acceptBtn.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
acceptBtn.setStyleSheet(pushBtnStyle("darkgray"))
acceptBtn.setToolTip(f"Accept assignments of current selection as TRUE")
acceptBtn.setToolTip(f"Accept assignments of current selection as TRUE, Hold shift while clicking to accept all until here.")
acceptBtn.clicked.connect(self.on_Accept)
unacceptBtn = QPushButton("un-accept")
@@ -117,8 +117,9 @@ class SelectionControls(QWidget):
deleteBtn.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
deleteBtn.setStyleSheet(pushBtnStyle("red"))
deleteBtn.setToolTip(f"DANGERZONE! Delete current selection of detections!")
deleteBtn.setEnabled(False)
deleteBtn.setShortcut("Ctrl+D")
deleteBtn.clicked.connect(self.on_Delete)
deleteBtn.setEnabled(False)
revertBtn = QPushButton("revert assignments")
revertBtn.setFont(font)
@@ -171,7 +172,7 @@ class SelectionControls(QWidget):
grid.setColumnStretch(0, 1)
grid.setColumnStretch(7, 1)
self.setLayout(grid)
self.setMaximumSize(QSize(500, 500))
# self.setMaximumSize(QSize(500, 500))
def setWindow(self, start:int=0, end:int=0):
self.startframe.setText(f"{start:.0f}")
@@ -210,7 +211,12 @@ class SelectionControls(QWidget):
def on_Accept(self):
logging.debug("SelectionControl: accept AssignmentBtn")
self.accept.emit()
modifiers = QApplication.keyboardModifiers()
if modifiers == Qt.KeyboardModifier.ShiftModifier:
logging.debug("Shift key was pressed during accept")
self.accept_until.emit()
else:
self.accept.emit()
def on_Unaccept(self):
logging.debug("SelectionControl: revoke user assignmentBtn")

View File

@@ -94,7 +94,8 @@ class SkeletonWidget(QWidget):
i = s.data(DetectionData.ID.value)
t = s.data(DetectionData.TRACK_ID.value)
f = s.data(DetectionData.FRAME.value)
self._info_label.setText(f"Id {i}, track {t} on frame {f}, length {l:.1f} px")
sc = s.data(DetectionData.SCORE.value)
self._info_label.setText(f"Id {i}, track {t} on frame {f}, length {l:.1f} px, confidence {sc:.2f}")
else:
self._info_label.setText("")
@@ -129,7 +130,7 @@ class SkeletonWidget(QWidget):
self._scene.setSceneRect(self._minx, self._miny, self._maxx - self._minx, self._maxy - self._miny)
self._view.fitInView(self._scene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
def addSkeleton(self, coords, detection_id, frame, track, brush, update=True):
def addSkeleton(self, coords, detection_id, frame, track, score, brush, update=True):
def check_extent(x, y, w, h):
if x == 0 and y == 0:
return
@@ -157,12 +158,14 @@ class SkeletonWidget(QWidget):
item.setData(DetectionData.ID.value, detection_id)
item.setData(DetectionData.TRACK_ID.value, track)
item.setData(DetectionData.FRAME.value, frame)
item.setData(DetectionData.SCORE.value, score)
self._skeletons.append(item)
if update:
self.update()
def addSkeletons(self, coordinates:np.ndarray, detection_ids:np.ndarray,
frames:np.ndarray, tracks:np.ndarray, brush:QBrush):
frames:np.ndarray, tracks:np.ndarray, scores:np.ndarray,
brush:QBrush):
num_detections = 0 if coordinates is None else coordinates.shape[0]
logging.debug("SkeletonWidget: add %i Skeletons", num_detections)
if num_detections < 1:
@@ -172,9 +175,10 @@ class SkeletonWidget(QWidget):
detection_ids = detection_ids[sorting]
frames = frames[sorting]
tracks = tracks[sorting]
scores = scores[sorting]
for i in range(num_detections):
self.addSkeleton(coordinates[i,:,:], detection_ids[i], frames[i],
tracks[i], brush=brush, update=False)
tracks[i], scores[i], brush=brush, update=False)
self.update()
# def addSkeleton(self, coords, detection_id, brush):

View File

@@ -1,11 +1,11 @@
import logging
import numpy as np
import pandas as pd
from PySide6.QtCore import Qt, QThreadPool, Signal
from PySide6.QtGui import QImage, QBrush, QColor
from PySide6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QSizePolicy, QComboBox
from PySide6.QtWidgets import QSpinBox, QSpacerItem, QProgressBar, QSplitter, QGridLayout, QFileDialog, QGridLayout
from PySide6.QtWidgets import QSpinBox, QSpacerItem, QProgressBar, QSplitter, QFileDialog, QMessageBox
from fixtracks.utils.reader import PickleLoader
from fixtracks.utils.writer import PickleWriter
@@ -31,17 +31,12 @@ class FixTracks(QWidget):
self._currentWindowPos = 0 # in frames
self._currentWindowWidth = 0 # in frames
self._maxframes = 0
self._data = TrackingData()
self._manualmove = False
self._data = None
self._detectionView = DetectionView()
self._detectionView.signals.itemsSelected.connect(self.on_detectionsSelected)
self._skeleton = SkeletonWidget()
# self._skeleton.setMaximumSize(QSize(400, 400))
top_splitter = QSplitter(Qt.Orientation.Horizontal)
top_splitter.addWidget(self._detectionView)
top_splitter.addWidget(self._skeleton)
top_splitter.setStretchFactor(0, 2)
top_splitter.setStretchFactor(1, 1)
self._progress_bar = QProgressBar(self)
self._progress_bar.setMaximumHeight(20)
@@ -50,6 +45,7 @@ class FixTracks(QWidget):
self._timeline = DetectionTimeline()
self._timeline.signals.windowMoved.connect(self.on_windowChanged)
self._timeline.signals.moveRequest.connect(self.on_moveRequest)
self._windowspinner = QSpinBox()
self._windowspinner.setRange(10, 10000)
@@ -60,15 +56,31 @@ class FixTracks(QWidget):
self._keypointcombo = QComboBox()
self._keypointcombo.currentIndexChanged.connect(self.on_keypointSelected)
combo_layout = QGridLayout()
combo_layout.addWidget(QLabel("Window:"), 0, 0)
combo_layout.addWidget(self._windowspinner, 0, 1)
combo_layout.addWidget(QLabel("Keypoint:"), 1, 0)
combo_layout.addWidget(self._keypointcombo, 1, 1)
self._goto_spinner = QSpinBox()
self._goto_spinner.setSingleStep(1)
timelinebox = QHBoxLayout()
timelinebox.addWidget(self._timeline)
self._gotobtn = QPushButton("go!")
self._gotobtn.setToolTip("Jump to a given frame")
self._gotobtn.clicked.connect(self.on_goto)
combo_layout = QHBoxLayout()
combo_layout.addWidget(QLabel("Window width:"))
combo_layout.addWidget(self._windowspinner)
combo_layout.addWidget(QLabel("frames"))
combo_layout.addItem(QSpacerItem(10, 10, QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed))
combo_layout.addWidget(QLabel("Keypoint:"))
combo_layout.addWidget(self._keypointcombo)
combo_layout.addItem(QSpacerItem(10, 10, QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed))
combo_layout.addWidget(QLabel("Jump to frame:"))
combo_layout.addWidget(self._goto_spinner)
combo_layout.addWidget(self._gotobtn)
combo_layout.addItem(QSpacerItem(100, 10, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed))
combo_layout.setSpacing(1)
timelinebox = QVBoxLayout()
timelinebox.setSpacing(2)
timelinebox.addLayout(combo_layout)
timelinebox.addWidget(self._timeline)
self._controls_widget = SelectionControls()
self._controls_widget.assignOne.connect(self.on_assignOne)
@@ -77,6 +89,7 @@ class FixTracks(QWidget):
self._controls_widget.fwd.connect(self.on_forward)
self._controls_widget.back.connect(self.on_backward)
self._controls_widget.accept.connect(self.on_setUserFlag)
self._controls_widget.accept_until.connect(self.on_setUserFlagsUntil)
self._controls_widget.unaccept.connect(self.on_unsetUserFlag)
self._controls_widget.delete.connect(self.on_deleteDetection)
self._controls_widget.revertall.connect(self.on_revertUserFlags)
@@ -102,6 +115,7 @@ class FixTracks(QWidget):
data_selection_box.addWidget(QLabel("Select data file"))
data_selection_box.addWidget(self._data_combo)
data_selection_box.addItem(QSpacerItem(100, 10, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed))
data_selection_box.setSpacing(0)
btnBox = QHBoxLayout()
btnBox.setAlignment(Qt.AlignmentFlag.AlignLeft)
@@ -117,9 +131,14 @@ class FixTracks(QWidget):
cntrlBox = QHBoxLayout()
cntrlBox.addWidget(self._classifier)
cntrlBox.addWidget(self._controls_widget, alignment=Qt.AlignmentFlag.AlignCenter)
cntrlBox.addItem(QSpacerItem(300, 100, QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Expanding))
cntrlBox.addWidget(self._skeleton)
cntrlBox.addItem(QSpacerItem(50, 100, QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Expanding))
cntrlBox.setSpacing(0)
cntrlBox.setContentsMargins(0,0,0,0)
vbox = QVBoxLayout()
vbox.setSpacing(0)
vbox.setContentsMargins(0,0,0,0)
vbox.addLayout(timelinebox)
vbox.addLayout(cntrlBox)
vbox.addLayout(btnBox)
@@ -127,13 +146,16 @@ class FixTracks(QWidget):
container.setLayout(vbox)
splitter = QSplitter(Qt.Orientation.Vertical)
splitter.addWidget(top_splitter)
splitter.addWidget(self._detectionView)
splitter.addWidget(container)
splitter.setStretchFactor(0, 3)
splitter.setStretchFactor(1, 1)
layout = QVBoxLayout()
layout.addLayout(data_selection_box)
layout.addWidget(splitter)
layout.setSpacing(0)
layout.setContentsMargins(5,2,2,5)
self.setLayout(layout)
def on_autoClassify(self, tracks):
@@ -159,14 +181,19 @@ class FixTracks(QWidget):
self._detectionView.setImage(img)
def update(self):
kp = self._keypointcombo.currentText().lower()
if len(kp) == 0:
return
kpi = -1 if "center" in kp else int(kp)
start_frame = self._currentWindowPos
stop_frame = start_frame + self._currentWindowWidth
self._timeline.setWindow(start_frame / self._maxframes,
self._currentWindowWidth/self._maxframes)
logging.debug("Tracks:update: Updating View for detection range %i, %i frames", start_frame, stop_frame)
self._data.setSelectionRange("frame", start_frame, stop_frame)
self._controls_widget.setWindow(start_frame, stop_frame)
kp = self._keypointcombo.currentText().lower()
kpi = -1 if "center" in kp else int(kp)
self._detectionView.updateDetections(kpi)
@property
@@ -204,15 +231,16 @@ class FixTracks(QWidget):
self._progress_bar.setRange(0, 100)
self._progress_bar.setValue(0)
if state and self._reader is not None:
self._data.setData(self._reader.asdict)
self._data = TrackingData(self._reader.asdict)
self._saveBtn.setEnabled(True)
self._currentWindowPos = 0
self._currentWindowWidth = self._windowspinner.value()
self._maxframes = self._data.max("frame")
self._maxframes = np.max(self._data["frame"])
self._goto_spinner.setMaximum(self._maxframes)
self.populateKeypointCombo(self._data.numKeypoints())
self._timeline.setData(self._data)
self._timeline.setWindow(self._currentWindowPos / self._maxframes,
self._currentWindowWidth / self._maxframes)
# self._timeline.setWindow(self._currentWindowPos / self._maxframes,
# self._currentWindowWidth / self._maxframes)
self._detectionView.setData(self._data)
self._classifier.setData(self._data)
self.update()
@@ -247,49 +275,78 @@ class FixTracks(QWidget):
def on_assignOne(self):
logging.debug("Assigning user selection to track One")
self._data.assignUserSelection(self.trackone_id)
self._data.setTrack(self.trackone_id)
self._timeline.update()
self.update()
def on_assignTwo(self):
logging.debug("Assigning user selection to track Two")
self._data.assignUserSelection(self.tracktwo_id)
self._data.setTrack(self.tracktwo_id)
self._timeline.update()
self.update()
def on_assignOther(self):
logging.debug("Assigning user selection to track Other")
self._data.assignUserSelection(self.trackother_id, False)
self._data.setTrack(self.trackother_id, False)
self._timeline.update()
self.update()
def on_setUserFlag(self):
self._data.setAssignmentStatus(True)
self._data.setUserLabeledStatus(True)
self._timeline.update()
self.update()
def on_setUserFlagsUntil(self):
self._data.setSelectionRange("frame", 0, self._currentWindowPos + self._currentWindowWidth)
self._data.setUserLabeledStatus(True)
self._timeline.update()
self.update()
def on_unsetUserFlag(self):
logging.debug("Tracks:unsetUserFlag")
self._data.setAssignmentStatus(False)
self._data.setUserLabeledStatus(False)
self._timeline.update()
self.update()
def on_revertUserFlags(self):
logging.debug("Tracks:revert ALL UserFlags and track assignments")
self._data.revertAssignmentStatus()
self._data.revertTrackAssignments()
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Icon.Warning)
msg_box.setText(f"Are you sure you want to revert ALL track assignments?")
msg_box.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
msg_box.setDefaultButton(QMessageBox.StandardButton.No)
ret = msg_box.exec()
if ret == QMessageBox.StandardButton.Yes:
self._data.revertUserLabeledStatus()
self._data.revertTrackAssignments()
self._timeline.update()
self.update()
def on_deleteDetection(self):
logging.warning("Tracks:delete detections is currently not supported!")
# self._data.deleteDetections()
logging.info("Tracks:deleting detections!")
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Icon.Warning)
msg_box.setText(f"Are you sure you want to delete the selected ({len(self._data.selectionIndices)})detections?")
msg_box.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
msg_box.setDefaultButton(QMessageBox.StandardButton.No)
ret = msg_box.exec()
if ret == QMessageBox.StandardButton.Yes:
self._data.deleteDetections()
self._timeline.update()
self.update()
def on_windowChanged(self):
logging.debug("Tracks:Timeline reports window change ")
self._currentWindowPos = np.round(self._timeline.rangeStart * self._maxframes)
if not self._manualmove:
self._currentWindowPos = np.round(self._timeline.rangeStart * self._maxframes)
self.update()
self._manualmove = False
def on_moveRequest(self, pos):
new_pos = int(np.round(pos * self._maxframes))
self._currentWindowPos = new_pos
self.update()
def on_windowSizeChanged(self, value):
@@ -302,14 +359,29 @@ class FixTracks(QWidget):
"""
self._currentWindowWidth = value
logging.debug("Tracks:OnWindowSizeChanged %i franes", value)
self._timeline.setWindowWidth(self._currentWindowWidth / self._maxframes)
self._controls_widget.setSelectedTracks(None)
# if self._maxframes == 0:
# self._timeline.setWindowWidth(self._currentWindowWidth / 2000)
# else:
# self._timeline.setWindowWidth(self._currentWindowWidth / self._maxframes)
# self._controls_widget.setSelectedTracks(None)
self.update()
def on_goto(self):
target = self._goto_spinner.value()
if target > self._maxframes - self._currentWindowWidth:
target = self._maxframes - self._currentWindowWidth
logging.info("Jump to frame %i", target)
self._currentWindowPos = target
self._timeline.setWindow(self._currentWindowPos / self._maxframes,
self._currentWindowWidth / self._maxframes)
self.update()
def on_detectionsSelected(self, detections):
logging.debug("Tracks: Detections selected")
logging.debug("Tracks: %i Detections selected", len(detections))
tracks = np.zeros(len(detections), dtype=int)
ids = np.zeros_like(tracks)
frames = np.zeros_like(tracks)
scores = np.zeros(tracks.shape, dtype=float)
coordinates = None
if len(detections) > 0:
c = detections[0].data(DetectionData.COORDINATES.value)
@@ -320,16 +392,20 @@ class FixTracks(QWidget):
ids[i] = d.data(DetectionData.ID.value)
frames[i] = d.data(DetectionData.FRAME.value)
coordinates[i, :, :] = d.data(DetectionData.COORDINATES.value)
self._data.setUserSelection(ids)
scores[i] = d.data(DetectionData.SCORE.value)
self._data.setSelection(ids)
self._controls_widget.setSelectedTracks(tracks)
self._skeleton.clear()
self._skeleton.addSkeletons(coordinates, ids, frames, tracks, QBrush(QColor(10, 255, 65, 255)))
self.update()
self._skeleton.addSkeletons(coordinates, ids, frames, tracks, scores, QBrush(QColor(10, 255, 65, 255)))
def moveWindow(self, stepsize):
step = np.round(stepsize * (self._currentWindowWidth))
new_start_frame = self._currentWindowPos + step
self._timeline.setWindowPos(new_start_frame / self._maxframes)
logging.info("Tracks.moveWindow: move window with stepsize %.2f", stepsize)
self._manualmove = True
new_start_frame = self._currentWindowPos + np.round(stepsize * self._currentWindowWidth)
if new_start_frame < 0:
new_start_frame = 0
elif new_start_frame + self._currentWindowWidth > self._maxframes:
new_start_frame = self._maxframes - self._currentWindowWidth
self._currentWindowPos = new_start_frame
self._controls_widget.setSelectedTracks(None)
self.update()