working on merging
This commit is contained in:
parent
b24c4435e7
commit
39dadac57e
17
README.md
17
README.md
@ -1,3 +1,20 @@
|
||||
# fixtracks
|
||||
|
||||
UI tool to merge and fix tracking results from our 2-camera behavior setup
|
||||
|
||||
## TODO
|
||||
* add status bar that shows the processes that are running in the background
|
||||
* icons
|
||||
* merging of the detections
|
||||
* track merging
|
||||
* leftcut: 1778, rightcut: 400
|
||||
|
||||
|
||||
* how to merge the detections:
|
||||
1. go through the frames.
|
||||
2. for each frame:
|
||||
1. get the detections for that frame
|
||||
2. if they are outside the overlap range, accept them into the destination data frame
|
||||
* if on the right video, increment the x-positions
|
||||
* if they are on the left video, accept as is
|
||||
3. if they are within, they need to be merged:
|
@ -1,10 +1,10 @@
|
||||
import logging
|
||||
|
||||
from PyQt6.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QComboBox, QSizePolicy, QSpinBox, QGraphicsView, QGraphicsScene, QGraphicsLineItem, QSpacerItem
|
||||
from PyQt6.QtCore import QThreadPool, Qt, pyqtSignal
|
||||
from PyQt6.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QComboBox, QSizePolicy, QSpinBox, QGraphicsView, QGraphicsScene, QGraphicsLineItem, QSpacerItem, QProgressDialog, QProgressBar
|
||||
from PyQt6.QtCore import QThreadPool, Qt, pyqtSignal, pyqtSlot
|
||||
from PyQt6.QtGui import QImage, QPixmap, QColor, QPen
|
||||
|
||||
from fixtracks.util import ImageReader, DataFrameReader
|
||||
from fixtracks.util import ImageReader, DataFrameReader, Merger
|
||||
|
||||
|
||||
class VideoPreview(QWidget):
|
||||
@ -59,6 +59,8 @@ class MergeDetections(QWidget):
|
||||
self.left_dataframereader = None
|
||||
self._left_data = None
|
||||
self._right_data = None
|
||||
self._progressDialog = None
|
||||
self._merger = None
|
||||
|
||||
self.left_datacombo = QComboBox()
|
||||
self.left_videocombo = QComboBox()
|
||||
@ -67,6 +69,9 @@ class MergeDetections(QWidget):
|
||||
self.left_posspinner = QSpinBox()
|
||||
self.left_posspinner.valueChanged.connect(self.on_leftmergelinemove)
|
||||
self.left_preview = VideoPreview()
|
||||
self.left_framespinner = QSpinBox()
|
||||
self.left_framespinner.setMaximum(10000)
|
||||
self.left_framespinner.setValue(100)
|
||||
|
||||
self.right_datacombo = QComboBox()
|
||||
self.right_videocombo = QComboBox()
|
||||
@ -75,6 +80,9 @@ class MergeDetections(QWidget):
|
||||
self.right_posspinner = QSpinBox()
|
||||
self.right_posspinner.valueChanged.connect(self.on_rightmergelinemove)
|
||||
self.right_preview = VideoPreview(left=False)
|
||||
self.right_framespinner = QSpinBox()
|
||||
self.right_framespinner.setMaximum(10000)
|
||||
self.right_framespinner.setValue(100)
|
||||
|
||||
self._mergePreviewBtn = QPushButton("Preview")
|
||||
self._mergePreviewBtn.clicked.connect(self.on_mergePreview)
|
||||
@ -112,17 +120,21 @@ class MergeDetections(QWidget):
|
||||
grd.addWidget(QLabel("Data"), 1, 0)
|
||||
grd.addWidget(QLabel("Video"), 2, 0)
|
||||
grd.addWidget(QLabel("Mergeline"), 3, 0)
|
||||
grd.addWidget(QLabel("Seek frame"), 4, 0)
|
||||
grd.addWidget(self.left_datacombo, 1, 1)
|
||||
grd.addWidget(self.left_videocombo, 2, 1)
|
||||
grd.addWidget(self.left_posspinner, 3, 1)
|
||||
grd.addWidget(self.left_framespinner, 4, 1)
|
||||
|
||||
grd.addWidget(QLabel("Right"), 0, 2)
|
||||
grd.addWidget(QLabel("Data"), 1, 2)
|
||||
grd.addWidget(QLabel("Video"), 2, 2)
|
||||
grd.addWidget(QLabel("Mergeline"), 3, 2)
|
||||
grd.addWidget(QLabel("Seek frame"), 4, 2)
|
||||
grd.addWidget(self.right_datacombo, 1, 3)
|
||||
grd.addWidget(self.right_videocombo, 2, 3)
|
||||
grd.addWidget(self.right_posspinner, 3, 3)
|
||||
grd.addWidget(self.right_framespinner, 4, 3)
|
||||
|
||||
grd.setColumnStretch(0, 0)
|
||||
grd.setColumnStretch(2, 0)
|
||||
@ -173,33 +185,39 @@ class MergeDetections(QWidget):
|
||||
|
||||
def on_rightvideoSelection(self):
    """Start an ImageReader worker to grab a preview frame from the selected right video."""
    logging.debug("Video selection of the %s side", "right")
    # Fixed: diff residue left a duplicated construction with a hard-coded
    # frame 100; seek to the user-chosen frame instead.
    self.right_imagereader = ImageReader(self.right_videocombo.currentText(), self.right_framespinner.value())
    self.right_imagereader.signals.finished.connect(self.right_imgreaderDone)
    self.threadpool.start(self.right_imagereader)
|
||||
|
||||
def right_imgreaderDone(self, state):
    """Show the frame delivered by the right-side ImageReader worker.

    Parameters
    ----------
    state : bool
        Success flag emitted by the reader's finished signal.
    """
    logging.debug("Right image reader done with state %s", str(state))
    frame = self.right_imagereader.frame
    # Fixed: removed the duplicated unguarded lines left over from the diff;
    # only the None-guarded version survives.
    if frame is not None:
        img = self._toImage(frame)
        self.right_preview.set_image(img)
        self.right_posspinner.setMaximum(img.width() - 1)
        # right mergeline starts at the left edge of the right view
        self.right_posspinner.setValue(0)
        self.checkButtons()
    else:
        logging.error("Reading frame failed!")
||||
|
||||
def on_leftVideoSelection(self):
    """Start an ImageReader worker to grab a preview frame from the selected left video."""
    logging.debug("Video selection of the %s side", "left")
    # Fixed: diff residue left a duplicated construction with a hard-coded
    # frame 100; seek to the user-chosen frame instead.
    self.left_imagereader = ImageReader(self.left_videocombo.currentText(), self.left_framespinner.value())
    self.left_imagereader.signals.finished.connect(self.left_imgreaderDone)
    self.threadpool.start(self.left_imagereader)
|
||||
|
||||
def left_imgreaderDone(self, state):
    """Show the frame delivered by the left-side ImageReader worker.

    Parameters
    ----------
    state : bool
        Success flag emitted by the reader's finished signal.
    """
    logging.debug("Left image reader done with state %s", str(state))
    frame = self.left_imagereader.frame
    # Fixed: removed the duplicated unguarded lines left over from the diff;
    # only the None-guarded version survives.
    if frame is not None:
        img = self._toImage(frame)
        self.left_preview.set_image(img)
        self.left_posspinner.setMaximum(img.width() - 1)
        # left mergeline starts at the right edge of the left view
        self.left_posspinner.setValue(img.width() - 1)
        self.checkButtons()
    else:
        logging.error("Reading frame failed!")
|
||||
|
||||
def on_leftDataSelection(self):
|
||||
logging.debug("Data selection of the %s side", "left")
|
||||
@ -241,8 +259,37 @@ class MergeDetections(QWidget):
|
||||
|
||||
def on_merge(self):
    """Kick off a background Merger run with a modal progress dialog."""
    logging.debug("detectionmerge: merge pressed")
    # Fixed: removed the leftover 'pass' stub that preceded the real body.
    # NOTE(review): __init__ initializes self._merger, but this method and
    # the merge slots use self.merger — confirm which attribute is intended.
    self.merger = Merger(self._left_data, self._right_data,
                         self.left_posspinner.value(),
                         self.right_posspinner.value())
    self._progressDialog = QProgressDialog(parent=self)
    self._progressDialog.setAutoClose(True)
    self._progressDialog.setRange(0, 100)
    self._progressDialog.setLabelText("Merging detections:")
    self._progressDialog.setCancelButtonText("Cancel")
    self._progressDialog.setWindowModality(Qt.WindowModality.WindowModal)
    self._progressDialog.canceled.connect(self.on_mergeCancelled)
    self._progressDialog.show()
    self.merger.signals.progress.connect(self.on_mergeProgress)
    self.merger.signals.finished.connect(self.on_mergeDone)
    self.threadpool.start(self.merger)
|
||||
|
||||
@pyqtSlot()
def on_mergeCancelled(self):
    # Forward the dialog's cancel to the worker; the Merger checks its stop
    # flag between frames and ends cooperatively.
    self.merger.stop_request()
|
||||
|
||||
@pyqtSlot(float)
def on_mergeProgress(self, value):
    """Update the progress dialog; *value* is the merge fraction in [0, 1]."""
    # Fixed: debug print replaced with the module's logging convention.
    logging.debug("mergeProgress %s", value)
    if self._progressDialog is not None:
        self._progressDialog.setValue(int(value * 100))
|
||||
|
||||
@pyqtSlot(bool)
def on_mergeDone(self, state):
    """Tear down the progress dialog and merger when merging finishes or is cancelled."""
    logging.debug("Merging stopped with status %s", state)
    # Fixed: guard against a missing dialog, consistent with on_mergeProgress.
    if self._progressDialog is not None:
        self._progressDialog.close()
    self._progressDialog = None
    self.merger = None
|
||||
|
||||
def checkButtons(self):
|
||||
merge_enabled = self._left_data is not None and self._right_data is not None
|
||||
|
@ -16,6 +16,13 @@ class TasksWidget(QWidget):
|
||||
folderBtn.setMaximumSize(200, 100)
|
||||
folderBtn.clicked.connect(self._open_folder)
|
||||
|
||||
self.convertBtn = QPushButton("Convert tracks")
|
||||
self.convertBtn.setToolTip("Convert JSON pose files to to Pandas data frames")
|
||||
self.convertBtn.setEnabled(False)
|
||||
self.convertBtn.setMaximumSize(200, 100)
|
||||
self.convertBtn.clicked.connect(self.convert_clicked)
|
||||
self.convertBtn.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
|
||||
|
||||
self.mergeBtn = QPushButton("Merge detections")
|
||||
self.mergeBtn.setEnabled(False)
|
||||
self.mergeBtn.setMaximumSize(200, 100)
|
||||
@ -31,6 +38,7 @@ class TasksWidget(QWidget):
|
||||
l = QVBoxLayout()
|
||||
l.setAlignment(Qt.AlignmentFlag.AlignVCenter)
|
||||
l.addWidget(folderBtn)
|
||||
l.addWidget(self.convertBtn)
|
||||
l.addWidget(self.mergeBtn)
|
||||
l.addWidget(self.tracksBtn)
|
||||
self.setLayout(l)
|
||||
@ -42,6 +50,9 @@ class TasksWidget(QWidget):
|
||||
def _tracks_clicked(self):
    # Notify listeners that the "Tracks" task was selected.
    self.clicked.emit("Tracks")
|
||||
|
||||
def _convert_clickes(self):
    # Fixed: the def line was missing its trailing colon (SyntaxError).
    # NOTE(review): the convert button connects to self.convert_clicked —
    # confirm whether this handler (typo "clickes"?) is the intended target.
    self.clicked.emit("Convert")
|
||||
|
||||
def _open_folder(self):
|
||||
logging.debug("TasksWidget:select data folder")
|
||||
folder = QFileDialog.getExistingDirectory()
|
||||
|
@ -1,14 +1,18 @@
|
||||
import logging
|
||||
import pandas as pd
|
||||
import cv2 as cv
|
||||
|
||||
import time
|
||||
import numpy as np
|
||||
from PyQt6.QtCore import QRunnable, pyqtSlot, pyqtSignal, QObject
|
||||
|
||||
from IPython import embed
|
||||
|
||||
class ProducerSignals(QObject):
    """Qt signals emitted by the background workers (ImageReader, DataFrameReader, Merger)."""
    # True on success, False on failure or cancellation.
    finished = pyqtSignal(bool)
    # error = pyqtSignal(str)
    # start = pyqtSignal(float)
    # running = pyqtSignal()
    # Fraction of the work completed, in [0, 1].
    progress = pyqtSignal(float)
|
||||
|
||||
|
||||
class ImageReader(QRunnable):
|
||||
@ -28,6 +32,9 @@ class ImageReader(QRunnable):
|
||||
'''
|
||||
logging.debug("ImageReader: trying to open file %s", self._filename)
|
||||
cap = cv.VideoCapture(self._filename)
|
||||
framecount = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
|
||||
if self._framenumber >= framecount:
|
||||
logging.warning("ImageReader: desired frame number (%i) exceeds the frame count (%i)! Redefined to frame count." % (self._framenumber, framecount))
|
||||
if not cap.isOpened():
|
||||
logging.debug("ImageReader: failed to open file %s", self._filename)
|
||||
self._signals.finished.emit(False)
|
||||
@ -39,7 +46,8 @@ class ImageReader(QRunnable):
|
||||
self._signals.finished.emit(False)
|
||||
break
|
||||
fn += 1
|
||||
self._frame = frame # cv.cvtColor(frame, cv.COLOR_BGR2RGB)
|
||||
self._frame = frame # cv.cvtColor(frame, cv.COLOR_BGR2RGB)
|
||||
cap.release()
|
||||
self._signals.finished.emit(True)
|
||||
|
||||
@property
|
||||
@ -77,3 +85,158 @@ class DataFrameReader(QRunnable):
|
||||
def dataframe(self):
|
||||
return self._dataframe
|
||||
|
||||
|
||||
class Merger(QRunnable):
    """Background task that merges left/right detection DataFrames frame by frame."""

    def __init__(self, left_data, right_data, left_cut, right_cut) -> None:
        """
        Parameters
        ----------
        left_data, right_data : pd.DataFrame
            Detections from the left and right camera, with a 'frame' column.
        left_cut, right_cut : int
            x-positions delimiting the overlap zone in each view.
        """
        super().__init__()
        self._signals = ProducerSignals()
        self._left_data = left_data
        self._right_data = right_data
        self._left_cut = left_cut
        self._right_cut = right_cut
        self._result = None
        self._stopRequest = False

    @pyqtSlot()
    def stop_request(self):
        """Request a cooperative stop; checked once per frame in run()."""
        self._stopRequest = True

    @pyqtSlot()
    def run(self):
        """Merge detections frame by frame, emitting progress and a final status."""
        self._stopRequest = False
        max_frames = max(self._left_data.frame.max(), self._right_data.frame.max())
        # Fixed: max_frames was logged before it was assigned (NameError).
        logging.debug("Merger: running merge for %i frames", max_frames)
        # Fixed: step was max_frames // 100, which is 0 for fewer than 100
        # frames and crashed the progress modulo below.
        step = max(1, max_frames // 100)
        self._result = pd.DataFrame(columns=self._left_data.columns)

        for frame in range(max_frames):
            if self._stopRequest:
                break
            lf = self._left_data[self._left_data.frame == frame]
            rf = self._right_data[self._right_data.frame == frame]
            # Fixed: called the undefined name merge_frame; the module-level
            # helper is merge_frames.
            merge_frames(lf, rf, self._left_cut, self._right_cut, self._result)
            if frame % step == 0:
                self.signals.progress.emit(frame / max_frames)
                time.sleep(0.01)
        # False when the run was cancelled, True otherwise.
        self._signals.finished.emit(not self._stopRequest)

    @property
    def signals(self):
        return self._signals

    @property
    def result(self):
        """Merged DataFrame, or None until run() has completed."""
        return self._result
|
||||
|
||||
def merge_frames(left, right, leftcut, rightcut, destination):
    # TODO: stub — intended to merge one frame's left/right detections into
    # *destination*, resolving duplicates inside the overlap zone.
    # for
    pass
|
||||
|
||||
def check_frame(frame, cut, left=True):
    """Check whether a detected object lies outside the overlap zone of the two cameras.

    A frame is 'ok' if its bounding box does not reach past the cut position
    into the overlap ("danger") zone.

    Parameters
    ----------
    frame : pd.Series,
        a row of the DataFrame; must provide box_x1 and box_x2
    cut : int
        The cut x-position
    left : bool, optional
        whether we are looking at the right or left frame, by default True

    Returns
    -------
    bool
        True if the box is entirely on the safe side of the cut.
    """
    if left:
        # on the left video the danger zone is beyond (right of) the cut
        return not (frame.box_x1 > cut or frame.box_x2 > cut)  # any of the box coordinates is beyond the cut
    else:
        # on the right video the danger zone is before (left of) the cut
        return not (frame.box_x1 < cut or frame.box_x2 < cut)
|
||||
|
||||
|
||||
def merge_detections(left_data: pd.DataFrame, right_data: pd.DataFrame, left_cut: int, right_cut: int) -> pd.DataFrame:
    """Merge the key-point detections based on the left and right video.

    Rows whose bounding box stays outside the overlap zone are accepted
    directly; rows inside the zone are collected per side for later merging.
    Key-points with x-coordinates beyond each of the limits (left and right
    cut) are discarded.

    Parameters
    ----------
    left_data : pd.DataFrame
        Detections based on the left video
    right_data : pd.DataFrame
        Detections based on the right video.
    left_cut : int
        Where to cut off and discard the detected key points.
    right_cut : int
        Where to cut off and discard the detected key points.

    Returns
    -------
    pd.DataFrame
        merged detections of left and right data, or None if a dataframe
        fails validation.
    """
    def check_dataframe(df):
        # TODO: validate the expected column layout; currently accepts anything.
        return True

    def to_numpy(df):
        # Unpack the string-encoded key-point/visibility columns into arrays.
        key_columns = [c for c in df.columns if "key_" in c]
        box_columns = [c for c in df.columns if "box_" in c]
        num_frames = len(df)
        num_keypoints = len(key_columns)
        dimensions = 2
        keypoints = np.empty((num_frames, num_keypoints, dimensions))
        visibility = np.empty((num_frames, num_keypoints))
        boxcoordinates = np.empty((num_frames, 4))

        # NOTE(review): indexing the arrays with the DataFrame index assumes
        # a clean RangeIndex — confirm the data is not filtered/reindexed.
        for i, row in df.iterrows():
            for j, k in enumerate(key_columns):
                key_data = row[k]
                l = list(map(float, list(key_data[1:-1].split(","))))
                keypoints[i, j, :] = l
            for j, b in enumerate(box_columns):
                boxcoordinates[i, j] = row[b]
            if isinstance(row["visible"], str):
                vis = list(map(float, row["visible"][1:-1].split()))
                visibility[i, :] = vis
            else:
                visibility[i, :] = row["visible"]
        return keypoints, visibility, boxcoordinates

    logging.debug("Cutting left detections to limit %i", left_cut)
    if not check_dataframe(left_data) or not check_dataframe(right_data):
        logging.error("Left or right dataframe structure does not match my expectations")
        return None
    df = pd.DataFrame(columns=left_data.columns)
    dangerzone = {"left": [], "right": []}
    # Parsed arrays are not used yet — presumably needed for the upcoming
    # within-overlap merge; kept so parse errors surface early.
    lkeypoints, lquality, lbox = to_numpy(left_data)
    # Fixed: removed leftover debugging calls embed()/exit() that aborted the
    # whole process before any merging happened.
    for i, row in left_data.iterrows():
        if check_frame(row, left_cut):
            df = pd.concat([df, row.to_frame().T], ignore_index=True)
        else:
            dangerzone["left"].append(row)
        if i > 10000:  # NOTE(review): debugging cap — confirm before release
            break
    logging.debug("Left done")
    for i, row in right_data.iterrows():
        # Fixed: iterrows() already unpacks (index, row); the old row[1]
        # read a column value instead of the row Series.
        if check_frame(row, right_cut, left=False):
            # convert_right(row) need to convert the frame coordinates!
            df = pd.concat([df, row.to_frame().T], ignore_index=True)
        else:
            dangerzone["right"].append(row)
        if i > 10000:  # NOTE(review): debugging cap — confirm before release
            break
    # Fixed: the function documented a DataFrame return but returned None.
    return df
|
||||
|
||||
|
||||
def main():
    # Ad-hoc test driver: load the left tracks and run the merge with
    # hard-coded cut positions (right data currently disabled / None).
    left = pd.read_csv("../data/left_tracks.csv", sep=";", index_col=0)
    # right = pd.read_csv("../data/right_tracks.csv", sep=";", index_col=0)
    merge_detections(left, None, 2000, 300)
|
Loading…
Reference in New Issue
Block a user