[util] merging seems to work
This commit is contained in:
parent
23b513ab68
commit
9946c4c72a
@ -2,6 +2,7 @@ import logging
|
||||
import pandas as pd
|
||||
import cv2 as cv
|
||||
import time
|
||||
import pickle
|
||||
import numpy as np
|
||||
from PyQt6.QtCore import QRunnable, pyqtSlot, pyqtSignal, QObject
|
||||
|
||||
@ -193,10 +194,12 @@ class Merger(QRunnable):
|
||||
max_frames = len(self._left_data) + len(self._right_data)
|
||||
|
||||
logging.debug("Cutting left detections to limit %i", self.left_cut)
|
||||
self.signals.progress.emit(0.1)
|
||||
lkeypoints, lquality, lbox = self.to_numpy(self._left_data)
|
||||
self.signals.progress.emit(0.2)
|
||||
lframes = self._left_data.frame.values
|
||||
led, ltd = self.sort_detections(lkeypoints, self.left_cut, left=True)
|
||||
|
||||
self.signals.progress.emit(0.3)
|
||||
logging.debug("Cutting right detections to limit %i", self._right_cut_cut)
|
||||
rkeypoints, rquality, rbox = self.to_numpy(self.right_data)
|
||||
rframes = self.right_data.frame.values
|
||||
@ -289,6 +292,7 @@ def merge_detections(left_data:pd.DataFrame, right_data: pd.DataFrame, left_cut:
|
||||
return True
|
||||
|
||||
def to_numpy(df):
|
||||
logging.info("Converting to numpy ...")
|
||||
key_columns = [c for c in df.columns if "key_" in c]
|
||||
box_columns = [c for c in df.columns if "box_" in c]
|
||||
num_frames = len(df)
|
||||
@ -310,11 +314,12 @@ def merge_detections(left_data:pd.DataFrame, right_data: pd.DataFrame, left_cut:
|
||||
visibility[i, :] = vis
|
||||
else:
|
||||
visibility[i, :] = row["visible"]
|
||||
logging.debug("Converting to numpy done!")
|
||||
return keypoints, visibility, boxcoordinates
|
||||
|
||||
def sort_detections(keypoints, threshold, left=True):
|
||||
"""Categorize the detections into those that are easy (not in the visual overlap zone) and those that are tricky, i.e. right across the threshold.
|
||||
Detections beyond the threshold are ignored; those across the threshold need to be treated separately.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@ -332,6 +337,7 @@ def merge_detections(left_data:pd.DataFrame, right_data: pd.DataFrame, left_cut:
|
||||
np.ndarray
|
||||
The tricky detections
|
||||
"""
|
||||
logging.info("Sorting detections")
|
||||
if left:
|
||||
easyindeces = np.where(np.all(keypoints[:,:,0] < threshold, axis=1))[0]
|
||||
trickyindices = np.where(np.any((keypoints[:,:,0] >= threshold) &
|
||||
@ -342,32 +348,85 @@ def merge_detections(left_data:pd.DataFrame, right_data: pd.DataFrame, left_cut:
|
||||
(keypoints[:,:,0] >= threshold), axis=1))[0]
|
||||
return easyindeces, trickyindices
|
||||
|
||||
logging.debug("Cutting left detections to limit %i", left_cut)
|
||||
def select_and_transform(df, keypoints, boxes, quality, frames, valid_detections, left_threshold=None, right_threshold=None):
    """Restrict detections to the given indices and optionally shift x-coordinates.

    If both thresholds are given, the x-coordinates of keypoints and boxes are
    shifted by ``left_threshold - right_threshold`` to map right-view coordinates
    into the merged coordinate system.

    Parameters
    ----------
    df : pd.DataFrame
        Detection table; rows are selected positionally via ``iloc``.
    keypoints : np.ndarray
        Keypoint coordinates, shape (detections, keypoints, 2) — TODO confirm axis meaning.
    boxes : np.ndarray
        Bounding boxes, shape (detections, 4); columns 0 and 2 are x-coordinates.
    quality : np.ndarray
        Per-keypoint quality scores, shape (detections, keypoints).
    frames : np.ndarray
        Frame index per detection.
    valid_detections : np.ndarray
        Indices of the detections to keep.
    left_threshold : int or float, optional
        Cut-off of the left view; part of the coordinate shift.
    right_threshold : int or float, optional
        Cut-off of the right view; part of the coordinate shift.

    Returns
    -------
    tuple
        (df, keypoints, quality, boxes, frames), all restricted to valid_detections.
    """
    keypoints = keypoints[valid_detections, :, :]
    boxes = boxes[valid_detections, :]
    quality = quality[valid_detections, :]
    frames = frames[valid_detections]
    df = df.iloc[valid_detections]
    # BUGFIX: all([l, r]) is False when either threshold is 0 (falsy), silently
    # skipping the coordinate shift; test explicitly for None instead.
    if left_threshold is not None and right_threshold is not None:
        offset = left_threshold - right_threshold
        keypoints[:, :, 0] += offset
        boxes[:, [0, 2]] += offset  # shift only the x-coordinates of the box corners

    return df, keypoints, quality, boxes, frames
|
||||
|
||||
|
||||
def to_dataframe(old_left, old_right, lkeypoints, rkeypoints, lboxes, rboxes, lqualities, rqualities, lframes, rframes):
    """Combine left- and right-view detections into a single DataFrame sorted by frame.

    Scalar columns (confidence, class, name, track id) are taken from the original
    dataframes; array-valued columns (keypoints, boxes, keypoint scores) come from
    the numpy arrays and are stored one array per cell.

    Returns
    -------
    pd.DataFrame
        Merged detections with columns frame, cls, name, keypoint_score, track,
        keypoints, box, confidence.
    """
    frames = np.concatenate([lframes, rframes])
    order = np.argsort(frames)
    frames = frames[order]

    # Scalar per-detection columns, concatenated left-then-right, then frame-sorted.
    confidences = np.concatenate([old_left.confidence.values, old_right.confidence.values])[order]
    classes = np.concatenate([old_left.cls.values, old_right.cls.values])[order]
    names = np.concatenate([old_left.name.values, old_right.name.values])[order]
    tracks = np.concatenate([old_left.track_id.values, old_right.track_id.values], axis=0)[order]

    # Array-valued per-detection data, sorted the same way.
    keypoints = np.concatenate([lkeypoints, rkeypoints], axis=0)[order, :, :]
    boxes = np.concatenate([lboxes, rboxes], axis=0)[order, :]
    qualities = np.concatenate([lqualities, rqualities], axis=0)[order, :]

    # One array per DataFrame cell, so each row carries its own detection data.
    quality_cells = [qualities[i, :] for i in range(len(frames))]
    box_cells = [boxes[i, :] for i in range(len(frames))]
    keypoint_cells = [keypoints[i, :] for i in range(len(frames))]

    columns = {"frame": frames, "cls": classes, "name": names, "keypoint_score": quality_cells,
               "track": tracks, "keypoints": keypoint_cells, "box": box_cells,
               "confidence": confidences}
    return pd.DataFrame(columns)
|
||||
|
||||
|
||||
|
||||
logging.info("Cutting left detections to limit %i", left_cut)
|
||||
if not check_dataframe(left_data) or not check_dataframe(right_data):
|
||||
logging.error("Left or right dataframe structure does not match my expectations")
|
||||
return None
|
||||
logging.info("Converting to numpy %i", left_cut)
|
||||
lkeypoints, lquality, lbox = to_numpy(left_data)
|
||||
# rkeypoint, rquality, rbox = to_numpy(right_data)
|
||||
rkeypoints, rquality, rbox = to_numpy(right_data)
|
||||
lframes = left_data.frame.values
|
||||
# rframes = right_data.frame.values
|
||||
led, ltd = sort_detections(lkeypoints, left_cut, left=True)
|
||||
# red, rtd = sort_detections(rkeypoints, right_cut, left=False)
|
||||
rframes = right_data.frame.values
|
||||
logging.info("Filtering detections")
|
||||
left_easy, _ = sort_detections(lkeypoints, left_cut, left=True)
|
||||
right_easy, _ = sort_detections(rkeypoints, right_cut, left=False)
|
||||
logging.info("Merging and transformation")
|
||||
ldf, lkeypoints, lquality, lboxes, lframes = select_and_transform(left_data, lkeypoints, lbox, lquality, lframes, left_easy)
|
||||
rdf, rkeypoints, rquality, rboxes, rframes = select_and_transform(right_data, rkeypoints, rbox, rquality, rframes, right_easy, left_cut, right_cut)
|
||||
export_df = to_dataframe(ldf, rdf, lkeypoints, rkeypoints, lboxes, rboxes, lquality, rquality, lframes, rframes)
|
||||
|
||||
# here we need to decide what to do with these data points, trust the left, or trust the right perspective?
|
||||
# we could also discard them. unless it is a lot of data points, not much harm will be done...
|
||||
# next step after handling the tricky ones is to export the data again to pandas? nixtrack?
|
||||
# 1. the right coordinates have to adapted! x - right_threshold + left_threshold!
|
||||
rkeypoints[:, :, 0] += (left_cut - right_cut)
|
||||
filename = "test.pkl"
|
||||
logging.info("Saving/pickling merged file to %s" % filename)
|
||||
with open(filename, 'rb') as f:
|
||||
pickle.dump(export_df, f)
|
||||
|
||||
embed()
|
||||
exit()
|
||||
logging.info("Merging done!")
|
||||
|
||||
|
||||
def main():
    """Load the left and right track CSVs and merge their detections.

    Paths and cut thresholds are hard-coded for the test data set
    (left cut 2000, right cut 300).
    """
    logging.info("Loading data left")
    left = pd.read_csv("../data/left_tracks.csv", sep=";", index_col=0)
    logging.info("Loading data right")
    right = pd.read_csv("../data/right_tracks.csv", sep=";", index_col=0)
    # BUGFIX: removed leftover debug call merge_detections(left, None, 2000, 300)
    # that passed None as the right dataframe before it was loaded.
    merge_detections(left, right, 2000, 300)
|
||||
|
||||
|
||||
# Script entry point: configure logging, then run the merge pipeline.
if __name__ == "__main__":
    # force=True replaces any handlers configured earlier (e.g. by imported modules).
    logging.basicConfig(level=logging.INFO, force=True)

    main()
|
Loading…
Reference in New Issue
Block a user