move stuff to merger class
This commit is contained in:
parent 6568f0f11c
commit 23b513ab68
@@ -9,7 +9,7 @@ from IPython import embed
 
 class ProducerSignals(QObject):
     finished = pyqtSignal(bool)
-    # error = pyqtSignal(str)
+    error = pyqtSignal(str)
     # start = pyqtSignal(float)
     # running = pyqtSignal()
     progress = pyqtSignal(float)
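The only functional change in this hunk is enabling the error signal on ProducerSignals. A minimal sketch of how a consumer could hook into it (the wiring below is illustrative, not part of the commit; it assumes the PyQt5 imports already present in the module):

    import logging

    # ProducerSignals is the class shown in the hunk above.
    signals = ProducerSignals()
    signals.error.connect(lambda msg: logging.error("Merger reported: %s", msg))
    signals.error.emit("Merger.__init__: Error checking DataFrame structure!")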
@@ -96,6 +96,93 @@ class Merger(QRunnable):
         self._right_cut = right_cut
         self._result = None
         self._stopRequest = False
+        for df in [self._left_data, self._right_data]:
+            if not self.check_dataframe(df):
+                self.signals.error.emit("Merger.__init__: Error checking DataFrame structure!")
+
+    def check_dataframe(self, df):
+        """Perform some sanity checks on the dataframe.
+
+        Parameters
+        ----------
+        df : pandas.DataFrame
+            The DataFrame containing the detections of the left or right camera.
+
+        Returns
+        -------
+        bool
+            True if everything is all right, False otherwise.
+        """
+        return True
+
+    def to_numpy(self, df):
+        """Convert some columns of the DataFrame to numpy arrays.
+
+        Parameters
+        ----------
+        df : pandas.DataFrame
+            The DataFrame containing the detections.
+
+        Returns
+        -------
+        numpy.ndarray
+            3D array containing the x, y coordinates of each detection in each frame.
+            Shape (num_detections, num_keypoints, 2).
+        numpy.ndarray
+            2D array with the visibility score for each keypoint in each frame.
+            Shape (num_detections, num_keypoints).
+        numpy.ndarray
+            2D array with the bounding-box coordinates (x1, y1, x2, y2) of each detection.
+            Shape (num_detections, 4).
+        """
+        key_columns = [c for c in df.columns if "key_" in c]
+        box_columns = [c for c in df.columns if "box_" in c]
+        num_detections = len(df)
+        num_keypoints = len(key_columns)
+        dimensions = 2
+        keypoints = np.empty((num_detections, num_keypoints, dimensions))
+        visibility = np.empty((num_detections, num_keypoints))
+        boxcoordinates = np.empty((num_detections, 4))
+
+        for i, row in df.iterrows():
+            for j, k in enumerate(key_columns):
+                # keypoint cells are stored as strings such as "(x, y)"
+                key_data = row[k]
+                l = list(map(float, list(key_data[1:-1].split(","))))
+                keypoints[i, j, :] = l
+            for j, b in enumerate(box_columns):
+                boxcoordinates[i, j] = row[b]
+            if isinstance(row["visible"], str):
+                vis = list(map(float, row["visible"][1:-1].split()))
+                visibility[i, :] = vis
+            else:
+                visibility[i, :] = row["visible"]
+        return keypoints, visibility, boxcoordinates
+
+    def sort_detections(self, keypoints, threshold, left=True):
+        """Categorize the detections into those that are easy (not in the visual
+        overlap zone) and those that are tricky, i.e. right across the threshold.
+
+        Detections beyond the threshold can be discarded; those across the
+        threshold need to be treated separately.
+
+        Parameters
+        ----------
+        keypoints : np.ndarray
+            3D array of keypoint coordinates (num detections, num keypoints, (x, y)).
+        threshold : int
+            The threshold line at which the data should be merged.
+        left : bool, optional
+            Whether the data is from the left side; controls how the threshold
+            is interpreted. By default True.
+
+        Returns
+        -------
+        np.ndarray
+            The indices of the easy detections.
+        np.ndarray
+            The indices of the tricky detections.
+        """
+        # A detection is "tricky" when its keypoints straddle the threshold,
+        # i.e. some lie on either side of the cut.
+        if left:
+            easyindeces = np.where(np.all(keypoints[:, :, 0] < threshold, axis=1))[0]
+            trickyindices = np.where(np.any(keypoints[:, :, 0] >= threshold, axis=1) &
+                                     np.any(keypoints[:, :, 0] < threshold, axis=1))[0]
+        else:
+            easyindeces = np.where(np.all(keypoints[:, :, 0] >= threshold, axis=1))[0]
+            trickyindices = np.where(np.any(keypoints[:, :, 0] < threshold, axis=1) &
+                                     np.any(keypoints[:, :, 0] >= threshold, axis=1))[0]
+        return easyindeces, trickyindices
 
     @pyqtSlot()
     def stop_request(self):
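To make the expected input of to_numpy concrete, here is a standalone sketch with a tiny synthetic table; the column names and the "(x, y)" / "[v1 v2]" string encodings are assumptions inferred from the parsing code above, not a documented format:

    import numpy as np
    import pandas as pd

    # Two detections with two keypoints each, encoded the way to_numpy() parses them.
    df = pd.DataFrame({
        "frame": [0, 1],
        "key_head": ["(10.0, 20.0)", "(30.0, 40.0)"],
        "key_tail": ["(12.0, 22.0)", "(32.0, 42.0)"],
        "box_x1": [5.0, 25.0], "box_y1": [15.0, 35.0],
        "box_x2": [15.0, 45.0], "box_y2": [25.0, 45.0],
        "visible": ["[0.9 0.8]", "[0.7 0.6]"],
    })

    # Re-doing the keypoint parsing step outside the class, just to show the shapes:
    key_cols = [c for c in df.columns if "key_" in c]
    keypoints = np.array([[list(map(float, row[k][1:-1].split(","))) for k in key_cols]
                          for _, row in df.iterrows()])
    print(keypoints.shape)  # (2, 2, 2): (num_detections, num_keypoints, (x, y))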
@@ -103,23 +190,43 @@ class Merger(QRunnable):
 
     @pyqtSlot()
     def run(self):
+        max_frames = len(self._left_data) + len(self._right_data)
+
+        logging.debug("Cutting left detections to limit %i", self.left_cut)
+        lkeypoints, lquality, lbox = self.to_numpy(self._left_data)
+        lframes = self._left_data.frame.values
+        led, ltd = self.sort_detections(lkeypoints, self.left_cut, left=True)
+
logging.debug("Merger: running merge for %i frames", max_frames)
|
logging.debug("Cutting right detections to limit %i", self._right_cut_cut)
|
||||||
self._stopRequest = False
|
rkeypoints, rquality, rbox = self.to_numpy(self.right_data)
|
||||||
max_frames = max(self._left_data.frame.max(), self._right_data.frame.max())
|
rframes = self.right_data.frame.values
|
||||||
step = max_frames // 100
|
red, rtd = self.sort_detections(rkeypoints, self.right_cut, left=False)
|
||||||
self._result = pd.DataFrame(columns=self._left_data.columns)
|
rkeypoints[:, :, 0] += (self.left_cut - self.right_cut)
|
||||||
|
|
||||||
for frame in range(max_frames):
|
# here we need to decide what to do with these data points, trust the left, or trust the right perspective?
|
||||||
if self._stopRequest:
|
# we could also discard them. unless it is a lot of data points, not much harm will be done...
|
||||||
break
|
# next step after handling the tricky ones is to export the data again to pandas? nixtrack?
|
||||||
lf = self._left_data[self._left_data.frame == frame]
|
# 1. the right coordinates have to adapted! x - right_threshold + left_threshold!
|
||||||
rf = self._right_data[self._right_data.frame == frame]
|
|
||||||
merge_frame(lf, rf, self._left_cut, self._right_cut, self._result)
|
embed()
|
||||||
if frame % step == 0:
|
exit()
|
||||||
self.signals.progress.emit(frame/max_frames)
|
|
||||||
time.sleep(0.01)
|
# logging.debug("Merger: running merge for %i frames", max_frames)
|
||||||
|
# self._stopRequest = False
|
||||||
|
# max_frames = max(self._left_data.frame.max(), self._right_data.frame.max())
|
||||||
|
# step = max_frames // 100
|
||||||
|
# self._result = pd.DataFrame(columns=self._left_data.columns)
|
||||||
|
|
||||||
|
# for frame in range(max_frames):
|
||||||
|
# if self._stopRequest:
|
||||||
|
# break
|
||||||
|
|
||||||
|
# lf = self._left_data[self._left_data.frame == frame]
|
||||||
|
# rf = self._right_data[self._right_data.frame == frame]
|
||||||
|
# merge_frame(lf, rf, self._left_cut, self._right_cut, self._result)
|
||||||
|
# if frame % step == 0:
|
||||||
|
# self.signals.progress.emit(frame/max_frames)
|
||||||
|
# time.sleep(0.01)
|
||||||
self._signals.finished.emit(True and (not self._stopRequest))
|
self._signals.finished.emit(True and (not self._stopRequest))
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
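A small numeric sketch of the categorization done by sort_detections and of the x shift applied to the right keypoints in run(); the cut positions below are made-up values, not taken from the project:

    import numpy as np

    # Three detections, two keypoints each; only the x coordinate matters for the cut.
    keypoints = np.array([
        [[100.0, 5.0], [120.0, 6.0]],   # entirely left of the cut  -> easy
        [[190.0, 5.0], [210.0, 6.0]],   # straddles the cut         -> tricky
        [[250.0, 5.0], [260.0, 6.0]],   # entirely beyond the cut   -> can be discarded
    ])
    left_cut = 200

    easy = np.where(np.all(keypoints[:, :, 0] < left_cut, axis=1))[0]
    tricky = np.where(np.any(keypoints[:, :, 0] >= left_cut, axis=1) &
                      np.any(keypoints[:, :, 0] < left_cut, axis=1))[0]
    print(easy, tricky)  # [0] [1]

    # Mapping right-camera x values into the left frame of reference, mirroring
    # rkeypoints[:, :, 0] += (self.left_cut - self.right_cut) in run():
    right_cut = 50
    right_x = np.array([60.0, 75.0])
    print(right_x + (left_cut - right_cut))  # [210. 225.]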
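check_dataframe is still a stub that always returns True. Purely as an assumption about what such checks could look like (the required columns are guessed from what to_numpy and run access; they are not specified anywhere in the commit):

    def check_dataframe(df):
        # Hypothetical sanity checks; the real requirements are not spelled out here.
        if "frame" not in df.columns or "visible" not in df.columns:
            return False
        if not any("key_" in c for c in df.columns):
            return False
        if not any("box_" in c for c in df.columns):
            return False
        return True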