[project] restructure, add json to pandas conversion to tasks

Jan Grewe 2025-01-21 11:25:13 +01:00
parent 681a085f4a
commit e039d74d1a
10 changed files with 623 additions and 78 deletions


@@ -1,77 +0,0 @@
import cv2
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython import embed


def show_video(filename):
    cap = cv2.VideoCapture('2024.11.13_0_converted_right_undistorted_fixed.mp4')
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        # if frame is read correctly ret is True
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        count += 1
        if count < 2000:
            continue
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        print(count)
        cv2.imshow('frame', gray[::2, ::2])
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


def topandas(filename):
    temp = []
    tracking_data = {"frame": [], "name": [], "cls": [],
                     "confidence": [], "visible": [],
                     "track_id": [],
                     }
    key_dict = {f"key_{i}": [] for i in range(6)}
    box_labels = ["x1", "y1", "x2", "y2"]
    box_dict = {f"box_{i}": [] for i in box_labels}
    tracking_data.update(key_dict)
    tracking_data.update(box_dict)
    with open(filename, "r") as f:
        data = json.load(f)
    for d in data:  # each of the frames
        if len(d["data"]) == 0:
            continue
        for dd in d["data"]:  # each of the found objects, i.e. fish
            key_x = [float(x) for x in list(np.round(dd["keypoints"]["x"], 2))]
            key_y = [float(y) for y in list(np.round(dd["keypoints"]["y"], 2))]
            key_dict = {f"key_{i}": [v] for i, v in enumerate(zip(key_x, key_y))}
            visible = np.round(dd["keypoints"]["visible"], 3)
            tracking_data["frame"].append(d["frame"])
            tracking_data["name"].append(dd["name"])
            tracking_data["cls"].append(dd["class"])
            tracking_data["visible"].append(visible)
            tracking_data["confidence"].append(float(np.round(dd["confidence"], 3)))
            tracking_data["track_id"].append(dd["track_id"] if "track_id" in dd.keys() else -1)
            for bk, bl in zip(box_dict.keys(), box_labels):
                tracking_data[bk].append(dd["box"][bl])
            for i, kd in enumerate(key_dict.keys()):
                tracking_data[kd].append((key_x[i], key_y[i]))
    df = pd.DataFrame(tracking_data)
    return df


# Test code
def main():
    # import left and right tracks into a pandas dataframe
    right_tracks = "2024.11.13_0_converted_right_undistorted_fixed_pose.json"
    left_tracks = "2024.11.13_0_converted_left_undistorted_fixed_pose.json"
    rdf = topandas(right_tracks)
    rdf.to_csv("right_tracks.csv", sep=";")
    ldf = topandas(left_tracks)
    ldf.to_csv("left_tracks.csv", sep=";")


if __name__ == "__main__":
    main()
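For orientation, this is the shape of one frame entry in the pose JSON that topandas (and the Converter introduced below) walks over. The keys are taken from the accesses in the code; all values are invented for illustration:

frame_entry = {
    "frame": 0,
    "data": [                      # one dict per detected object, i.e. fish
        {
            "name": "fish_1",
            "class": 0,
            "confidence": 0.93,
            "track_id": 1,         # may be missing; the code then stores -1
            "box": {"x1": 100.0, "y1": 200.0, "x2": 180.0, "y2": 260.0},
            "keypoints": {
                "x": [101.2, 110.5, 120.9, 131.0, 140.7, 150.3],
                "y": [201.1, 205.4, 210.2, 215.8, 220.0, 224.6],
                "visible": [0.99, 0.98, 0.97, 0.95, 0.90, 0.88],
            },
        }
    ],
}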

fixtracks/utils/converter.py (new file)

@@ -0,0 +1,165 @@
import json
import logging
import pathlib

import pandas as pd
import numpy as np

from PySide6.QtCore import QRunnable, Slot

from fixtracks.utils.signals import ProducerSignals


class Converter(QRunnable):
    def __init__(self, infile, outfile) -> None:
        super().__init__()
        self._signals = ProducerSignals()
        self._infile = infile
        self._outfile = outfile
        self._ok = True
        self._stopRequest = False
        self._checkfiles()

    def _checkfiles(self):
        for file in [self._infile, self._outfile]:
            if file is None:
                self._signals.error.emit(f"Converter.__init__: {file} is None!")
                self._ok = False
                continue  # pathlib.Path(None) would raise a TypeError
            if not pathlib.Path.exists(pathlib.Path(file)):
                self._signals.error.emit(f"Converter.__init__: File {file} does not exist!")
                self._ok = False

    @Slot()
    def stop_request(self):
        self._stopRequest = True

    @Slot()
    def run(self):
        logging.info(f"Converting input file {self._infile} to {self._outfile}")
        self._signals.progress.emit(0.0)
        tracking_data = {"frame": [], "name": [], "cls": [],
                         "confidence": [], "visible": [],
                         "track_id": [],
                         }
        key_dict = {f"key_{i}": [] for i in range(6)}
        box_labels = ["x1", "y1", "x2", "y2"]
        box_dict = {f"box_{i}": [] for i in box_labels}
        tracking_data.update(key_dict)
        tracking_data.update(box_dict)

        logging.info("Reading infile")
        with open(self._infile, "r", encoding="utf-8") as f:
            data = json.load(f)
        progress = 0.1
        self._signals.progress.emit(progress)
        logging.info("Reading infile done")

        interval = max(int(len(data) // 80), 1)  # avoid modulo by zero for short files
        for i, d in enumerate(data):  # each of the frames
            if len(d["data"]) == 0:
                continue
            if i % interval == 0:
                progress += 0.8 / 80
                self._signals.progress.emit(np.round(progress, 1))
            if self._stopRequest:
                self._signals.error.emit("Conversion cancelled!")
                break
            for dd in d["data"]:  # each of the found objects, i.e. fish
                key_x = [float(x) for x in list(np.round(dd["keypoints"]["x"], 2))]
                key_y = [float(y) for y in list(np.round(dd["keypoints"]["y"], 2))]
                key_dict = {f"key_{i}": [v] for i, v in enumerate(zip(key_x, key_y))}
                visible = np.round(dd["keypoints"]["visible"], 3)
                tracking_data["frame"].append(d["frame"])
                tracking_data["name"].append(dd["name"])
                tracking_data["cls"].append(dd["class"])
                tracking_data["visible"].append(visible)
                tracking_data["confidence"].append(float(np.round(dd["confidence"], 3)))
                tracking_data["track_id"].append(dd["track_id"] if "track_id" in dd.keys() else -1)
                for bk, bl in zip(box_dict.keys(), box_labels):
                    tracking_data[bk].append(dd["box"][bl])
                for i, kd in enumerate(key_dict.keys()):
                    tracking_data[kd].append((key_x[i], key_y[i]))
        if self._stopRequest:
            self._signals.error.emit("Conversion cancelled!")
            return
        df = pd.DataFrame(tracking_data)
        df.to_csv(self._outfile, sep=";")
        self._signals.progress.emit(1.0)
        self._signals.finished.emit(not self._stopRequest)

    @property
    def signals(self):
        return self._signals
# import cv2

# def topandas(filename):
#     tracking_data = {"frame": [], "name": [], "cls": [],
#                      "confidence": [], "visible": [],
#                      "track_id": [],
#                      }
#     key_dict = {f"key_{i}": [] for i in range(6)}
#     box_labels = ["x1", "y1", "x2", "y2"]
#     box_dict = {f"box_{i}": [] for i in box_labels}
#     tracking_data.update(key_dict)
#     tracking_data.update(box_dict)
#     with open(filename, "r", encoding="utf-8") as f:
#         data = json.load(f)
#     for d in data:  # each of the frames
#         if len(d["data"]) == 0:
#             continue
#         for dd in d["data"]:  # each of the found objects, i.e. fish
#             key_x = [float(x) for x in list(np.round(dd["keypoints"]["x"], 2))]
#             key_y = [float(y) for y in list(np.round(dd["keypoints"]["y"], 2))]
#             key_dict = {f"key_{i}": [v] for i, v in enumerate(zip(key_x, key_y))}
#             visible = np.round(dd["keypoints"]["visible"], 3)
#             tracking_data["frame"].append(d["frame"])
#             tracking_data["name"].append(dd["name"])
#             tracking_data["cls"].append(dd["class"])
#             tracking_data["visible"].append(visible)
#             tracking_data["confidence"].append(float(np.round(dd["confidence"], 3)))
#             tracking_data["track_id"].append(dd["track_id"] if "track_id" in dd.keys() else -1)
#             for bk, bl in zip(box_dict.keys(), box_labels):
#                 tracking_data[bk].append(dd["box"][bl])
#             for i, kd in enumerate(key_dict.keys()):
#                 tracking_data[kd].append((key_x[i], key_y[i]))
#     df = pd.DataFrame(tracking_data)
#     return df

# import matplotlib.pyplot as plt
# from IPython import embed

# def show_video(filename):
#     cap = cv2.VideoCapture('2024.11.13_0_converted_right_undistorted_fixed.mp4')
#     count = 0
#     while cap.isOpened():
#         ret, frame = cap.read()
#         # if frame is read correctly ret is True
#         if not ret:
#             print("Can't receive frame (stream end?). Exiting ...")
#             break
#         count += 1
#         if count < 2000:
#             continue
#         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#         print(count)
#         cv2.imshow('frame', gray[::2, ::2])
#         if cv2.waitKey(1) == ord('q'):
#             break
#     cap.release()
#     cv2.destroyAllWindows()
# Test code
def main():
    # import left and right tracks into a pandas dataframe
    infile = "../../data/2024.11.13_0_converted_right_undistorted_fixed_pose.json"
    outfile = "../../data/right_tracks.csv"
    converter = Converter(infile, outfile)
    converter.run()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, force=True)
    main()
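The CSV written by Converter.run has one row per detection, with the columns frame, name, cls, confidence, visible, track_id, key_0 … key_5 and box_x1, box_y1, box_x2, box_y2; the key_i columns hold (x, y) tuples. A minimal sketch of reading such a file back, the file name is just an example:

import pandas as pd

df = pd.read_csv("right_tracks.csv", sep=";", index_col=0)
print(df.columns.tolist())
# ['frame', 'name', 'cls', 'confidence', 'visible', 'track_id',
#  'key_0', 'key_1', 'key_2', 'key_3', 'key_4', 'key_5',
#  'box_x1', 'box_y1', 'box_x2', 'box_y2']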

fixtracks/utils/merger.py (new file, 246 lines)

@@ -0,0 +1,246 @@
import pickle
import logging

import numpy as np
import pandas as pd

from PySide6.QtCore import QRunnable, Slot

from fixtracks.utils.signals import ProducerSignals


class Merger(QRunnable):
    def __init__(self, left_data, right_data, left_cut, right_cut) -> None:
        super().__init__()
        self._signals = ProducerSignals()
        self._left_data = left_data
        self._right_data = right_data
        self._left_cut = left_cut
        self._right_cut = right_cut
        self._result = None
        self._stopRequest = False
        self._merged = None
        self._current_task = ""
        self._mergeprogress = 0.0
        for df in [self._left_data, self._right_data]:
            if not self.check_dataframe(df):
                self.signals.error.emit("Merger.__init__: Error checking DataFrame structure!")

    def check_dataframe(self, df):
        """Perform some sanity checks on the dataframe.

        Parameters
        ----------
        df : pandas.DataFrame
            the DataFrame containing the detections of the left or right camera.

        Returns
        -------
        bool
            True, if everything is all right, False otherwise.
        """
        return True

    def to_numpy(self, df):
        """Convert some columns of the DataFrame to numpy arrays.

        Parameters
        ----------
        df : pandas.DataFrame
            The DataFrame containing the detections.

        Returns
        -------
        numpy.ndarray
            3D array containing the x, y coordinates of each detection in each frame.
            Shape (num_detections, num_keypoints, 2)
        numpy.ndarray
            2D array with the visibility score for each of the keypoints in each frame.
            Shape (num_detections, num_keypoints)
        numpy.ndarray
            2D array with the coordinates of the bounding box for each detection.
            Shape (num_detections, 4), x1, y1, x2, y2
        """
        logging.info("Converting to numpy ...")
        key_columns = [c for c in df.columns if "key_" in c]
        box_columns = [c for c in df.columns if "box_" in c]
        num_frames = len(df)
        num_keypoints = len(key_columns)
        dimensions = 2
        keypoints = np.empty((num_frames, num_keypoints, dimensions))
        visibility = np.empty((num_frames, num_keypoints))
        boxcoordinates = np.empty((num_frames, 4))
        for i, row in df.iterrows():
            for j, k in enumerate(key_columns):
                # after the csv round-trip each keypoint is a stringified "(x, y)" tuple
                key_data = row[k]
                coords = list(map(float, key_data[1:-1].split(",")))
                keypoints[i, j, :] = coords
            for j, b in enumerate(box_columns):
                boxcoordinates[i, j] = row[b]
            if isinstance(row["visible"], str):
                vis = list(map(float, row["visible"][1:-1].split()))
                visibility[i, :] = vis
            else:
                visibility[i, :] = row["visible"]
        logging.info("Converting to numpy done!")
        return keypoints, visibility, boxcoordinates
    def sort_detections(self, keypoints, threshold, left=True):
        """Categorize the detections into those that are easy (not in the visual overlap zone)
        and those that are tricky, i.e. right across the threshold.
        Detections beyond the threshold are ignored, those across the threshold need to be
        treated separately.

        Parameters
        ----------
        keypoints : np.ndarray
            3d array of keypoint coordinates (num detections, num keypoints, (x, y))
        threshold : int
            the threshold line at which the data should be merged
        left : bool, optional
            whether or not the data is from the left side, controls how the threshold is
            interpreted, by default True

        Returns
        -------
        np.ndarray
            The indices of the easy detections
        np.ndarray
            The indices of the tricky detections
        """
        logging.info("Sorting detections")
        x = keypoints[:, :, 0]
        # tricky detections are those whose keypoints straddle the threshold line
        straddling = np.any(x < threshold, axis=1) & np.any(x >= threshold, axis=1)
        if left:
            easy_indices = np.where(np.all(x < threshold, axis=1))[0]
        else:
            easy_indices = np.where(np.all(x >= threshold, axis=1))[0]
        tricky_indices = np.where(straddling)[0]
        return easy_indices, tricky_indices
    def select_and_transform(self, df, keypoints, boxes, quality, frames, valid_detections,
                             left_threshold=None, right_threshold=None):
        keypoints = keypoints[valid_detections, :, :]
        boxes = boxes[valid_detections, :]
        quality = quality[valid_detections, :]
        frames = frames[valid_detections]
        df = df.iloc[valid_detections]
        if left_threshold is not None and right_threshold is not None:
            # shift the right-camera coordinates into the left-camera reference frame
            keypoints[:, :, 0] += (left_threshold - right_threshold)
            boxes[:, [0, 2]] += (left_threshold - right_threshold)
        return df, keypoints, quality, boxes, frames

    def to_dataframe(self, old_left, old_right, lkeypoints, rkeypoints, lboxes, rboxes,
                     lqualities, rqualities, lframes, rframes):
        frames = np.concatenate([lframes, rframes])
        sorting = np.argsort(frames)
        frames = frames[sorting]
        confidences = np.concatenate([old_left.confidence.values, old_right.confidence.values])
        confidences = confidences[sorting]
        classes = np.concatenate([old_left.cls.values, old_right.cls.values])
        classes = classes[sorting]
        names = np.concatenate([old_left.name.values, old_right.name.values])
        names = names[sorting]
        keypoints = np.concatenate([lkeypoints, rkeypoints], axis=0)
        keypoints = keypoints[sorting, :, :]
        boxes = np.concatenate([lboxes, rboxes], axis=0)
        boxes = boxes[sorting, :]
        qualities = np.concatenate([lqualities, rqualities], axis=0)
        qualities = qualities[sorting, :]
        tracks = np.concatenate([old_left.track_id.values, old_right.track_id.values], axis=0)
        tracks = tracks[sorting]
        # sort before converting to df
        q = []
        b = []
        k = []
        for i in range(len(frames)):
            q.append(qualities[i, :])
            b.append(boxes[i, :])
            k.append(keypoints[i, :])
        d = {"frame": frames, "cls": classes, "name": names, "keypoint_score": q, "track": tracks,
             "keypoints": k, "box": b, "confidence": confidences}
        df = pd.DataFrame(d)
        return df

    def save(self, filename):
        if self._merged is None:
            logging.error("Cannot save: the merged DataFrame is None!")
            return
        logging.info("Saving/pickling merged file to %s", filename)
        with open(filename, 'wb') as f:
            pickle.dump(self._merged, f)

    @Slot()
    def stop_request(self):
        self._stopRequest = True

    @Slot()
    def run(self):
        logging.info("Cutting left detections to limit %i", self._left_cut)
        self.signals.progress.emit(0.0)
        self.signals.progress2.emit("Merging", self._mergeprogress, 0.)
        if not self.check_dataframe(self._left_data) or not self.check_dataframe(self._right_data):
            logging.error("Left or right dataframe structure does not match my expectations")
            return None
        self.signals.progress.emit(0.05)

        if not self._stopRequest:
            logging.info("Converting to numpy... %s", "Left camera")
            lkeypoints, lquality, lbox = self.to_numpy(self._left_data)
            lframes = self._left_data.frame.values
            self.signals.progress.emit(0.3)
        else:
            self.signals.finished.emit(False)
            return

        if not self._stopRequest:
            logging.info("Converting to numpy... %s", "Right camera")
            rkeypoints, rquality, rbox = self.to_numpy(self._right_data)
            rframes = self._right_data.frame.values
            self.signals.progress.emit(0.6)
        else:
            self.signals.finished.emit(False)
            return

        logging.info("Filtering detections")
        left_easy, _ = self.sort_detections(lkeypoints, self._left_cut, left=True)
        right_easy, _ = self.sort_detections(rkeypoints, self._right_cut, left=False)
        self.signals.progress.emit(0.7)

        logging.info("Merging and transformation")
        ldf, lkeypoints, lquality, lboxes, lframes = self.select_and_transform(self._left_data, lkeypoints, lbox,
                                                                               lquality, lframes, left_easy)
        self.signals.progress.emit(0.8)
        rdf, rkeypoints, rquality, rboxes, rframes = self.select_and_transform(self._right_data, rkeypoints, rbox,
                                                                               rquality, rframes, right_easy,
                                                                               self._left_cut, self._right_cut)
        self.signals.progress.emit(0.9)

        if not self._stopRequest:
            self._merged = self.to_dataframe(ldf, rdf, lkeypoints, rkeypoints, lboxes, rboxes, lquality, rquality,
                                             lframes, rframes)
            self.signals.progress.emit(1.0)
        else:
            self.signals.finished.emit(False)
            return
        logging.info("Merging done!")
        self._signals.finished.emit(not self._stopRequest)

    @property
    def signals(self):
        return self._signals

    @property
    def result(self):
        return self._result
# TEST code
def main():
    logging.info("Loading data left")
    left = pd.read_csv("../data/left_tracks.csv", sep=";", index_col=0)
    logging.info("Loading data right")
    right = pd.read_csv("../data/right_tracks.csv", sep=";", index_col=0)
    # merge_detections(left, right, 2000, 300)
    merger = Merger(left, right, 2000, 300)
    merger.run()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, force=True)
    main()
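A worked example of the shift applied in select_and_transform, using the cut values from the test code above (left_cut=2000, right_cut=300): a right-camera x coordinate of 350 ends up at 350 + (2000 - 300) = 2050 in the merged, left-camera-anchored coordinate system.

import numpy as np

left_cut, right_cut = 2000, 300               # values from main() above
right_x = np.array([310.0, 350.0])            # hypothetical right-camera x coordinates
merged_x = right_x + (left_cut - right_cut)
print(merged_x)                               # [2010. 2050.]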

fixtracks/utils/reader.py (new file, 78 lines)

@@ -0,0 +1,78 @@
import logging

import cv2 as cv
import pandas as pd

from PySide6.QtCore import QRunnable, Signal, Slot

from fixtracks.utils.signals import ProducerSignals


class ImageReader(QRunnable):
    finished = Signal(bool)  # note: unused, results are reported via ProducerSignals below

    def __init__(self, filename, frame=1000) -> None:
        super().__init__()
        self._filename = filename
        self._framenumber = frame
        self._signals = ProducerSignals()
        self._frame = None

    @Slot()
    def run(self):
        '''Read the video frame by frame and keep the frame with the desired number.'''
        logging.debug("ImageReader: trying to open file %s", self._filename)
        cap = cv.VideoCapture(self._filename)
        framecount = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
        if self._framenumber >= framecount:
            logging.warning("ImageReader: desired frame number (%i) exceeds the frame count (%i)! Redefined to frame count.",
                            self._framenumber, framecount)
            self._framenumber = framecount
        if not cap.isOpened():
            logging.debug("ImageReader: failed to open file %s", self._filename)
            self._signals.finished.emit(False)
            return
        success = True
        fn = 0
        while cap.isOpened() and fn < self._framenumber:
            ret, frame = cap.read()
            if not ret:
                logging.warning("ImageReader: failed to read frame %i", fn)
                success = False
                break
            fn += 1
            self._frame = frame  # cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        cap.release()
        self._signals.finished.emit(success)

    @property
    def signals(self):
        return self._signals

    @property
    def frame(self):
        return self._frame


class DataFrameReader(QRunnable):
    finished = Signal(bool)

    def __init__(self, filename) -> None:
        super().__init__()
        self._filename = filename
        self._signals = ProducerSignals()
        self._dataframe = None

    @Slot()
    def run(self):
        '''Read the DataFrame from a csv file.'''
        logging.debug("DataFrameReader: trying to open file %s", self._filename)
        self._dataframe = pd.read_csv(self._filename, sep=";", index_col=0)
        self._signals.finished.emit(True)

    @property
    def signals(self):
        return self._signals

    @property
    def dataframe(self):
        return self._dataframe
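A minimal sketch of how such a reader might be driven; the event loop, thread pool and slot are caller-side scaffolding and "video.mp4" is a placeholder path, only ImageReader and its signals and frame properties come from the code above:

import sys
from PySide6.QtCore import QCoreApplication, QThreadPool
from fixtracks.utils.reader import ImageReader

app = QCoreApplication(sys.argv)                 # a running Qt event loop is assumed
reader = ImageReader("video.mp4", frame=1000)    # placeholder path

def on_finished(ok):                             # illustrative slot
    print("reading ok:", ok, "got frame:", reader.frame is not None)
    app.quit()

reader.signals.finished.connect(on_finished)
QThreadPool.globalInstance().start(reader)       # ImageReader.run() executes in a worker thread
sys.exit(app.exec())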

fixtracks/utils/signals.py (new file)

@@ -0,0 +1,9 @@
from PySide6.QtCore import Signal, QObject


class ProducerSignals(QObject):
    finished = Signal(bool)
    error = Signal(str)
    # start = pyqtSignal(float)
    # running = pyqtSignal()
    progress = Signal(float)
    progress2 = Signal((str, float, float))
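The (str, float, float) signature of progress2 means a connected slot receives a task label plus two numbers; merger.py emits it as ("Merging", progress, 0.). A small sketch of a matching receiver, the ProgressPrinter class is illustrative and not part of the commit:

from PySide6.QtCore import QObject, Slot
from fixtracks.utils.signals import ProducerSignals

class ProgressPrinter(QObject):
    @Slot(str, float, float)
    def on_progress2(self, task, fraction, subfraction):
        print(f"{task}: {fraction:.0%} ({subfraction:.0%})")

signals = ProducerSignals()
printer = ProgressPrinter()
signals.progress2.connect(printer.on_progress2)
signals.progress2.emit("Merging", 0.5, 0.0)   # prints "Merging: 50% (0%)"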


@@ -5,6 +5,7 @@ from PySide6.QtCore import Qt
from fixtracks.widgets.taskwidgets import FixTracks
from fixtracks.widgets.detectionmerge import MergeDetections
from fixtracks.widgets.taskwidget import TasksWidget
from fixtracks.widgets.converter import Json2PandasConverter

class CentralWidget(QWidget):
@@ -14,13 +15,19 @@ class CentralWidget(QWidget):
        self._tw = TasksWidget()
        self._tw.clicked.connect(self._select_task)

        self._mergewidget = MergeDetections(self)
        self._mergewidget.back.connect(self._on_back)

        self._convertwidget = Json2PandasConverter(self)
        self._convertwidget.back.connect(self._on_back)

        self._trackwidget = FixTracks(self)

        layout = QStackedLayout()
        layout.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self._tasksindex = layout.addWidget(self._tw)
        self._convertindex = layout.addWidget(self._convertwidget)
        self._mergeindex = layout.addWidget(self._mergewidget)
        self._trackindex = layout.addWidget(self._trackwidget)
        self.setLayout(layout)
@@ -33,6 +40,8 @@ class CentralWidget(QWidget):
        elif "tracks" in s.lower():
            self.layout().setCurrentIndex(self._trackindex)
            # self._trackwidget.fileList = self._tw.fileList
        elif "convert" in s.lower():
            self.layout().setCurrentIndex(self._convertindex)
        else:
            self.layout().setCurrentIndex(self._tasksindex)
            logging.warning("Centralwidget: got invalid task request: %s", s)

fixtracks/widgets/converter.py (new file)

@@ -0,0 +1,115 @@
import logging

from PySide6.QtCore import Qt, QThreadPool, Signal, Slot, QSize
from PySide6.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
                               QSizePolicy, QSpacerItem, QProgressDialog, QFileDialog)

from fixtracks.utils.converter import Converter


class Json2PandasConverter(QWidget):
    back = Signal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._infile = None
        self._outfile = None
        self._converter = None
        self._progressDialog = None
        self.threadpool = QThreadPool()

        self._convertBtn = QPushButton("Convert")
        self._convertBtn.setEnabled(False)
        self._convertBtn.setToolTip("Convert data to pandas DataFrame")
        self._convertBtn.clicked.connect(self.on_convert)

        self._backBtn = QPushButton("Back")
        self._backBtn.clicked.connect(self.on_back)

        btnBox = QHBoxLayout()
        btnBox.setAlignment(Qt.AlignmentFlag.AlignLeft)
        btnBox.addWidget(self._backBtn)
        btnBox.addItem(QSpacerItem(100, 10, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed))
        btnBox.addWidget(self._convertBtn)

        layout = QVBoxLayout()
        layout.setAlignment(Qt.AlignmentFlag.AlignVCenter)
        layout.addWidget(QLabel("Convert JSON to Pandas dataframe"))
        layout.addLayout(self.layout_controls())
        layout.addLayout(btnBox)
        self.setLayout(layout)

    def layout_controls(self):
        lyt = QHBoxLayout()
        inputBtn = QPushButton("input")
        inputBtn.setMinimumSize(QSize(100, 100))
        inputBtn.clicked.connect(self.on_inputSelect)
        outputBtn = QPushButton("output")
        outputBtn.setMinimumSize(QSize(100, 100))
        outputBtn.clicked.connect(self.on_outputSelect)
        lyt.addWidget(inputBtn)
        lyt.addWidget(outputBtn)
        return lyt

    def check_buttons(self):
        if self._infile is not None and self._outfile is not None:
            self._convertBtn.setEnabled(True)

    def on_convert(self):
        logging.info("Converting started")
        if self._converter is not None:
            self._converter = None
        self._converter = Converter(self._infile, self._outfile)

        self._progressDialog = QProgressDialog(parent=self)
        self._progressDialog.setAutoClose(True)
        self._progressDialog.setRange(0, 100)
        self._progressDialog.setLabelText("Converting pose data (will take a while, be patient):")
        self._progressDialog.setCancelButtonText("Cancel")
        self._progressDialog.setWindowModality(Qt.WindowModality.WindowModal)
        self._progressDialog.canceled.connect(self.on_convertCancelled)
        self._progressDialog.show()

        self._converter.signals.progress.connect(self.on_convertProgress)
        self._converter.signals.finished.connect(self.on_convertDone)
        self.threadpool.start(self._converter)

    @Slot(float)
    def on_convertProgress(self, value):
        logging.debug(f"Conversion progress: {value * 100}%")
        if self._progressDialog is not None:
            self._progressDialog.setValue(int(value * 100))

    @Slot()
    def on_convertCancelled(self):
        logging.info("Cancel button pressed! Requesting stop of converter")
        self._converter.stop_request()

    def on_back(self):
        logging.debug("Back button pressed!")
        self._infile = None
        self._outfile = None
        self._convertBtn.setEnabled(False)
        self.back.emit()

    def on_inputSelect(self):
        file_dialog = QFileDialog(self, "Select JSON File", "", "JSON Files (*.json);;All Files (*)")
        if file_dialog.exec():
            self._infile = file_dialog.selectedFiles()[0]
        else:
            self._infile = None
        self.check_buttons()

    def on_outputSelect(self):
        file_dialog = QFileDialog(self, "Select CSV File", "", "CSV Files (*.csv);;All Files (*)")
        file_dialog.setAcceptMode(QFileDialog.AcceptSave)
        if file_dialog.exec():
            file_path = file_dialog.selectedFiles()[0]
            if not file_path.endswith(".csv"):
                file_path += ".csv"
            self._outfile = file_path
        else:
            self._outfile = None
        self.check_buttons()

    @Slot(bool)
    def on_convertDone(self, state):
        logging.debug("Conversion stopped with status %s", state)

fixtracks/widgets/taskwidget.py

@@ -23,7 +23,7 @@ class TasksWidget(QWidget):
        self.convertBtn = QPushButton()
        self.convertBtn.setIcon(QIcon(":/icons/convert"))
        self.convertBtn.setToolTip("Convert JSON pose files to Pandas data frames")
-       self.convertBtn.setEnabled(False)
+       self.convertBtn.setEnabled(True)
        self.convertBtn.setFixedSize(250, 100)
        self.convertBtn.setIconSize(0.95 * self.convertBtn.size())
        self.convertBtn.clicked.connect(self._convert_clicked)