Compare commits
7 Commits
Author | SHA1 | Date
---|---|---
| 08a4abda62 |
| af9444ab26 |
| 73925ffd1d |
| 0488ca6e64 |
| 8199fe4c7f |
| 5b3540e304 |
| bf2c2d21a2 |
@@ -28,7 +28,7 @@ If you leave away the ```--user``` the package will be installed system-wide.

## TrackingResults

Is a class that wraps around the *.h5 files written by DeppLabCut
Is a class that wraps around the *.h5 files written by DeepLabCut

## ImageMarker
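As a quick orientation for the two classes named above, a minimal usage sketch (the file name is a placeholder; the imports and the `position_values` call mirror the code further down in this diff):

```python
from etrack import TrackingResult, ImageMarker, MarkerTask

tr = TrackingResult("tracking_results.h5")   # hypothetical DeepLabCut *.h5 output file
time, x, y, likelihood, bodypart = tr.position_values(bodypart=0)
```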
@@ -1,2 +1,3 @@
from .image_marker import ImageMarker, MarkerTask
from .tracking_result import TrackingResult
from .distance_calibration import DistanceCalibration
193 etrack/calibration_functions.py Normal file
@@ -0,0 +1,193 @@
from turtle import left
import matplotlib.pyplot as plt
import numpy as np
from IPython import embed
from etrack import MarkerTask, ImageMarker


def mark_crop_positions(self):
    task = MarkerTask("crop area", ["bottom left corner", "top left corner", "top right corner", "bottom right corner"], "Mark crop area")
    im = ImageMarker([task])

    marker_positions = im.mark_movie(file_name, frame_number)
    print(marker_positions)

    np.save('marker_positions', marker_positions)

    return marker_positions

def assign_marker_positions(marker_positions):
    bottom_left_x = marker_positions[0]['bottom left corner'][0]
    bottom_left_y = marker_positions[0]['bottom left corner'][1]
    bottom_right_x = marker_positions[0]['bottom right corner'][0]
    bottom_right_y = marker_positions[0]['bottom right corner'][1]
    top_left_x = marker_positions[0]['top left corner'][0]
    top_left_y = marker_positions[0]['top left corner'][1]
    top_right_x = marker_positions[0]['top right corner'][0]
    top_right_y = marker_positions[0]['top right corner'][1]
    return bottom_left_x, bottom_left_y, bottom_right_x, bottom_right_y, top_left_x, top_left_y, top_right_x, top_right_y


def assign_checkerboard_positions(checkerboard_marker_positions):
    checkerboard_top_right = checkerboard_marker_positions[0]['top right corner']
    checkerboard_top_left = checkerboard_marker_positions[0]['top left corner']
    checkerboard_bottom_right = checkerboard_marker_positions[0]['bottom right corner']
    checkerboard_bottom_left = checkerboard_marker_positions[0]['bottom left corner']
    return checkerboard_top_right, checkerboard_top_left, checkerboard_bottom_right, checkerboard_bottom_left

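These helpers unpack the marker dictionaries produced by `ImageMarker.mark_movie`; the structure below is the same one built in `detect_checkerboard` further down, with made-up pixel coordinates:

```python
marker_positions = [{'bottom left corner': (5, 1350), 'top left corner': (8, 60),
                     'top right corner': (1900, 65), 'bottom right corner': (1895, 1355)}]
(bottom_left_x, bottom_left_y, bottom_right_x, bottom_right_y,
 top_left_x, top_left_y, top_right_x, top_right_y) = assign_marker_positions(marker_positions)
```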
def crop_frame(frame, marker_positions):

    # load the four marker positions
    bottom_left_x, bottom_left_y, bottom_right_x, bottom_right_y, top_left_x, top_left_y, top_right_x, top_right_y = assign_marker_positions(marker_positions)

    # define the boundaries of the frame, taken as the average of points that lie on the same line but differ slightly in pixel value
    left_bound = int(np.mean([bottom_left_x, top_left_x]))
    right_bound = int(np.mean([bottom_right_x, top_right_x]))
    top_bound = int(np.mean([top_left_y, top_right_y]))
    bottom_bound = int(np.mean([bottom_left_y, bottom_right_y]))

    # crop the frame to the boundary values
    cropped_frame = frame[top_bound:bottom_bound, left_bound:right_bound]
    cropped_frame = np.mean(cropped_frame, axis=2)  # mean over 3rd dimension (RGB/color values)

    # mean over the short or long side of the frame, corresponding to the x- or y-axis of the picture
    frame_width = np.mean(cropped_frame, axis=0)
    frame_height = np.mean(cropped_frame, axis=1)

    # differences of neighboring color values --> derivative
    diff_width = np.diff(frame_width)
    diff_height = np.diff(frame_height)

    # two x vectors for better plotting
    x_width = np.arange(0, len(diff_width), 1)
    x_height = np.arange(0, len(diff_height), 1)

    return cropped_frame, frame_width, frame_height, diff_width, diff_height, x_width, x_height

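As an aside, a self-contained sketch of what the cropping and averaging above do, using a random array in place of a real video frame:

```python
import numpy as np

frame = np.random.randint(0, 255, size=(100, 200, 3))   # fake frame: height x width x color
top, bottom, left, right = 10, 90, 20, 180               # would come from the averaged marker positions
gray = np.mean(frame[top:bottom, left:right], axis=2)    # crop, then average over the RGB channels
profile_width = np.mean(gray, axis=0)                    # one value per column: profile along the width
print(gray.shape, profile_width.shape)                   # (80, 160) (160,)
```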
def rotation_angle():
    pass

def threshold_crossings(data, threshold_factor):
    # upper and lower threshold
    median_data = np.median(data)
    median_lower = median_data + np.min(data)
    median_upper = np.max(data) - median_data
    lower_threshold = median_lower / threshold_factor
    upper_threshold = median_upper / threshold_factor

    # boolean arrays marking where the data drops below / rises above the thresholds
    lower_crossings = np.diff(data < lower_threshold, prepend=False)  # prepend: point after crossing
    upper_crossings = np.diff(data > upper_threshold, append=False)  # append: point before crossing

    # indices where the crossings are
    lower_crossings_indices = np.argwhere(lower_crossings)
    upper_crossings_indices = np.argwhere(upper_crossings)

    # sort out several crossings of the same edge of the checkerboard (due to noise)
    half_window_size = 10
    lower_peaks = []
    upper_peaks = []
    for lower_idx in lower_crossings_indices:  # for every lower crossing..
        if lower_idx < half_window_size:  # ..if the index is smaller than the window size (near index 0)
            half_window_size = lower_idx
        lower_window = data[lower_idx[0] - int(half_window_size):lower_idx[0] + int(half_window_size)]  # create a data window from -window_size to +window_size
        min_window = np.min(lower_window)  # take the minimum of the window
        min_idx = np.where(data == min_window)  # find the index of that minimum

        lower_peaks.append(min_idx)  # append to list
    for upper_idx in upper_crossings_indices:  # same for upper crossings, with the maximum of the window
        if upper_idx < half_window_size:
            half_window_size = upper_idx
        upper_window = data[upper_idx[0] - int(half_window_size):upper_idx[0] + int(half_window_size)]

        max_window = np.max(upper_window)
        max_idx = np.where(data == max_window)
        upper_peaks.append(max_idx)

    # if several crossings create the same peak due to overlapping windows, only one (unique) is kept
    lower_peaks = np.unique(lower_peaks)
    upper_peaks = np.unique(upper_peaks)

    return lower_peaks, upper_peaks

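The core trick in `threshold_crossings` is differencing a boolean mask to locate where the signal enters or leaves the threshold band. A tiny illustration on a made-up signal:

```python
import numpy as np

data = np.array([0.0, 0.1, 0.0, 5.0, 0.2, 0.1, -4.8, 0.0, 0.1])
upper = np.diff(data > 2.0, prepend=False)     # True wherever the mask flips
print(np.argwhere(upper).ravel())              # [3 4]: the peak at index 3 and the sample after it
```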
def checkerboard_position(lower_crossings_indices, upper_crossings_indices):
    """Take crossing positions to generate a characteristic sequence for a corresponding position of the checkerboard inside the frame.
    The positional description has to be interpreted depending on the input data.

    Args:
        lower_crossings_indices: Indices where the lower threshold was crossed by the derivative data.
        upper_crossings_indices: Indices where the upper threshold was crossed by the derivative data.

    Returns:
        checkerboard_position: General position where the checkerboard lies inside the frame along the axis of the input data.
    """

    # create a zipped list with both kinds of indices
    zip_list = []
    for zl in lower_crossings_indices:
        zip_list.append(zl)
    for zu in upper_crossings_indices:
        zip_list.append(zu)

    zip_list = np.sort(zip_list)  # order by index

    # compare the zipped list against the original indices lists and assign the corresponding direction (towards the upper or lower threshold)
    sequence = []
    for z in zip_list:
        if z in lower_crossings_indices:
            sequence.append('down')
        else:
            sequence.append('up')
    print('sequence:', sequence)

    # depending on the order of crossings through the upper or lower threshold, we get a characteristic sequence for the position of the checkerboard in the frame
    if sequence == ['up', 'down', 'up', 'down']:  # first down, second up are edges of the checkerboard
        print('in middle')
        checkerboard_position = 'middle'
        left_checkerboard_edge = zip_list[1]
        right_checkerboard_edge = zip_list[2]
    elif sequence == ['up', 'up', 'down']:  # first and second up are edges of the checkerboard
        print('at left')
        checkerboard_position = 'left'
        left_checkerboard_edge = zip_list[0]
        right_checkerboard_edge = zip_list[1]
    else:  # first and second down are edges of the checkerboard
        print('at right')
        checkerboard_position = 'right'
        left_checkerboard_edge = zip_list[1]
        right_checkerboard_edge = zip_list[2]

    return checkerboard_position, left_checkerboard_edge, right_checkerboard_edge  # position of the checkerboard is returned

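A worked example of the sequence logic above, with plain integer indices for readability (the real code passes `np.argwhere` results):

```python
lower = [30, 80]    # crossings of the lower threshold
upper = [10, 60]    # crossings of the upper threshold
pos, left_edge, right_edge = checkerboard_position(lower, upper)
# sequence becomes ['up', 'down', 'up', 'down'] -> pos == 'middle', edges at 30 and 60
```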
def filter_data(data, n):
    """Filter/smooth data with a kernel of length n.

    Args:
        data: Raw data.
        n: Number of datapoints the mean gets computed over.

    Returns:
        filtered_data: Filtered data.
    """
    new_data = np.zeros(len(data))  # empty vector that gets filled in the following steps
    for k in np.arange(0, len(data) - n):
        kk = int(k)
        f = np.mean(data[kk:kk+n])  # mean over the data window from kk to kk+n
        kkk = int(kk + n / 2)  # position where the mean datapoint is placed
        if k == 0:
            new_data[:kkk] = f
        new_data[kkk] = f  # assignment of the value to the datapoint
        new_data[kkk:] = f
    for nd in new_data[0:n-1]:  # correction of left boundary effects (boundaries up to length n were the same number)
        nd_idx = np.argwhere(nd)
        new_data[nd_idx] = data[nd_idx]
    for nd in new_data[-1 - (n-1):-1]:  # same as above, correction of right boundary effects
        nd_idx = np.argwhere(nd)
        new_data[nd_idx] = data[nd_idx]

    return new_data
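Conceptually `filter_data` is a running mean. For comparison only (boundary handling differs from the function above), the same smoothing can be sketched with a convolution:

```python
import numpy as np

def running_mean(data, n):
    return np.convolve(data, np.ones(n) / n, mode='same')  # each point becomes the mean of an n-wide window

smoothed = running_mean(np.array([0., 0., 4., 4., 0., 0.]), n=2)
print(smoothed)   # [0. 0. 2. 4. 2. 0.]
```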
258 etrack/distance_calibration.py Normal file
@@ -0,0 +1,258 @@
from multiprocessing import allow_connection_pickling
from turtle import left
from xml.dom.expatbuilder import FILTER_ACCEPT
from cv2 import MARKER_TRIANGLE_UP, calibrationMatrixValues, mean, threshold
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import sys
import glob
from IPython import embed
from calibration_functions import *


class DistanceCalibration():

    def __init__(self, file_name, frame_number, x_0=154, y_0=1318, cam_dist=1.36, tank_width=1.35, tank_height=0.805, width_pixel=1900, height_pixel=200,
                 checkerboard_width=0.24, checkerboard_height=0.18, checkerboard_width_pixel=500, checkerboard_height_pixel=350) -> None:
        super().__init__()

        self._file_name = file_name
        self._x_0 = x_0
        self._y_0 = y_0
        self._width_pix = width_pixel
        self._height_pix = height_pixel
        self._cam_dist = cam_dist
        self._tank_width = tank_width
        self._tank_height = tank_height
        self._cb_width = checkerboard_width
        self._cb_height = checkerboard_height
        self._cb_width_pix = checkerboard_width_pixel
        self._cb_height_pix = checkerboard_height_pixel
        self._x_factor = tank_width / width_pixel  # m/pix
        self._y_factor = tank_height / height_pixel  # m/pix

        self.distance_factor_calculation
        self.mark_crop_positions

        # if needed include setter: @y_0.setter def y_0(self, value): self._y_0 = value
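An illustrative instantiation of the class defined above; the file name is hypothetical, and the printed factors follow directly from the constructor defaults:

```python
dc = DistanceCalibration(file_name="example_video.mp4", frame_number=10)
print(dc.x_factor)   # 1.35 m / 1900 px  ≈ 7.1e-4 m per pixel
print(dc.y_factor)   # 0.805 m / 200 px  ≈ 4.0e-3 m per pixel
```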
    @property
    def x_0(self):
        return self._x_0

    @property
    def y_0(self):
        return self._y_0

    @property
    def cam_dist(self):
        return self._cam_dist

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @property
    def width_pix(self):
        return self._width_pix

    @property
    def height_pix(self):
        return self._height_pix

    @property
    def cb_width(self):
        return self._cb_width

    @property
    def cb_height(self):
        return self._cb_height

    @property
    def x_factor(self):
        return self._x_factor

    @property
    def y_factor(self):
        return self._y_factor

    def mark_crop_positions(self):
        task = MarkerTask("crop area", ["bottom left corner", "top left corner", "top right corner", "bottom right corner"], "Mark crop area")
        im = ImageMarker([task])

        marker_positions = im.mark_movie(file_name, frame_number)
        print(marker_positions)

        np.save('marker_positions', marker_positions)

        return marker_positions

    def detect_checkerboard(self, filename, frame_number, marker_positions):
        # load frame
        if not os.path.exists(filename):
            raise IOError("file %s does not exist!" % filename)
        video = cv2.VideoCapture()
        video.open(filename)
        frame_counter = 0
        success = True
        frame = None

        while success and frame_counter <= frame_number:  # iterating until frame_counter == frame_number --> success (True)
            print("Reading frame: %i" % frame_counter, end="\r")
            success, frame = video.read()
            frame_counter += 1

        marker_positions = np.load('marker_positions.npy', allow_pickle=True)  # load the saved numpy marker positions file

        # care: the y-axis is inverted, top values are low, bottom values are high

        cropped_frame, frame_width, frame_height, diff_width, diff_height, _, _ = crop_frame(frame, marker_positions)  # crop the frame to the given marker positions

        bottom_left_x = 0
        bottom_left_y = np.shape(cropped_frame)[0]
        bottom_right_x = np.shape(cropped_frame)[1]
        bottom_right_y = np.shape(cropped_frame)[0]
        top_left_x = 0
        top_left_y = 0
        top_right_x = np.shape(cropped_frame)[1]
        top_right_y = 0

        cropped_marker_positions = [{'bottom left corner': (bottom_left_x, bottom_left_y), 'top left corner': (top_left_x, top_left_y),
                                     'top right corner': (top_right_x, top_right_y), 'bottom right corner': (bottom_right_x, bottom_right_y)}]

        thresh_fact = 7  # factor by which the min/max is divided to calculate the upper and lower thresholds

        # filtering/smoothing of data using a kernel with n datapoints
        kernel = 4
        diff_width = filter_data(diff_width, n=kernel)  # for width (x-axis)
        diff_height = filter_data(diff_height, n=kernel)  # for height (y-axis)

        # input data is the derivative of the color values of the frame
        lci_width, uci_width = threshold_crossings(diff_width, threshold_factor=thresh_fact)  # threshold crossings (= edges of the checkerboard) for width (x-axis)
        lci_height, uci_height = threshold_crossings(diff_height, threshold_factor=thresh_fact)  # ..for height (y-axis)

        print('lower crossings:', lci_width)
        print('upper crossings:', uci_width)

        # position of the checkerboard in width
        print('width..')
        width_position, left_width_position, right_width_position = checkerboard_position(lci_width, uci_width)

        # position of the checkerboard in height
        print('height..')
        height_position, left_height_position, right_height_position = checkerboard_position(lci_height, uci_height)  # left height refers to top, right height to bottom

        if width_position == 'left' and height_position == 'left':
            checkerboard_position_tank = 'top left'
        elif width_position == 'left' and height_position == 'right':
            checkerboard_position_tank = 'bottom left'
        elif width_position == 'right' and height_position == 'right':
            checkerboard_position_tank = 'bottom right'
        elif width_position == 'right' and height_position == 'left':
            checkerboard_position_tank = 'top right'
        else:
            checkerboard_position_tank = 'middle'

        print(checkerboard_position_tank)

        # final corner positions of the checkerboard
        checkerboard_marker_positions = [{'bottom left corner': (left_width_position, right_height_position), 'top left corner': (left_width_position, left_height_position),
                                          'top right corner': (right_width_position, left_height_position), 'bottom right corner': (right_width_position, right_height_position)}]

        print(checkerboard_marker_positions)

        checkerboard_top_right, checkerboard_top_left, checkerboard_bottom_right, checkerboard_bottom_left = assign_checkerboard_positions(checkerboard_marker_positions)

        fig, ax = plt.subplots()
        ax.imshow(cropped_frame)
        for p in checkerboard_top_left, checkerboard_top_right, checkerboard_bottom_left, checkerboard_bottom_right:
            ax.scatter(p[0], p[1])
        ax.scatter(bottom_left_x, bottom_left_y)
        ax.scatter(bottom_right_x, bottom_right_y)
        ax.scatter(top_left_x, top_left_y)
        ax.scatter(top_right_x, top_right_y)
        plt.show()

        return checkerboard_marker_positions, cropped_marker_positions, checkerboard_position_tank

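A side note on the frame-reading loop used in `detect_checkerboard` (and in `ImageMarker.mark_movie`): OpenCV can also seek directly to a frame index, which skips decoding all preceding frames, though frame-accurate seeking depends on the codec. A sketch with a placeholder file name:

```python
import cv2

video = cv2.VideoCapture("example_video.mp4")      # hypothetical file
video.set(cv2.CAP_PROP_POS_FRAMES, 10)              # jump to frame 10
success, frame = video.read()
video.release()
```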
    def distance_factor_calculation(self, checkerboard_marker_positions, marker_positions):

        checkerboard_top_right, checkerboard_top_left, checkerboard_bottom_right, checkerboard_bottom_left = assign_checkerboard_positions(checkerboard_marker_positions)

        checkerboard_width = 0.24
        checkerboard_height = 0.18

        checkerboard_width_pixel = checkerboard_top_right[0] - checkerboard_top_left[0]
        checkerboard_height_pixel = checkerboard_bottom_right[1] - checkerboard_top_right[1]

        x_factor = checkerboard_width / checkerboard_width_pixel
        y_factor = checkerboard_height / checkerboard_height_pixel

        bottom_left_x, bottom_left_y, bottom_right_x, bottom_right_y, top_left_x, top_left_y, top_right_x, top_right_y = assign_marker_positions(marker_positions)

        tank_width_pixel = np.mean([bottom_right_x - bottom_left_x, top_right_x - top_left_x])
        tank_height_pixel = np.mean([bottom_left_y - top_left_y, bottom_right_y - top_right_y])

        tank_width = tank_width_pixel * x_factor
        tank_height = tank_height_pixel * y_factor

        print(tank_width, tank_height)

        return x_factor, y_factor

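Worked through with the illustrative checkerboard defaults from `__init__` (a 0.24 m x 0.18 m board spanning roughly 500 x 350 px), the factors come out as:

```python
x_factor = 0.24 / 500    # = 4.8e-4 m per pixel along the width
y_factor = 0.18 / 350    # ≈ 5.1e-4 m per pixel along the height

# a stretch of 1000 px along the width would then correspond to
print(1000 * x_factor)   # 0.48 m
```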
def distance_factor_interpolation(x_factors, y_factors):
    pass


if __name__ == "__main__":

    all_x_factor = []
    all_y_factor = []
    all_checkerboard_position_tank = []
    for file_name in glob.glob("/home/efish/etrack/videos/*"):
        # file_name = "/home/efish/etrack/videos/2022.03.28_4.mp4"
        frame_number = 10
        dc = DistanceCalibration(file_name=file_name, frame_number=frame_number)

        dc.mark_crop_positions()

        checkerboard_marker_positions, cropped_marker_positions, checkerboard_position_tank = dc.detect_checkerboard(file_name, frame_number=frame_number, marker_positions=np.load('marker_positions.npy', allow_pickle=True))

        x_factor, y_factor = dc.distance_factor_calculation(checkerboard_marker_positions, marker_positions=cropped_marker_positions)

        all_x_factor.append(x_factor)
        all_y_factor.append(y_factor)
        all_checkerboard_position_tank.append(checkerboard_position_tank)

    x_factors = np.load('x_factors.npy')
    y_factors = np.load('y_factors.npy')
    all_checkerboard_position_tank = np.load('all_checkerboard_position_tank.npy')

    embed()
    quit()

    # next up: distance calculation with angle
    # is this needed or are the current videos enough?:
    # lay the checkerboard directly above/below and left/right of the centered checkerboard, near the edge of the tank
    # calculate the x and y factor for the centered checkerboard, then for the ones at the edge
    # --> afterwards interpolate between them to have continuous factors for the whole tank
    # maybe use a smaller object in the tank to get a more accurate factor

    # make a function to refine checkerboard detection at the edges of the tank: if no lower color values appear near the edge --> checkerboard position then == corner of tank?
    #
    # mark_crop_positions: why does the plot fail at the end?
    # with rectangles of the checkerboard?

    # embed()
etrack/image_marker.py
@@ -1,4 +1,6 @@
from cv2 import calibrationMatrixValues
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import sys
@@ -18,7 +20,20 @@ class ImageMarker:

        self._fig.canvas.mpl_connect('close_event', self._fig_close_event)
        self._fig.canvas.mpl_connect('key_press_event', self._key_press_event)

    def mark_movie(self, filename, frame_number=0):
    def mark_movie(self, filename, frame_number=10):
        """Interactive GUI to mark the corners of the tank. A specific frame of the video can be chosen. Returns the marker positions.

        Args:
            filename: Video file.
            frame_number (int, optional): Number of a frame in the video file. Defaults to 10.

        Raises:
            IOError: File does not exist.

        Returns:
            marker_positions: Marker positions of the tank corners.
        """

        if not os.path.exists(filename):
            raise IOError("file %s does not exist!" % filename)
        video = cv2.VideoCapture()
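For reference, this mirrors how the method is driven in the `__main__` block at the bottom of this file; the video name here is a placeholder:

```python
task = MarkerTask("tank limits", ["bottom left corner", "top left corner", "top right corner", "bottom right corner"], "Mark tank corners")
im = ImageMarker([task])
marker_positions = im.mark_movie("example_video.mp4", frame_number=10)
print(marker_positions)   # one dict per task, mapping each corner label to an (x, y) pixel position
```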
@@ -26,17 +41,17 @@ class ImageMarker:

        frame_counter = 0
        success = True
        frame = None
        while success and frame_counter <= frame_number:
        while success and frame_counter <= frame_number:  # iterating until frame_counter == frame_number --> success (True)
            print("Reading frame: %i" % frame_counter, end="\r")
            success, frame = video.read()
            frame_counter += 1
        if success:
            self._fig.gca().imshow(frame)
            self._fig.gca().imshow(frame)  # plot the wanted frame of the video
        else:
            print("Could not read frame number %i either failed to open movie or beyond maximum frame number!" % frame_number)
            return []
        plt.ion()
        plt.show(block=False)
        plt.ion()  # turn on interactive mode
        plt.show(block=False)  # block=False allows continued interaction in the terminal while the figure is open

        self._task_index = -1
        if len(self._tasks) > 0:
@@ -50,6 +65,7 @@ class ImageMarker:

        self._fig.gca().set_title("All set and done!\n Window will close in 2s")
        self._fig.canvas.draw()
        plt.pause(2.0)
        plt.close()  # self._fig.gca().imshow(frame))
        return [t.marker_positions for t in self._tasks]

    def _key_press_event(self, event):
@@ -141,11 +157,15 @@ class MarkerTask():

if __name__ == "__main__":
    tank_task = MarkerTask("tank limits", ["bottom left corner", "top left corner", "top right corner", "bottom right corner"], "Mark tank corners")
    feeder_task = MarkerTask("Feeder positions", list(map(str, range(1, 2))), "Mark feeder positions")
    tasks = [tank_task, feeder_task]
    tasks = [tank_task]  # feeder_task]
    im = ImageMarker(tasks)
    # vid1 = "2020.12.11_lepto48DLC_resnet50_boldnessDec11shuffle1_200000_labeled.mp4"
    print(sys.argv[0])
    print (sys.argv[1])
    vid1 = sys.argv[1]

    vid1 = "/home/efish/efish_tracking/efish_tracking3-Xaver-2022-03-21/videos/2022.01.12_3DLC_resnet50_efish_tracking3Mar21shuffle1_300000_labeled.mp4"
    marker_positions = im.mark_movie(vid1, 10)
    print(marker_positions)

    # print(sys.argv[0])
    # print (sys.argv[1])
    # vid1 = sys.argv[1]

    embed()
etrack/tracking_result.py
@@ -17,8 +17,19 @@ center_meter = ((center[0] - x_0) * x_factor, (center[1] - y_0) * y_factor)

class TrackingResult(object):

    def __init__(self, results_file, x_0=0, y_0=0, width_pixel=1230, height_pixel=1100, width_meter=0.81, height_meter=0.81) -> None:
    def __init__(self, results_file, x_0=0, y_0=0, width_pixel=1975, height_pixel=1375, width_meter=0.81, height_meter=0.81) -> None:
        super().__init__()
        """Width refers to the "x-axis" of the tank, height to the "y-axis" of it.

        Args:
            results_file (_type_): Results file of the previously done animal tracking.
            x_0 (int, optional): . Defaults to 95.
            y_0 (int, optional): _description_. Defaults to 185.
            width_pixel (int, optional): Width from one lightened corner of the tank to the other. Defaults to 1975.
            height_pixel (int, optional): Height from one lightened corner of the tank to the other. Defaults to 1375.
            width_meter (float, optional): Width of the tank in meter. Defaults to 0.81.
            height_meter (float, optional): Height of the tank in meter. Defaults to 0.81.
        """
        if not os.path.exists(results_file):
            raise ValueError("File %s does not exist!" % results_file)
        self._file_name = results_file
@@ -28,36 +39,51 @@ class TrackingResult(object):

        self.width_m = width_meter
        self.height_pix = height_pixel
        self.height_m = height_meter
        self.x_factor = self.width_m / self.width_pix  # m/pix
        self.y_factor = self.height_m / self.height_pix  # m/pix

        self.center = (np.round(self.x_0 + self.width_pix/2), np.round(self.y_0 + self.height_pix/2))
        self.center_meter = ((self.center[0] - self.x_0) * self.x_factor, (self.center[1] - self.y_0) * self.y_factor)
        self.center = (np.round(self.x_0 + self.width_pix/2), np.round(self.y_0 + self.height_pix/2))  # middle of width and height --> center
        self.center_meter = ((self.center[0] - self.x_0) * self.x_factor, (self.center[1] - self.y_0) * self.y_factor)  # center in meter by multiplying with the factor

        self._data_frame = pd.read_hdf(results_file)
        self._level_shape = self._data_frame.columns.levshape
        self._scorer = self._data_frame.columns.levels[0].values
        self._bodyparts = self._data_frame.columns.levels[1].values if self._level_shape[1] > 0 else []
        self._positions = self._data_frame.columns.levels[2].values if self._level_shape[2] > 0 else []
        self._data_frame = pd.read_hdf(results_file)  # read dataframe of the scorer
        self._level_shape = self._data_frame.columns.levshape  # shape of the dataframe
        self._scorer = self._data_frame.columns.levels[0].values  # scorer of the dataset
        self._bodyparts = self._data_frame.columns.levels[1].values if self._level_shape[1] > 0 else []  # tracked body parts
        self._positions = self._data_frame.columns.levels[2].values if self._level_shape[2] > 0 else []  # x and y position values and their likelihood
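With the new default geometry (1975 x 1375 px mapped onto the 0.81 m x 0.81 m tank, x_0 = y_0 = 0), the conversion works out roughly as below; this is a worked example, not part of the file:

```python
import numpy as np

x_factor = 0.81 / 1975                                   # ≈ 4.1e-4 m/px
y_factor = 0.81 / 1375                                   # ≈ 5.9e-4 m/px
center = (np.round(1975 / 2), np.round(1375 / 2))        # (988.0, 688.0) px
center_meter = (center[0] * x_factor, center[1] * y_factor)
print(center_meter)                                      # ≈ (0.405, 0.405) m
```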
    def angle_to_center(self, bodypart=0, twopi=True, origin="topleft", min_likelihood=0.95):
        if isinstance(bodypart, nb.Number):
            bp = self._bodyparts[bodypart]
        elif isinstance(bodypart, str) and bodypart in self._bodyparts:
    def angle_to_center(self, bodypart=0, twopi=True, inversed_yaxis=False, min_likelihood=0.95):
        """Angle of the animal position in relation to the center of the tank.

        Args:
            bodypart (int, optional): Bodypart of the animal. Defaults to 0.
            twopi (bool, optional): _description_. Defaults to True.
            inversed_yaxis (bool, optional): True when 0 is at the top of the y-axis (inverted image coordinates). Defaults to False.
            min_likelihood (float, optional): The likelihood of the position estimation. Defaults to 0.95.

        Raises:
            ValueError: No valid x-position values.

        Returns:
            phi: Angle of the animal in relation to the center.
        """
        if isinstance(bodypart, nb.Number):  # check if bodypart is given as a number
            bp = self._bodyparts[bodypart]
        elif isinstance(bodypart, str) and bodypart in self._bodyparts:  # or if bodypart is a string
            bp = bodypart
        else:
            raise ValueError("Bodypart %s is not in dataframe!" % bodypart)
        _, x, y, _, _ = self.position_values(bodypart=bp, min_likelihood=min_likelihood)
            raise ValueError("Bodypart %s is not in dataframe!" % bodypart)  # otherwise the body part does not exist
        _, x, y, _, _ = self.position_values(bodypart=bp, min_likelihood=min_likelihood)  # x and y values, already in meter from position_values
        if x is None:
            print("Error: no valid angles for %s" % self._file_name)
            return []
        x_meter = x - self.center_meter[0]
        y_meter = y - self.center_meter[1]
        if origin.lower() == "topleft":
            y_meter *= -1
        phi = np.arctan2(y_meter, x_meter) * 180 / np.pi
        x_to_center = x - self.center_meter[0]
        y_to_center = y - self.center_meter[1]
        if inversed_yaxis == True:
            y_to_center *= -1
        phi = np.arctan2(y_to_center, x_to_center) * 180 / np.pi
        if twopi:
            phi[phi < 0] = 360 + phi[phi < 0]

        return phi
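A small worked example of the angle convention used above (array input so the `twopi` wrap-around works; the coordinates are made up):

```python
import numpy as np

# a point 0.1 m right of and 0.1 m below the center, with the y-axis already flipped
phi = np.arctan2(np.array([-0.1]), np.array([0.1])) * 180 / np.pi   # -> [-45.]
phi[phi < 0] = 360 + phi[phi < 0]                                    # twopi=True maps it to [315.]
print(phi)
```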
    def coordinate_transformation(self, position):
@@ -85,24 +111,24 @@ class TrackingResult(object):

    def positions(self):
        return self._positions

    def position_values(self, scorer=0, bodypart=0, framerate=30, interpolate=True, min_likelihood=0.95):
        """returns the x and y positions in m and the likelihood of the positions.
    def position_values(self, scorer=0, bodypart=0, framerate=25, interpolate=True, min_likelihood=0.95):
        """Returns the x and y positions of a bodypart over time and the likelihood of it.

        Args:
            scorer (int, optional): [description]. Defaults to 0.
            bodypart (int, optional): [description]. Defaults to 0.
            framerate (int, optional): [description]. Defaults to 30.
            scorer (int, optional): Scorer of the dataset. Defaults to 0.
            bodypart (int, optional): Bodypart of the animal. Can be seen in etrack.TrackingResults.bodyparts. Defaults to 0.
            framerate (int, optional): Framerate of the video. Defaults to 25.

        Raises:
            ValueError: [description]
            ValueError: [description]
            ValueError: Scorer not existing in dataframe.
            ValueError: Bodypart not existing in dataframe.

        Returns:
            time [np.array]: the time axis
            x [np.array]: the x-position in m
            y [np.array]: the y-position in m
            l [np.array]: the likelihood of the position estimation
            bp string: the body part
            time [np.array]: The time axis.
            x [np.array]: x-position in meter.
            y [np.array]: y-position in meter.
            l [np.array]: The likelihood of the position estimation. Originating from the animal tracking done before.
            bp string: The body part of the animal.
            [type]: [description]
        """
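Putting the pieces together, roughly how the class is used in the `__main__` block further down; the .h5 path is a placeholder:

```python
import numpy as np

tr = TrackingResult("path/to/tracking_results.h5")    # a DeepLabCut *.h5 results file
time, x, y, l, bp = tr.position_values(bodypart=2)    # x and y already converted to meter
good = l > 0.95                                        # keep only confident detections
x_interp = np.interp(time, time[good], x[good])        # bridge low-likelihood gaps by interpolation
```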
@@ -136,7 +162,16 @@ class TrackingResult(object):

            y3 = np.interp(time, time2, y2)
        return time, x3, y3, l, bp

    def plot(self, scorer=0, bodypart=0, threshold=0.9, framerate=30):

    def plot(self, scorer=0, bodypart=0, threshold=0.9, framerate=25):
        """Plot the position of a bodypart in the tank over time.

        Args:
            scorer (int, optional): Scorer of the dataset. Defaults to 0.
            bodypart (int, optional): Given bodypart to plot. Defaults to 0.
            threshold (float, optional): Threshold the likelihood has to exceed for a point to be plotted. Defaults to 0.9.
            framerate (int, optional): Framerate of the video. Defaults to 25.
        """
        t, x, y, l, name = self.position_values(scorer=scorer, bodypart=bodypart, framerate=framerate)
        plt.scatter(x[l > threshold], y[l > threshold], c=t[l > threshold], label=name)
        plt.scatter(self.center_meter[0], self.center_meter[1], marker="*")
@@ -148,31 +183,36 @@ class TrackingResult(object):

        bar.set_label("time [s]")
        plt.legend()
        plt.show()
        from IPython import embed

        pass

if __name__ == '__main__':
    from IPython import embed
    filename = "2020.12.04_lepto48DLC_resnet50_boldnessDec11shuffle1_200000.h5"
    path = "/mnt/movies/merle_verena/boldness/labeled_videos/day_4/"
    tr = TrackingResult(path+filename)
    time, x, y, l, bp = tr.position_values(bodypart=2)
    filename = "/2022.01.12_3DLC_resnet50_efish_tracking3Mar21shuffle1_300000.h5"
    path = "/home/efish/efish_tracking/efish_tracking3-Xaver-2022-03-21/videos"

    tr = TrackingResult(path+filename)  # usage of the class with a given file
    time, x, y, l, bp = tr.position_values(bodypart=2)  # time, x and y values, likelihood of position estimation, tracked bodypart
    phi = tr.angle_to_center(0, True, False, 0.95)

    thresh = 0.95
    time2 = time[l>thresh]
    x2 = x[l>thresh]
    y2 = y[l>thresh]
    x3 = np.interp(time, time2, x2)
    y3 = np.interp(time, time2, y2)
    time2 = time[l>thresh]  # time values where the likelihood of the position estimation > threshold
    x2 = x[l>thresh]  # x values with likelihood > threshold
    y2 = y[l>thresh]  # y values -"-
    x3 = np.interp(time, time2, x2)  # x value interpolation at points where the likelihood was under the threshold
    y3 = np.interp(time, time2, y2)  # y value -"-

    fig, axes = plt.subplots(3, 1, sharex=True)
    axes[0].plot(time, x)
    axes[0].plot(time, x3)
    axes[0].set_ylabel('x-position')
    axes[1].plot(time, y)
    axes[1].plot(time, y3)
    axes[2].plot(time, l)
    axes[1].set_ylabel('y-position')
    axes[2].plot(time, l)
    axes[2].set_xlabel('time [s]')
    axes[2].set_ylabel('likelihood')
    plt.show()

    embed()
2 setup.py
@@ -5,7 +5,7 @@ VERSION = 0.5

AUTHOR = "Jan Grewe"
CONTACT = "jan.grewe@g-node.org"
CLASSIFIERS = "science"
DESCRIPTION = "helpers for handling depp lab cut tracking results"
DESCRIPTION = "helpers for handling deep lab cut tracking results"

README = "README.md"
with open(README) as f: