diff --git a/datasets.py b/datasets.py
index e3b93fa..f4a48de 100644
--- a/datasets.py
+++ b/datasets.py
@@ -50,26 +50,13 @@ class CustomDataset(Dataset):
         target["iscrowd"] = iscrowd
         image_id = torch.tensor([idx])
         target["image_id"] = image_id
-        # target["image_name"] = image_name #ToDo: implement this as 3rd return...
+        target["image_name"] = image_name #ToDo: implement this as 3rd return...
 
         return img_tensor, target
 
     def __len__(self):
         return len(self.all_images)
 
-def create_train_test_dataset(path, test_size=0.2):
-    files = glob.glob(os.path.join(path, '*.png'))
-    train_test_idx = np.arange(len(files), dtype=int)
-    np.random.shuffle(train_test_idx)
-
-    train_idx = train_test_idx[int(test_size*len(train_test_idx)):]
-    test_idx = train_test_idx[:int(test_size*len(train_test_idx))]
-
-    train_data = CustomDataset(path)
-    test_data = CustomDataset(path)
-
-    return train_data, test_data
-
 def create_train_or_test_dataset(path, train=True):
     if train == True:
         pfx='train'
diff --git a/inference.py b/inference.py
index 88b5357..fafb828 100644
--- a/inference.py
+++ b/inference.py
@@ -45,6 +45,9 @@ def infere_model(test_loader, model, detection_th=0.8):
     prog_bar = tqdm(test_loader, total=len(test_loader))
     for samples, targets in prog_bar:
         images = list(image.to(DEVICE) for image in samples)
+
+        embed()
+        quit()
         targets = [{k: v for k, v in t.items()} for t in targets]
 
         with torch.inference_mode():
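For reference, here is a minimal sketch (not part of the diff) of how the `image_name` key now stored in the target dict by `CustomDataset.__getitem__` could be consumed during inference. The `collate_fn`, loader settings, and detection-model output format are assumptions following the usual torchvision detection setup, not code taken from this repository.

```python
import torch
from torch.utils.data import DataLoader


def collate_fn(batch):
    # torchvision-style detection models expect a list of images and a list of
    # target dicts, so the default stacking collate is replaced with a zip.
    return tuple(zip(*batch))


def run_inference(dataset, model, device="cpu", detection_th=0.8):
    # `dataset` is assumed to be a CustomDataset instance; `model` a detection
    # model returning dicts with "boxes" and "scores" per image.
    loader = DataLoader(dataset, batch_size=2, shuffle=False, collate_fn=collate_fn)
    model.eval()
    for images, targets in loader:
        images = [img.to(device) for img in images]
        with torch.inference_mode():
            outputs = model(images)
        for target, output in zip(targets, outputs):
            # The key added in datasets.py keeps the source file traceable
            # alongside the predictions.
            name = target["image_name"]
            keep = output["scores"] >= detection_th
            print(name, int(keep.sum()), "detections above threshold")
```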