diff --git a/README.md b/README.md
index a7333d6..ec92bd2 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,8 @@ training and one for testing (both also stored in ./data/dataset).
 
 ### ToDos:
 * on a long scale: only save raw file bounding boxes in frequency and time (t0, t1, f0, f1) and the hyperparameters of the corresponding spectrogram. USE THESE PARAMETERS IN DATASET_FN.
+* rescale image input to (7, 7) * 256 --> width/height in inches * dpi
+* when the dataset input is a spectrogram, use a resize transform.
 
 ## model.py
 
diff --git a/confic.py b/confic.py
index e765147..4f8239e 100644
--- a/confic.py
+++ b/confic.py
@@ -1,24 +1,34 @@
 import torch
 import pathlib
 
+# training parameters
 BATCH_SIZE = 8
-RESIZE_TO = 416
 NUM_EPOCHS = 10
 NUM_WORKERS = 4
+DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
 
+# input parameters
 IMG_SIZE = (7, 7) # inches
 IMG_DPI = 256
+RESIZE_TO = 416 # ToDo: alter this parameter to (7, 7) * 256 [IMG_SIZE * IMG_DPI]
 
-DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
-
+# dataset parameters
 CLASSES = ['__backgroud__', '1']
-
 NUM_CLASSES = len(CLASSES)
 
+# data snippet parameters
+MIN_FREQ = 200
+MAX_FREQ = 1500
+DELTA_FREQ = 200
+FREQ_OVERLAP = 25
+
+DELTA_TIME = 60*10
+TIME_OVERLAP = 60*1
+
+# output parameters
 DATA_DIR = 'data/dataset'
 OUTDIR = 'model_outputs'
 INFERENCE_OUTDIR = 'inference_outputs'
-
 for required_folders in [DATA_DIR, OUTDIR, INFERENCE_OUTDIR]:
     if not pathlib.Path(required_folders).exists():
         pathlib.Path(required_folders).mkdir(parents=True, exist_ok=True)