Commit ce61a834 authored by Tamino Huxohl

remove output scale param from the training scripts, as this is now done by rescaling the precision factor

parent b68b2c72
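
The equivalence the commit message relies on can be sketched as follows: multiplying the loaded attenuation map by the former --output_scale coefficient gives the same tensors as folding that coefficient into the precision factor used when converting the stored values to floats. This is an illustrative sketch only; the names and numbers (precision, output_scale, the stored voxel values) are assumptions for the example, not this repository's actual API.

# Illustrative sketch only: names and numbers below are assumptions, not repo code.
import torch

precision = 1e-4      # assumed factor that converts stored integer mu-map voxels to float
output_scale = 2.0    # coefficient that used to be passed via --output_scale

stored = torch.tensor([10231, 4, 250], dtype=torch.int32)  # hypothetical stored voxel values

# before: convert to float, then apply a separate output-scaling transform
via_transform = (stored * precision) * output_scale
# after: rescale the precision factor once; no extra output transform is needed
via_precision = stored * (precision * output_scale)

assert torch.allclose(via_transform, via_precision)
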
@@ -258,12 +258,6 @@ if __name__ == "__main__":
         default="data/initial/",
         help="the directory where the dataset for training is found",
     )
-    parser.add_argument(
-        "--output_scale",
-        type=float,
-        default=1.0,
-        help="scale the attenuation map by this coefficient",
-    )
     parser.add_argument(
         "--input_norm",
         type=str,
@@ -398,8 +392,6 @@ if __name__ == "__main__":
     elif args.input_norm == "gaussian":
         transform_normalization = GaussianNormTransform()
 
-    transform_augmentation = ScaleTransform(scale_outputs=args.output_scale)
-
     data_loaders = {}
     for split in ["train", "validation"]:
         dataset = MuMapPatchDataset(
@@ -410,7 +402,6 @@ if __name__ == "__main__":
             shuffle=not args.no_shuffle,
             split_name=split,
             transform_normalization=transform_normalization,
-            transform_augmentation=transform_augmentation,
             logger=logger,
         )
         data_loader = torch.utils.data.DataLoader(

The same change applied to the second training script:

@@ -151,12 +151,6 @@ if __name__ == "__main__":
         default="data/initial/",
         help="the directory where the dataset for training is found",
     )
-    parser.add_argument(
-        "--output_scale",
-        type=float,
-        default=1.0,
-        help="scale the attenuation map by this coefficient",
-    )
     parser.add_argument(
         "--input_norm",
         type=str,
@@ -288,8 +282,6 @@ if __name__ == "__main__":
     elif args.input_norm == "gaussian":
         transform_normalization = GaussianNormTransform()
 
-    transform_augmentation = ScaleTransform(scale_outputs=args.output_scale)
-
     data_loaders = {}
     for split in ["train", "validation"]:
         dataset = MuMapPatchDataset(
@@ -300,7 +292,6 @@ if __name__ == "__main__":
             shuffle=not args.no_shuffle,
             split_name=split,
             transform_normalization=transform_normalization,
-            transform_augmentation=transform_augmentation,
             logger=logger,
         )
         data_loader = torch.utils.data.DataLoader(