Skip to content
Snippets Groups Projects
Commit 9b8cb709 authored by Tamino Huxohl's avatar Tamino Huxohl
Browse files

add todos

parent d60df9f1
No related branches found
No related tags found
No related merge requests found
import torch
import torch.nn as nn
class Conv(nn.Sequential):
    """Basic 3D convolution block: Conv3d -> BatchNorm3d -> ReLU.

    With ``kernel_size=3``, ``stride=1`` and ``padding="same"`` the spatial
    dimensions of the input are preserved; only the channel count changes
    from ``in_channels`` to ``out_channels``.

    :param in_channels: number of channels of the input volume
    :param out_channels: number of channels produced by the convolution
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # NOTE(review): the original appended this Conv3d twice (once in
        # multi-line form, once as a one-liner) — a diff artifact. The second
        # copy used in_channels=in_channels although the stream already has
        # out_channels channels, crashing the forward pass whenever
        # in_channels != out_channels. Keep exactly one convolution.
        self.append(
            nn.Conv3d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=1,
                padding="same",
            )
        )
        self.append(nn.BatchNorm3d(num_features=out_channels))
        self.append(nn.ReLU(inplace=True))
class Discriminator(nn.Module):
def __init__(self, in_channels=1):
super().__init__()
#TODO: make fully connected layer dependent on input shape
#TODO: write doc
self.conv = nn.Sequential(
Conv(in_channels=in_channels, out_channels=32),
......@@ -38,7 +32,7 @@ class Discriminator(nn.Module):
nn.ReLU(inplace=True),
nn.Linear(in_features=128, out_features=1),
)
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x, 1)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment