digit-depth/digit_depth/train/mlp_model.py

import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):
    """Four-layer fully connected network with dropout between hidden layers."""

    dropout_p = 0.05

    def __init__(self, input_size=5, output_size=3, hidden_size=32):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.drop = nn.Dropout(p=self.dropout_p)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.drop(x)
        x = F.relu(self.fc2(x))
        x = self.drop(x)
        # ReLU added here: the original applied fc3 without an activation, so at
        # eval time (dropout disabled) fc3 and fc4 collapsed into one affine map.
        x = F.relu(self.fc3(x))
        x = self.drop(x)
        x = self.fc4(x)  # no activation on the output layer
        return x
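
A minimal usage sketch for the class above: instantiate the network, switch it to eval mode so dropout is disabled, and run a forward pass. The module path digit_depth.train.mlp_model is inferred from the file path in the header and the batch size is illustrative; both are assumptions, not part of the original file.

import torch

from digit_depth.train.mlp_model import MLP  # assumed import path, inferred from the file location

model = MLP(input_size=5, output_size=3, hidden_size=32)
model.eval()  # inference mode: nn.Dropout passes inputs through unchanged

batch = torch.randn(8, 5)  # illustrative batch: 8 samples, 5 input features each
with torch.no_grad():
    out = model(batch)
print(out.shape)  # torch.Size([8, 3])

During training, calling model.train() re-enables dropout, so repeated forward passes on the same batch will differ slightly; only eval-mode outputs are deterministic.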