author     Anthony Wang   2021-08-24 22:23:46 -0500
committer  Anthony Wang   2021-08-24 22:23:46 -0500
commit     60e12b8eca838f9aba7f632c43ccc73a47c8ed99 (patch)
tree       c9d80959a2d57fce365923b6ef2959a944aff76b
parent     b8133346a4a626451436883f35477c8014de4a4b (diff)
Start working on MNIST net
-rw-r--r--  mnist.py  126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/mnist.py b/mnist.py
new file mode 100644
index 0000000..2b4085f
--- /dev/null
+++ b/mnist.py
@@ -0,0 +1,126 @@
+import torch
+from torch import nn
+from torch.utils.data import DataLoader
+from torchvision import datasets
+from torchvision.transforms import ToTensor
+import matplotlib.pyplot as plt
+
+# Run on a GPU when one is available, otherwise fall back to the CPU
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
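+# Download the MNIST training and test sets and convert the images to tensors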
+training_data = datasets.MNIST(
+    root="data",
+    train=True,
+    download=True,
+    transform=ToTensor(),
+)
+
+test_data = datasets.MNIST(
+    root="data",
+    train=False,
+    download=True,
+    transform=ToTensor(),
+)
+
+batch_size = 64
+
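+# DataLoaders that serve the images in batches of 64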
+train_loader = DataLoader(training_data, batch_size=batch_size)
+test_loader = DataLoader(test_data, batch_size=batch_size)
+
+
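+# Convolutional network: two conv -> batch norm -> ReLU -> max pool blocks,
+# then three fully connected layers mapping the 64*6*6 features to 10 classes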
+class CNN(nn.Module):
+    def __init__(self):
+        super(CNN, self).__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1),
+            nn.BatchNorm2d(32),
+            nn.ReLU(),
+            nn.MaxPool2d(kernel_size=2, stride=2)
+        )
+        self.layer2 = nn.Sequential(
+            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
+            nn.BatchNorm2d(64),
+            nn.ReLU(),
+            nn.MaxPool2d(2)
+        )
+        self.fc1 = nn.Linear(in_features=64*6*6, out_features=600)
+        self.drop = nn.Dropout(0.25)
+        self.fc2 = nn.Linear(in_features=600, out_features=120)
+        self.fc3 = nn.Linear(in_features=120, out_features=10)
+
+    def forward(self, x):
+        out = self.layer1(x)
+        out = self.layer2(out)
+        # Flatten the 64x6x6 feature maps before the fully connected layers
+        out = out.view(out.size(0), -1)
+        out = self.fc1(out)
+        out = self.drop(out)
+        out = self.fc2(out)
+        out = self.fc3(out)
+        return out
+
+
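+# Model, cross-entropy loss, and Adam optimizer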
+model = CNN().to(device)
+error = nn.CrossEntropyLoss()
+learning_rate = 0.001
+optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+
+
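+# Train for 5 epochs, logging loss and test accuracy every 50 batches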
+num_epochs = 5
+count = 0
+
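+# History used for the loss and accuracy plots below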
+loss_list = []
+iteration_list = []
+accuracy_list = []
+
+predictions_list = []
+labels_list = []
+
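+# Training loop: forward pass, loss, backpropagation, parameter update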
+for epoch in range(num_epochs):
+    for images, labels in train_loader:
+        # Use -1 in the view so the final, smaller batch is handled correctly
+        images = images.view(-1, 1, 28, 28).to(device)
+        labels = labels.to(device)
+
+        outputs = model(images)
+        loss = error(outputs, labels)
+
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        count += 1
+
+        # Every 50 batches, measure accuracy on the full test set
+        if count % 50 == 0:
+            total = 0
+            correct = 0
+            model.eval()
+            with torch.no_grad():
+                for images, labels in test_loader:
+                    images = images.view(-1, 1, 28, 28).to(device)
+                    labels = labels.to(device)
+                    labels_list.append(labels)
+
+                    outputs = model(images)
+
+                    predictions = torch.max(outputs, 1)[1]
+                    predictions_list.append(predictions)
+                    correct += (predictions == labels).sum().item()
+
+                    total += len(labels)
+            model.train()
+
+            accuracy = 100 * correct / total
+            loss_list.append(loss.item())
+            iteration_list.append(count)
+            accuracy_list.append(accuracy)
+
+            print("Iteration: {}, Loss: {}, Accuracy: {}%".format(count, loss.item(), accuracy))
+
+
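+# Plot loss and accuracy against the iteration count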
+plt.plot(iteration_list, loss_list)
+plt.xlabel("No. of Iterations")
+plt.ylabel("Loss")
+plt.title("Iterations vs Loss")
+plt.show()
+
+plt.plot(iteration_list, accuracy_list)
+plt.xlabel("No. of Iterations")
+plt.ylabel("Accuracy")
+plt.title("Iterations vs Accuracy")
+plt.show()