#!/usr/bin/env python
# coding: utf-8

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt

# Download the FashionMNIST training and test sets as tensors.
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)

# Human-readable names for the 10 FashionMNIST classes.
class_names = [
    "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
    "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot",
]


def plot_images(images, labels, predictions=None):
    """Show up to 25 images in a 5x5 grid, labelled with their class names."""
    plt.figure(figsize=(10, 10))
    for i in range(min(25, len(images))):
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(images[i].numpy().squeeze(), cmap=plt.cm.binary)
        plt.xlabel(class_names[labels[i]])
        if predictions is not None:
            plt.title(class_names[predictions[i]])


batch_size = 64

train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# Preview the first training batch.
images, labels = next(iter(train_dataloader))
plot_images(images, labels)
plt.show()

device = "cuda" if torch.cuda.is_available() else "cpu"


################################ Our neural network is defined here ################################
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        # Note: despite its tutorial-inherited name, this stack uses Tanh and
        # Sigmoid activations, not ReLU.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 256),
            nn.Tanh(),
            nn.Linear(256, 1028),
            nn.Sigmoid(),
            nn.Linear(1028, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
####################################################################################################


model = NeuralNetwork().to(device)

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")


epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print("Done!")

# Predict the class of a single test image.
model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    x = x.to(device)  # fix: the input must live on the same device as the model
    pred = model(x)
    # fix: `classes` was undefined; the list is named `class_names` above
    predicted, actual = class_names[pred[0].argmax(0)], class_names[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
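
# --- Optional sketch: visualise predictions on a test batch --------------------------------------
# The `plot_images` helper above accepts a `predictions` argument that the script never exercises.
# This is a minimal, assumed usage of it on the first test batch, reusing the trained `model`,
# `test_dataloader`, and `device` defined above; it is an illustration, not part of the original
# training pipeline.
images, labels = next(iter(test_dataloader))
model.eval()  # already in eval mode after the loop above; repeated here for self-containment
with torch.no_grad():
    # Run the batch through the model, take the highest-scoring class per image,
    # and move the indices back to the CPU for plotting.
    preds = model(images.to(device)).argmax(1).cpu()
plot_images(images, labels, preds)  # titles show predictions, x-labels show true classes
plt.show()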