diff --git a/main.py b/main.py
index 2939f46a1b7b8c82c9c366e1f0115613c75a8367..4309779e648637e77e56235e23bcd8fac9382b49 100644
--- a/main.py
+++ b/main.py
@@ -1,6 +1,7 @@
 import os
 import torch
 import torch.nn as nn
+import argparse
 import numpy as np
 
 import data
@@ -9,21 +10,33 @@ import musicgenerator as mg
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 if __name__ == '__main__':
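+    # Hyperparameters are exposed as command-line flags so parameter_finder.py can sweep them.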
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--lr', type=float, default=20, help='initial learning rate')
+    parser.add_argument('--epochs', type=int, default=100, help='upper epoch limit')
+    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
+    parser.add_argument('--sequence_length', type=int, default=32, help='sequence length')
+    parser.add_argument('--dimension_model', type=int, default=128, help='size of word embeddings')
+    parser.add_argument('--nhead', type=int, default=4, help='the number of heads in the encoder/decoder of the transformer model')
+    parser.add_argument('--dropout', type=float, default=0.2, help='dropout applied to layers (0 = no dropout)')
+    parser.add_argument('--nhid', type=int, default=200, help='number of hidden units per layer')
+    parser.add_argument('--nlayers', type=int, default=2, help='number of layers')
+    args = parser.parse_args()
     ###############################################################################
     # Parameters
     ###############################################################################
     data_path = os.path.normpath('./training_data/classical_music/')
-    learning_rate = 20
-    batch_size = 16
+    learning_rate = args.lr
+    batch_size = args.batch_size
     split_train_test_valid = (0.8, 0.1, 0.1)
     model_path = None  # os.path.join(data_path, 'model.pt')
-    sequence_length = 32
-    dim_model = 128
-    num_head = 8
-    num_layers = 2
-    num_hid = 200
-    dropout = 0.2
-    epochs = 100
+    sequence_length = args.sequence_length
+    dim_model = args.dimension_model
+    num_head = args.nhead
+    num_layers = args.nlayers
+    num_hid = args.nhid
+    dropout = args.dropout
+    epochs = args.epochs
     nb_log_epoch = 5
     nb_words = 100
     temperature = 1.0
diff --git a/parameter_finder.py b/parameter_finder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d98fc516f0785d6d9b1471b52a49324c29cee7a3
--- /dev/null
+++ b/parameter_finder.py
@@ -0,0 +1,61 @@
+import subprocess
+import itertools
+import time
+import re
+
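+# Candidate values for each hyperparameter; kept as strings so they can be passed straight to subprocess.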
+lr = ['0.1', '1', '100']
+epochs = ['20', '50', '100']
+batch_size = ['4', '16', '64']
+sequence_length = ['8', '32', '128']
+dimension_model = ['64', '256', '512']
+nhead = ['2', '4', '8']
+dropout = ['0.0', '0.3', '0.6']
+nhid = ['100', '200', '500']
+nlayers = ['2', '6', '10']
+
+if __name__ == '__main__':
+    best_ppl = None
+    best_config = ''
+    nb_tests = 0
+    for lr_i, epoch_i, batch_size_i, sequence_length_i, dimension_model_i, nhead_i, dropout_i, nhid_i, nlayers_i in itertools.product(
+            lr, epochs, batch_size, sequence_length, dimension_model, nhead, dropout, nhid, nlayers):
+        res_name = f"lr{str.replace(lr_i, '.', '')}_epoch{epoch_i}_batch{batch_size_i}_seq{sequence_length_i}_dim{dimension_model_i}_nhead{nhead_i}_drop{str.replace(dropout_i, '.', '')}_nhid{nhid_i}_nlay{nlayers_i}"
+        nb_tests += 1
+        print("Start config :", res_name)
+        start_time = time.time()
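+        # Run one full training via main.py as a subprocess and capture its console output.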
+        res = subprocess.run(['python', 'main.py',
+                              '--lr', lr_i,
+                              '--epochs', epoch_i,
+                              '--batch_size', batch_size_i,
+                              '--sequence_length', sequence_length_i,
+                              '--dimension_model', dimension_model_i,
+                              '--nhead', nhead_i,
+                              '--dropout', dropout_i,
+                              '--nhid', nhid_i,
+                              '--nlayers', nlayers_i
+                              ],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE
+                             )
+        end_time = time.time()
+        res_stdout = res.stdout.decode()
+        res_stderr = res.stderr.decode()
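+        # The run is expected to end with a line like "... test ppl  123.45"; pull out the perplexity.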
+        match = re.search(r'test ppl\s+([0-9.]+)', res_stdout)
+        if match:
+            test_ppl = float(match.group(1))
+            if best_ppl is None or test_ppl < best_ppl:
+                best_ppl = test_ppl
+                best_config = res_name
+            print(res_name, test_ppl, "time:", end_time - start_time)
+        else:
+            print("ERROR:", res_stdout, res_stderr)
+        if nb_tests >= 15:
+            break
+
+    print("nb tests :", nb_tests)
+    print("best config :", best_config)
+    print("best ppl :", best_ppl)
diff --git a/test.py b/test.py
index 5f6a8e0917e5693e6ccb74518b175e62bf8efb1a..36647c2dfd39a86a395d8616864612976521ce44 100644
--- a/test.py
+++ b/test.py
@@ -1,4 +1,5 @@
 import argparse
+import math
 import random
 
 if __name__ == '__main__':
@@ -9,10 +10,11 @@ if __name__ == '__main__':
     parser.add_argument('--batch_size', type=int, default=16, help='batch size')
     parser.add_argument('--sequence_length', type=int, default=32, help='sequence length')
     parser.add_argument('--dimension_model', type=int, default=128, help='size of word embeddings')
-
     parser.add_argument('--nhead', type=int, default=4,  help='the number of heads in the encoder/decoder of the transformer model')
     parser.add_argument('--dropout', type=float, default=0.2, help='dropout applied to layers (0 = no dropout)')
     parser.add_argument('--nhid', type=int, default=200, help='number of hidden units per layer')
     parser.add_argument('--nlayers', type=int, default=2, help='number of layers')
 
-
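+    # Stub run: emit a random loss in the exact "test ppl" format that parameter_finder.py parses.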
+    test_loss = random.uniform(0.0, 15.0)
+    print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
diff --git a/training_data/classical_music/output.mid b/training_data/classical_music/output.mid
index d809b8e8481dec1ff304d4fe97dea41c32ae0c64..dce21e73e0425bb7ebbb5322f415d2394683aaf7 100644
Binary files a/training_data/classical_music/output.mid and b/training_data/classical_music/output.mid differ