diff --git a/backend/SermoIA/myapp/views/questions_view.py b/backend/SermoIA/myapp/views/questions_view.py
index 3d8a7a52855712120985ba6b35ff7b9bfb6acb9f..2abb3c993ec62d1ca44437bc632dcd5477f5c597 100644
--- a/backend/SermoIA/myapp/views/questions_view.py
+++ b/backend/SermoIA/myapp/views/questions_view.py
@@ -22,7 +22,6 @@ from ratelimit import limits, sleep_and_retry
 MAX_CALLS_PER_MINUTE = 3
 from TTS.utils.manage import ModelManager
 
-
 load_dotenv()
 
 client = OpenAI(
@@ -175,7 +174,7 @@ def link_questions_gpt(questionnaire_data):
         linked_questions_text = response.choices[0].message.content.strip()
         linked_questions = linked_questions_text.split("\n\n")
 
-        print(f"GPT-3 Response: {response}")
+        print(f"GPT-4o Response: {response}")
         # print(f"Original questions count: {len(questions)}")
         # print(f"Linked questions count: {len(linked_questions)}")
         
diff --git a/backend/SermoIA/myapp/views/stt_view.py b/backend/SermoIA/myapp/views/stt_view.py
index 43cbc43fa14018d23953b7ab8ea402f547ccdab5..ab1621bbe339d7d9f4041573b9097ef387820341 100755
--- a/backend/SermoIA/myapp/views/stt_view.py
+++ b/backend/SermoIA/myapp/views/stt_view.py
@@ -1,10 +1,15 @@
 import os
+import time
 from django.http import JsonResponse
 from django.views.decorators.csrf import csrf_exempt
 import whisper
-import time
 from myapp.models import AudioFile, Answer
 
+def save_audio_file(audio_file):
+    audio_instance = AudioFile(file=audio_file)
+    audio_instance.save()
+    return audio_instance.file.path
+
 def get_whisper_model(device):
     if not hasattr(get_whisper_model, "model"):
         get_whisper_model.model = whisper.load_model("tiny").to(device)
@@ -15,54 +20,53 @@ def transcribe_audio(file_path, device):
     result = model.transcribe(file_path, fp16=False)
     return result["text"]
 
-@csrf_exempt
-def upload_audio(request):
-    if request.method == 'POST':
-        print("POST Request received, processing...")
-        models_to_test = ["tts_models/en/ljspeech/glow-tts"]
-        device = 'cpu'
+def save_transcription(audio_id, transcript):
+    answer = Answer(question_id=int(audio_id), transcript=transcript)
+    answer.save()
 
-        try:
-            audio_file = request.FILES.get('audio')
-            if not audio_file:
-                return JsonResponse({'error': 'No audio file provided'}, status=400)
-            
-            audio_instance = AudioFile(file=audio_file)
-            audio_instance.save()
-            file_path = audio_instance.file.path
+def handle_transcription(file_path, audio_id, device='cpu'):
+    start_time = time.time()
+    transcript = transcribe_audio(file_path, device)
+    end_time = time.time()
+    print(f"Execution time for STT (Whisper): {end_time - start_time}")
+    
+    if not transcript.strip():
+        raise ValueError('Audio file contains no discernible speech')
 
-            audio_id = request.POST.get('id')
-            if not audio_id:
-                return JsonResponse({'error': 'No ID provided'}, status=400)
-            print(f"ID: {audio_id}")
+    save_transcription(audio_id, transcript)
+    return transcript
 
-            for model_name in models_to_test:
-                try:
-                    start_time = time.time()
-                    transcript = transcribe_audio(file_path, device)
-                    end_time = time.time()
-                    print(f"Execution time for STT (Whisper): {end_time - start_time}")
+def clean_up(file_path):
+    if os.path.exists(file_path):
+        os.remove(file_path)
 
-                    if not transcript.strip():
-                        if os.path.exists(file_path): os.remove(file_path)
-                        return JsonResponse({'error': 'Audio file contains no discernible speech'}, status=400)
-
-                    answer = Answer(question_id=int(audio_id), transcript=transcript)
-                    answer.save()
-
-                    if os.path.exists(file_path): os.remove(file_path)
+@csrf_exempt
+def upload_audio(request):
+    if request.method != 'POST':
+        return JsonResponse({'error': 'Invalid request method.'}, status=400)
 
-                    return JsonResponse({'transcript': str(transcript)}, status=200)
+    print("POST Request received, processing...")
 
-                except Exception as e:
-                    print(f"Error processing model {model_name}: {str(e)}")
-                    return JsonResponse({'error': str(e)}, status=500)
+    audio_file = request.FILES.get('audio')
+    if not audio_file:
+        return JsonResponse({'error': 'No audio file provided'}, status=400)
 
-        except Exception as e:
-            print(f"Error handling request: {str(e)}")
-            return JsonResponse({'error': str(e)}, status=500)
+    audio_id = request.POST.get('id')
+    if not audio_id or not audio_id.isdigit():
+        return JsonResponse({'error': 'A valid numeric ID is required'}, status=400)
+    print(f"ID: {audio_id}")
 
-    return JsonResponse({'error': 'Invalid request method.'}, status=400)
+    file_path = None  # sentinel: saving may fail before a path exists
+    try:
+        file_path = save_audio_file(audio_file)
+        transcript = handle_transcription(file_path, audio_id)
+        return JsonResponse({'transcript': transcript}, status=200)
+    except ValueError as ve:
+        return JsonResponse({'error': str(ve)}, status=400)
+    except Exception as e:
+        return JsonResponse({'error': str(e)}, status=500)
+    finally:
+        if file_path: clean_up(file_path)
 
 # Analyse de la réponse (Avec ChatGPT d'OpenAI)
 # def analyse_answer()
\ No newline at end of file