diff --git a/VoiceInk/Services/AudioTranscriptionManager.swift b/VoiceInk/Services/AudioTranscriptionManager.swift
index df50553..915f9a5 100644
--- a/VoiceInk/Services/AudioTranscriptionManager.swift
+++ b/VoiceInk/Services/AudioTranscriptionManager.swift
@@ -71,7 +71,8 @@ class AudioTranscriptionManager: ObservableObject {
 
         // Get audio duration
         let audioAsset = AVURLAsset(url: url)
-        let duration = CMTimeGetSeconds(audioAsset.duration)
+        let durationTime = try await audioAsset.load(.duration)
+        let duration = CMTimeGetSeconds(durationTime)
 
         // Create permanent copy of the audio file
         let recordingsDirectory = FileManager.default.urls(for: .applicationSupportDirectory, in: .userDomainMask)[0]
@@ -111,7 +112,7 @@ class AudioTranscriptionManager: ObservableObject {
                         audioFileURL: permanentURL.absoluteString
                     )
                     modelContext.insert(transcription)
                     try modelContext.save()
                     currentTranscription = transcription
                 } catch {
                     logger.error("Enhancement failed: \(error.localizedDescription)")
@@ -122,7 +123,7 @@ class AudioTranscriptionManager: ObservableObject {
                         audioFileURL: permanentURL.absoluteString
                     )
                     modelContext.insert(transcription)
                     try modelContext.save()
                     currentTranscription = transcription
                 }
             } else {
@@ -132,13 +133,13 @@ class AudioTranscriptionManager: ObservableObject {
                     audioFileURL: permanentURL.absoluteString
                 )
                 modelContext.insert(transcription)
                 try modelContext.save()
                 currentTranscription = transcription
             }
 
             processingPhase = .completed
-            try? await Task.sleep(nanoseconds: 1_500_000_000)
+            try await Task.sleep(nanoseconds: 1_500_000_000)
             await finishProcessing()
 
         } catch {
             await handleError(error)
diff --git a/VoiceInk/Services/AudioTranscriptionService.swift b/VoiceInk/Services/AudioTranscriptionService.swift
index 93e0069..b77f337 100644
--- a/VoiceInk/Services/AudioTranscriptionService.swift
+++ b/VoiceInk/Services/AudioTranscriptionService.swift
@@ -59,7 +59,8 @@ class AudioTranscriptionService: ObservableObject {
 
         // Get audio duration
         let audioAsset = AVURLAsset(url: url)
-        let duration = CMTimeGetSeconds(audioAsset.duration)
+        let durationTime = (try? await audioAsset.load(.duration)) ?? .zero
+        let duration = CMTimeGetSeconds(durationTime)
 
         // Create a permanent copy of the audio file
         let recordingsDirectory = FileManager.default.urls(for: .applicationSupportDirectory, in: .userDomainMask)[0]
@@ -118,12 +119,7 @@ class AudioTranscriptionService: ObservableObject {
                 audioFileURL: permanentURLString
             )
             modelContext.insert(newTranscription)
-            do {
-                try modelContext.save()
-            } catch {
-                logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
-            }
+            try? modelContext.save()
 
             await MainActor.run {
                 isTranscribing = false
@@ -139,12 +135,7 @@ class AudioTranscriptionService: ObservableObject {
                 audioFileURL: permanentURLString
             )
             modelContext.insert(newTranscription)
-            do {
-                try modelContext.save()
-            } catch {
-                logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
-            }
+            try? modelContext.save()
 
             await MainActor.run {
                 isTranscribing = false
@@ -160,12 +151,7 @@ class AudioTranscriptionService: ObservableObject {
                 audioFileURL: permanentURLString
             )
             modelContext.insert(newTranscription)
-            do {
-                try modelContext.save()
-            } catch {
-                logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
-            }
+            try? modelContext.save()
 
             await MainActor.run {
                 isTranscribing = false