From b4e30b298868887546dafe178fdf3f1819ea2bdc Mon Sep 17 00:00:00 2001
From: Beingpax
Date: Fri, 14 Mar 2025 11:20:20 +0545
Subject: [PATCH] Add structured logging with emojis

---
 VoiceInk/Whisper/LibWhisper.swift   | 26 +++++++++++---------------
 VoiceInk/Whisper/WhisperState.swift | 23 +++++++++++++++++++----
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/VoiceInk/Whisper/LibWhisper.swift b/VoiceInk/Whisper/LibWhisper.swift
index e85727a..84c0db3 100644
--- a/VoiceInk/Whisper/LibWhisper.swift
+++ b/VoiceInk/Whisper/LibWhisper.swift
@@ -4,6 +4,7 @@ import whisper
 #else
 #error("Unable to import whisper module. Please check your project configuration.")
 #endif
+import os
 
 enum WhisperError: Error {
     case couldNotInitializeContext
@@ -15,6 +16,7 @@ actor WhisperContext {
     private var languageCString: [CChar]?
     private var prompt: String?
     private var promptCString: [CChar]?
+    private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "WhisperContext")
     private init() {
         // Private initializer without context
     }
@@ -35,7 +37,6 @@
 
         // Leave 2 processors free (i.e. the high-efficiency cores).
         let maxThreads = max(1, min(8, cpuCount() - 2))
-        print("Selecting \(maxThreads) threads")
         var params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY)
 
         // Read language directly from UserDefaults
@@ -45,11 +46,11 @@
             params.language = languageCString?.withUnsafeBufferPointer { ptr in
                 ptr.baseAddress
             }
-            print("Setting language to: \(selectedLanguage)")
+            logger.notice("🌍 Using language: \(selectedLanguage)")
         } else {
             languageCString = nil
             params.language = nil
-            print("Using auto-detection")
+            logger.notice("🌍 Using auto language detection")
         }
 
         // Only use prompt for English language
@@ -58,15 +59,10 @@
             params.initial_prompt = promptCString?.withUnsafeBufferPointer { ptr in
                 ptr.baseAddress
             }
-            print("Using prompt for English transcription: \(prompt!)")
+            logger.notice("💬 Using prompt for transcription")
         } else {
             promptCString = nil
             params.initial_prompt = nil
-            if selectedLanguage == "en" {
-                print("No prompt set for English")
-            } else {
-                print("Prompt disabled for non-English language")
-            }
         }
 
         // Adapted from whisper.objc
@@ -85,15 +81,15 @@
         params.suppress_nst = true // Additional suppression of non-speech tokens
 
         whisper_reset_timings(context)
-        print("About to run whisper_full")
+        logger.notice("⚙️ Starting whisper transcription")
         samples.withUnsafeBufferPointer { samples in
             if (whisper_full(context, params, samples.baseAddress, Int32(samples.count)) != 0) {
-                print("Failed to run the model")
+                logger.error("❌ Failed to run whisper model")
             } else {
                 // Print detected language info before timings
                 let langId = whisper_full_lang_id(context)
                 let detectedLang = String(cString: whisper_lang_str(langId))
-                print("Transcription completed - Selected: \(selectedLanguage), Used: \(detectedLang)")
+                logger.notice("✅ Transcription completed - Language: \(detectedLang)")
                 whisper_print_timings(context)
             }
         }
@@ -125,14 +121,14 @@
         var params = whisper_context_default_params()
 #if targetEnvironment(simulator)
         params.use_gpu = false
-        print("Running on the simulator, using CPU")
+        logger.notice("🖥️ Running on simulator, using CPU")
 #endif
 
         let context = whisper_init_from_file_with_params(path, params)
         if let context {
             self.context = context
         } else {
-            print("Couldn't load model at \(path)")
+            logger.error("❌ Couldn't load model at \(path)")
             throw WhisperError.couldNotInitializeContext
         }
     }
@@ -147,7 +143,7 @@
 
     func setPrompt(_ prompt: String?) {
         self.prompt = prompt
-        print("Prompt set to: \(prompt ?? "none")")
+        logger.debug("💬 Prompt set: \(prompt ?? "none")")
     }
 }
 
diff --git a/VoiceInk/Whisper/WhisperState.swift b/VoiceInk/Whisper/WhisperState.swift
index 1dfee51..38bb7bb 100644
--- a/VoiceInk/Whisper/WhisperState.swift
+++ b/VoiceInk/Whisper/WhisperState.swift
@@ -90,7 +90,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
            let savedModel = availableModels.first(where: { $0.name == savedModelName }) {
             currentModel = savedModel
         }
-
         Task {
             await migrateModelsIfNeeded()
         }
@@ -147,6 +146,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
     private func loadModel(_ model: WhisperModel) async throws {
         guard whisperContext == nil else { return }
 
+        logger.notice("🔄 Loading Whisper model: \(model.name)")
         isModelLoading = true
         defer { isModelLoading = false }
 
@@ -154,7 +154,9 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             whisperContext = try await WhisperContext.createContext(path: model.url.path)
             isModelLoaded = true
             currentModel = model
+            logger.notice("✅ Successfully loaded model: \(model.name)")
         } catch {
+            logger.error("❌ Failed to load model: \(model.name) - \(error.localizedDescription)")
             throw WhisperStateError.modelLoadFailed
         }
     }
@@ -172,14 +174,18 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
 
     func toggleRecord() async {
         if isRecording {
+            logger.notice("🛑 Stopping recording")
             await recorder.stopRecording()
             isRecording = false
             isVisualizerActive = false
             if let recordedFile {
                 let duration = Date().timeIntervalSince(transcriptionStartTime ?? Date())
                 await transcribeAudio(recordedFile, duration: duration)
+            } else {
+                logger.error("❌ No recorded file found after stopping recording")
             }
         } else {
+            logger.notice("🎙️ Starting recording")
             requestRecordPermission { [self] granted in
                 if granted {
                     Task {
@@ -216,9 +222,11 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
 
     private func performBackgroundTasks() async {
         if let currentModel = self.currentModel, self.whisperContext == nil {
+            logger.notice("🔄 Preloading model in background: \(currentModel.name)")
             do {
                 try await self.loadModel(currentModel)
             } catch {
+                logger.error("❌ Background model preloading failed: \(error.localizedDescription)")
                 await MainActor.run {
                     self.messageLog += "Error preloading model: \(error.localizedDescription)\n"
                 }
@@ -274,6 +282,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
     func downloadModel(_ model: PredefinedModel) async {
         guard let url = URL(string: model.downloadURL) else { return }
 
+        logger.notice("🔽 Downloading model: \(model.name)")
        do {
             let (data, response) = try await withCheckedThrowingContinuation { (continuation: CheckedContinuation<(Data, URLResponse), Error>) in
                 let task = URLSession.shared.dataTask(with: url) { data, response, error in
@@ -312,7 +321,9 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             availableModels.append(WhisperModel(name: model.name, url: destinationURL))
 
             self.downloadProgress.removeValue(forKey: model.name)
+            logger.notice("✅ Successfully downloaded model: \(model.name)")
         } catch {
+            logger.error("❌ Failed to download model: \(model.name) - \(error.localizedDescription)")
             currentError = .modelDownloadFailed
             self.downloadProgress.removeValue(forKey: model.name)
         }
@@ -322,16 +333,20 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         if shouldCancelRecording { return }
 
         guard let currentModel = currentModel else {
+            logger.error("❌ Cannot transcribe: No model selected")
             messageLog += "Cannot transcribe: No model selected.\n"
             currentError = .modelLoadFailed
             return
         }
 
         guard let whisperContext = whisperContext else {
+            logger.error("❌ Cannot transcribe: Model not loaded")
             messageLog += "Cannot transcribe: Model not loaded.\n"
             currentError = .modelLoadFailed
             return
         }
+
+        logger.notice("🔄 Starting transcription with model: \(currentModel.name)")
         do {
             isProcessing = true
             isTranscribing = true
@@ -372,6 +386,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
 
             var text = await whisperContext.getTranscription()
             text = text.trimmingCharacters(in: .whitespacesAndNewlines)
+            logger.notice("✅ Transcription completed successfully, length: \(text.count) characters")
 
             if let enhancementService = enhancementService,
                enhancementService.isEnhancementEnabled,
@@ -482,7 +497,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
                 canTranscribe = false
             }
         } catch {
-            print("Error deleting model: \(error.localizedDescription)")
             messageLog += "Error deleting model: \(error.localizedDescription)\n"
             currentError = .modelDeletionFailed
         }
@@ -524,7 +538,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
     }
 
     private func showRecorderPanel() {
-        logger.info("Showing recorder panel, type: \(self.recorderType)")
+        logger.notice("📱 Showing \(recorderType) recorder")
         if recorderType == "notch" {
             if notchWindowManager == nil {
                 notchWindowManager = NotchWindowManager(whisperState: self, recorder: recorder)
@@ -538,7 +552,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             }
             miniWindowManager?.show()
         }
-        logger.info("Recorder panel shown successfully")
     }
 
     private func hideRecorderPanel() {
@@ -568,6 +581,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
     private func cleanupResources() async {
         if !isRecording && !isProcessing {
+            logger.notice("🧹 Cleaning up Whisper resources")
             await whisperContext?.releaseResources()
             whisperContext = nil
             isModelLoaded = false
         }
@@ -575,6 +589,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
 
     func dismissMiniRecorder() async {
+        logger.notice("📱 Dismissing \(recorderType) recorder")
        shouldCancelRecording = true
         if isRecording {
             await recorder.stopRecording()
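
Reviewer note on observing these logs: once the print statements are replaced with os.Logger, output no longer appears on stdout; it goes to the unified logging system, viewable in Console.app or streamed with `log stream --predicate 'subsystem == "com.prakashjoshipax.voiceink"'`. If an in-app debug view is ever wanted, a minimal sketch along these lines could read the entries back with OSLogStore (requires macOS 12+); `recentVoiceInkLogs` is a hypothetical helper for illustration, not code from this patch:

    import OSLog

    // Minimal sketch (assumes macOS 12+): reads this process's recent
    // unified-log entries and keeps only the VoiceInk subsystem.
    // The helper name and default window are illustrative only.
    func recentVoiceInkLogs(since seconds: TimeInterval = 300) throws -> [String] {
        let store = try OSLogStore(scope: .currentProcessIdentifier)
        let position = store.position(date: Date().addingTimeInterval(-seconds))
        return try store.getEntries(at: position)
            .compactMap { $0 as? OSLogEntryLog }
            .filter { $0.subsystem == "com.prakashjoshipax.voiceink" }
            .map { "[\($0.category)] \($0.composedMessage)" }
    }

One related consideration: Logger redacts dynamic string interpolations as <private> in the system log by default, so values such as the selected language or model name in these messages will not be visible outside debug builds unless marked public, e.g. \(selectedLanguage, privacy: .public).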