Add structured logging with emojis

This commit is contained in:
Beingpax 2025-03-14 11:20:20 +05:45
parent dffa4056e2
commit b4e30b2988
2 changed files with 30 additions and 19 deletions

View File

@@ -4,6 +4,7 @@ import whisper
#else
#error("Unable to import whisper module. Please check your project configuration.")
#endif
import os
enum WhisperError: Error {
case couldNotInitializeContext
@@ -15,6 +16,7 @@ actor WhisperContext {
private var languageCString: [CChar]?
private var prompt: String?
private var promptCString: [CChar]?
private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "WhisperContext")
private init() {
// Private initializer without context
@@ -35,7 +37,6 @@ actor WhisperContext {
// Leave 2 processors free (i.e. the high-efficiency cores).
let maxThreads = max(1, min(8, cpuCount() - 2))
print("Selecting \(maxThreads) threads")
var params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY)
// Read language directly from UserDefaults
@@ -45,11 +46,11 @@ actor WhisperContext {
params.language = languageCString?.withUnsafeBufferPointer { ptr in
ptr.baseAddress
}
print("Setting language to: \(selectedLanguage)")
logger.notice("🌐 Using language: \(selectedLanguage)")
} else {
languageCString = nil
params.language = nil
print("Using auto-detection")
logger.notice("🌐 Using auto language detection")
}
// Only use prompt for English language
@@ -58,15 +59,10 @@ actor WhisperContext {
params.initial_prompt = promptCString?.withUnsafeBufferPointer { ptr in
ptr.baseAddress
}
print("Using prompt for English transcription: \(prompt!)")
logger.notice("💬 Using prompt for transcription")
} else {
promptCString = nil
params.initial_prompt = nil
if selectedLanguage == "en" {
print("No prompt set for English")
} else {
print("Prompt disabled for non-English language")
}
}
// Adapted from whisper.objc
@@ -85,15 +81,15 @@ actor WhisperContext {
params.suppress_nst = true // Additional suppression of non-speech tokens
whisper_reset_timings(context)
print("About to run whisper_full")
logger.notice("⚙️ Starting whisper transcription")
samples.withUnsafeBufferPointer { samples in
if (whisper_full(context, params, samples.baseAddress, Int32(samples.count)) != 0) {
print("Failed to run the model")
logger.error("❌ Failed to run whisper model")
} else {
// Print detected language info before timings
let langId = whisper_full_lang_id(context)
let detectedLang = String(cString: whisper_lang_str(langId))
print("Transcription completed - Selected: \(selectedLanguage), Used: \(detectedLang)")
logger.notice("✅ Transcription completed - Language: \(detectedLang)")
whisper_print_timings(context)
}
}
@@ -125,14 +121,14 @@ actor WhisperContext {
var params = whisper_context_default_params()
#if targetEnvironment(simulator)
params.use_gpu = false
print("Running on the simulator, using CPU")
logger.notice("🖥️ Running on simulator, using CPU")
#endif
let context = whisper_init_from_file_with_params(path, params)
if let context {
self.context = context
} else {
print("Couldn't load model at \(path)")
logger.error("Couldn't load model at \(path)")
throw WhisperError.couldNotInitializeContext
}
}
@@ -147,7 +143,7 @@ actor WhisperContext {
func setPrompt(_ prompt: String?) {
self.prompt = prompt
print("Prompt set to: \(prompt ?? "none")")
logger.debug("💬 Prompt set: \(prompt ?? "none")")
}
}

View File

@@ -90,7 +90,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
let savedModel = availableModels.first(where: { $0.name == savedModelName }) {
currentModel = savedModel
}
Task {
await migrateModelsIfNeeded()
}
@@ -147,6 +146,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
private func loadModel(_ model: WhisperModel) async throws {
guard whisperContext == nil else { return }
logger.notice("🔄 Loading Whisper model: \(model.name)")
isModelLoading = true
defer { isModelLoading = false }
@@ -154,7 +154,9 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
whisperContext = try await WhisperContext.createContext(path: model.url.path)
isModelLoaded = true
currentModel = model
logger.notice("✅ Successfully loaded model: \(model.name)")
} catch {
logger.error("❌ Failed to load model: \(model.name) - \(error.localizedDescription)")
throw WhisperStateError.modelLoadFailed
}
}
@@ -172,14 +174,18 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
func toggleRecord() async {
if isRecording {
logger.notice("🛑 Stopping recording")
await recorder.stopRecording()
isRecording = false
isVisualizerActive = false
if let recordedFile {
let duration = Date().timeIntervalSince(transcriptionStartTime ?? Date())
await transcribeAudio(recordedFile, duration: duration)
} else {
logger.error("❌ No recorded file found after stopping recording")
}
} else {
logger.notice("🎙️ Starting recording")
requestRecordPermission { [self] granted in
if granted {
Task {
@@ -216,9 +222,11 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
private func performBackgroundTasks() async {
if let currentModel = self.currentModel, self.whisperContext == nil {
logger.notice("🔄 Preloading model in background: \(currentModel.name)")
do {
try await self.loadModel(currentModel)
} catch {
logger.error("❌ Background model preloading failed: \(error.localizedDescription)")
await MainActor.run {
self.messageLog += "Error preloading model: \(error.localizedDescription)\n"
}
@@ -274,6 +282,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
func downloadModel(_ model: PredefinedModel) async {
guard let url = URL(string: model.downloadURL) else { return }
logger.notice("🔽 Downloading model: \(model.name)")
do {
let (data, response) = try await withCheckedThrowingContinuation { (continuation: CheckedContinuation<(Data, URLResponse), Error>) in
let task = URLSession.shared.dataTask(with: url) { data, response, error in
@@ -312,7 +321,9 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
availableModels.append(WhisperModel(name: model.name, url: destinationURL))
self.downloadProgress.removeValue(forKey: model.name)
logger.notice("✅ Successfully downloaded model: \(model.name)")
} catch {
logger.error("❌ Failed to download model: \(model.name) - \(error.localizedDescription)")
currentError = .modelDownloadFailed
self.downloadProgress.removeValue(forKey: model.name)
}
@@ -322,17 +333,20 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
if shouldCancelRecording { return }
guard let currentModel = currentModel else {
logger.error("❌ Cannot transcribe: No model selected")
messageLog += "Cannot transcribe: No model selected.\n"
currentError = .modelLoadFailed
return
}
guard let whisperContext = whisperContext else {
logger.error("❌ Cannot transcribe: Model not loaded")
messageLog += "Cannot transcribe: Model not loaded.\n"
currentError = .modelLoadFailed
return
}
logger.notice("🔄 Starting transcription with model: \(currentModel.name)")
do {
isProcessing = true
isTranscribing = true
@@ -372,6 +386,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
var text = await whisperContext.getTranscription()
text = text.trimmingCharacters(in: .whitespacesAndNewlines)
logger.notice("✅ Transcription completed successfully, length: \(text.count) characters")
if let enhancementService = enhancementService,
enhancementService.isEnhancementEnabled,
@@ -482,7 +497,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
canTranscribe = false
}
} catch {
print("Error deleting model: \(error.localizedDescription)")
messageLog += "Error deleting model: \(error.localizedDescription)\n"
currentError = .modelDeletionFailed
}
@@ -524,7 +538,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
}
private func showRecorderPanel() {
logger.info("Showing recorder panel, type: \(self.recorderType)")
logger.notice("📱 Showing \(recorderType) recorder")
if recorderType == "notch" {
if notchWindowManager == nil {
notchWindowManager = NotchWindowManager(whisperState: self, recorder: recorder)
@@ -538,7 +552,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
}
miniWindowManager?.show()
}
logger.info("Recorder panel shown successfully")
}
private func hideRecorderPanel() {
@@ -568,6 +581,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
private func cleanupResources() async {
if !isRecording && !isProcessing {
logger.notice("🧹 Cleaning up Whisper resources")
await whisperContext?.releaseResources()
whisperContext = nil
isModelLoaded = false
@@ -575,6 +589,7 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
}
func dismissMiniRecorder() async {
logger.notice("📱 Dismissing \(recorderType) recorder")
shouldCancelRecording = true
if isRecording {
await recorder.stopRecording()