Add time logging in history

Beingpax 2025-07-04 20:15:58 +05:45
parent 0faffa0155
commit 7bd2b29d16
7 changed files with 112 additions and 48 deletions

View File

@@ -9,13 +9,21 @@ final class Transcription {
     var timestamp: Date
     var duration: TimeInterval
     var audioFileURL: String?
+    var transcriptionModelName: String?
+    var aiEnhancementModelName: String?
+    var transcriptionDuration: TimeInterval?
+    var enhancementDuration: TimeInterval?
 
-    init(text: String, duration: TimeInterval, enhancedText: String? = nil, audioFileURL: String? = nil) {
+    init(text: String, duration: TimeInterval, enhancedText: String? = nil, audioFileURL: String? = nil, transcriptionModelName: String? = nil, aiEnhancementModelName: String? = nil, transcriptionDuration: TimeInterval? = nil, enhancementDuration: TimeInterval? = nil) {
         self.id = UUID()
         self.text = text
         self.enhancedText = enhancedText
         self.timestamp = Date()
         self.duration = duration
         self.audioFileURL = audioFileURL
+        self.transcriptionModelName = transcriptionModelName
+        self.aiEnhancementModelName = aiEnhancementModelName
+        self.transcriptionDuration = transcriptionDuration
+        self.enhancementDuration = enhancementDuration
     }
 }
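All of the new stored properties and initializer parameters default to nil, so existing call sites and previously saved records remain valid. A minimal usage sketch (the model name below is hypothetical, for illustration only):

let transcription = Transcription(
    text: "hello world",
    duration: 4.2,
    transcriptionModelName: "whisper-large-v3",  // hypothetical model name
    transcriptionDuration: 1.8
)
// aiEnhancementModelName and enhancementDuration stay nil when no enhancement ran.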

View File

@@ -392,14 +392,16 @@ class AIEnhancementService: ObservableObject {
         }
     }
 
-    func enhance(_ text: String) async throws -> String {
+    func enhance(_ text: String) async throws -> (String, TimeInterval) {
+        let startTime = Date()
         let enhancementPrompt: EnhancementPrompt = .transcriptionEnhancement
         var retryCount = 0
         while retryCount < maxRetries {
             do {
                 let result = try await makeRequest(text: text, mode: enhancementPrompt, retryCount: retryCount)
-                return result
+                let duration = Date().timeIntervalSince(startTime)
+                return (result, duration)
             } catch let error as EnhancementError {
                 if shouldRetry(error: error, retryCount: retryCount) {
                     retryCount += 1
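enhance(_:) now returns the elapsed wall-clock time alongside the enhanced text, so callers destructure a tuple rather than receiving a bare String. The call-site pattern, as it appears throughout this commit:

let (enhancedText, enhancementDuration) = try await enhancementService.enhance(text)
// enhancementDuration is a TimeInterval in seconds, measured with Date-based timing.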

View File

@@ -11,7 +11,6 @@ class AudioTranscriptionManager: ObservableObject {
     @Published var isProcessing = false
     @Published var processingPhase: ProcessingPhase = .idle
     @Published var currentTranscription: Transcription?
-    @Published var messageLog: String = ""
     @Published var errorMessage: String?
 
     private var currentTask: Task<Void, Error>?
@@ -57,7 +56,6 @@ class AudioTranscriptionManager: ObservableObject {
 
         isProcessing = true
         processingPhase = .loading
-        messageLog = ""
         errorMessage = nil
 
         currentTask = Task {
@@ -92,6 +90,7 @@ class AudioTranscriptionManager: ObservableObject {
                 // Transcribe using appropriate service
                 processingPhase = .transcribing
+                let transcriptionStart = Date()
 
                 var text: String
 
                 switch currentModel.provider {
@@ -103,6 +102,7 @@ class AudioTranscriptionManager: ObservableObject {
                     text = try await cloudTranscriptionService.transcribe(audioURL: permanentURL, model: currentModel)
                 }
+                let transcriptionDuration = Date().timeIntervalSince(transcriptionStart)
 
                 text = text.trimmingCharacters(in: .whitespacesAndNewlines)
 
                 // Apply word replacements if enabled
@@ -116,12 +116,16 @@ class AudioTranscriptionManager: ObservableObject {
                    enhancementService.isConfigured {
                     processingPhase = .enhancing
                     do {
-                        let enhancedText = try await enhancementService.enhance(text)
+                        let (enhancedText, enhancementDuration) = try await enhancementService.enhance(text)
                         let transcription = Transcription(
                             text: text,
                             duration: duration,
                             enhancedText: enhancedText,
-                            audioFileURL: permanentURL.absoluteString
+                            audioFileURL: permanentURL.absoluteString,
+                            transcriptionModelName: currentModel.displayName,
+                            aiEnhancementModelName: enhancementService.getAIService()?.currentModel,
+                            transcriptionDuration: transcriptionDuration,
+                            enhancementDuration: enhancementDuration
                         )
                         modelContext.insert(transcription)
                         try modelContext.save()
@@ -131,7 +135,9 @@ class AudioTranscriptionManager: ObservableObject {
                         let transcription = Transcription(
                             text: text,
                             duration: duration,
-                            audioFileURL: permanentURL.absoluteString
+                            audioFileURL: permanentURL.absoluteString,
+                            transcriptionModelName: currentModel.displayName,
+                            transcriptionDuration: transcriptionDuration
                         )
                         modelContext.insert(transcription)
                         try modelContext.save()
@@ -141,7 +147,9 @@ class AudioTranscriptionManager: ObservableObject {
                     let transcription = Transcription(
                         text: text,
                         duration: duration,
-                        audioFileURL: permanentURL.absoluteString
+                        audioFileURL: permanentURL.absoluteString,
+                        transcriptionModelName: currentModel.displayName,
+                        transcriptionDuration: transcriptionDuration
                     )
                     modelContext.insert(transcription)
                     try modelContext.save()
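The same stopwatch pattern (capture Date() before the call, compute timeIntervalSince(_:) after) recurs here, in AudioTranscriptionService, and in WhisperState. A hypothetical helper, not part of this commit, sketching how it could be factored out:

import Foundation

// Hypothetical generic timer, shown only to illustrate the repeated pattern.
func timed<T>(_ work: () async throws -> T) async rethrows -> (T, TimeInterval) {
    let start = Date()
    let value = try await work()
    return (value, Date().timeIntervalSince(start))
}

// e.g. let (text, transcriptionDuration) = try await timed {
//     try await cloudTranscriptionService.transcribe(audioURL: permanentURL, model: currentModel)
// }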

View File

@@ -7,7 +7,6 @@ import os
 @MainActor
 class AudioTranscriptionService: ObservableObject {
     @Published var isTranscribing = false
-    @Published var messageLog = ""
     @Published var currentError: TranscriptionError?
 
     private let modelContext: ModelContext
@@ -41,35 +40,28 @@ class AudioTranscriptionService: ObservableObject {
         await MainActor.run {
             isTranscribing = true
-            messageLog = "Starting retranscription...\n"
         }
 
         do {
             // Delegate transcription to appropriate service
+            let transcriptionStart = Date()
             var text: String
             switch model.provider {
             case .local:
-                messageLog += "Using local transcription service...\n"
                 text = try await localTranscriptionService.transcribe(audioURL: url, model: model)
-                messageLog += "Local transcription completed.\n"
             case .nativeApple:
-                messageLog += "Using Native Apple transcription service...\n"
                 text = try await nativeAppleTranscriptionService.transcribe(audioURL: url, model: model)
-                messageLog += "Native Apple transcription completed.\n"
             default: // Cloud models
-                messageLog += "Using cloud transcription service...\n"
                 text = try await cloudTranscriptionService.transcribe(audioURL: url, model: model)
-                messageLog += "Cloud transcription completed.\n"
             }
 
             // Common post-processing for both local and cloud transcriptions
+            let transcriptionDuration = Date().timeIntervalSince(transcriptionStart)
             text = text.trimmingCharacters(in: .whitespacesAndNewlines)
 
             // Apply word replacements if enabled
             if UserDefaults.standard.bool(forKey: "IsWordReplacementEnabled") {
                 text = WordReplacementService.shared.applyReplacements(to: text)
-                messageLog += "Word replacements applied.\n"
                 logger.notice("✅ Word replacements applied")
             }
@@ -89,7 +81,6 @@ class AudioTranscriptionService: ObservableObject {
                 try FileManager.default.copyItem(at: url, to: permanentURL)
             } catch {
                 logger.error("❌ Failed to create permanent copy of audio: \(error.localizedDescription)")
-                messageLog += "Failed to create permanent copy of audio: \(error.localizedDescription)\n"
                 isTranscribing = false
                 throw error
             }
@@ -101,48 +92,47 @@ class AudioTranscriptionService: ObservableObject {
                enhancementService.isEnhancementEnabled,
                enhancementService.isConfigured {
                 do {
-                    messageLog += "Enhancing transcription with AI...\n"
-                    let enhancedText = try await enhancementService.enhance(text)
-                    messageLog += "Enhancement completed.\n"
+                    let (enhancedText, enhancementDuration) = try await enhancementService.enhance(text)
 
                     let newTranscription = Transcription(
                         text: text,
                         duration: duration,
                         enhancedText: enhancedText,
-                        audioFileURL: permanentURLString
+                        audioFileURL: permanentURLString,
+                        transcriptionModelName: model.displayName,
+                        aiEnhancementModelName: enhancementService.getAIService()?.currentModel,
+                        transcriptionDuration: transcriptionDuration,
+                        enhancementDuration: enhancementDuration
                     )
                     modelContext.insert(newTranscription)
                     do {
                         try modelContext.save()
                     } catch {
                         logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                        messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
                     }
 
                     await MainActor.run {
                         isTranscribing = false
-                        messageLog += "Done: \(enhancedText)\n"
                     }
                     return newTranscription
                 } catch {
-                    messageLog += "Enhancement failed: \(error.localizedDescription). Using original transcription.\n"
                     let newTranscription = Transcription(
                         text: text,
                         duration: duration,
-                        audioFileURL: permanentURLString
+                        audioFileURL: permanentURLString,
+                        transcriptionModelName: model.displayName,
+                        transcriptionDuration: transcriptionDuration
                     )
                     modelContext.insert(newTranscription)
                     do {
                         try modelContext.save()
                     } catch {
                         logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                        messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
                     }
 
                     await MainActor.run {
                         isTranscribing = false
-                        messageLog += "Done: \(text)\n"
                     }
                     return newTranscription
@@ -151,26 +141,25 @@ class AudioTranscriptionService: ObservableObject {
                 let newTranscription = Transcription(
                     text: text,
                     duration: duration,
-                    audioFileURL: permanentURLString
+                    audioFileURL: permanentURLString,
+                    transcriptionModelName: model.displayName,
+                    transcriptionDuration: transcriptionDuration
                 )
                 modelContext.insert(newTranscription)
                 do {
                     try modelContext.save()
                 } catch {
                     logger.error("❌ Failed to save transcription: \(error.localizedDescription)")
-                    messageLog += "Failed to save transcription: \(error.localizedDescription)\n"
                 }
 
                 await MainActor.run {
                     isTranscribing = false
-                    messageLog += "Done: \(text)\n"
                 }
                 return newTranscription
             }
         } catch {
             logger.error("❌ Transcription failed: \(error.localizedDescription)")
-            messageLog += "Transcription failed: \(error.localizedDescription)\n"
             currentError = .transcriptionFailed
             isTranscribing = false
             throw error
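With the @Published messageLog gone, progress and failures in this service surface only through the os.Logger calls that remain. A sketch of that logging style, assuming a hypothetical subsystem identifier:

import os

let logger = Logger(subsystem: "com.example.app", category: "AudioTranscriptionService")  // subsystem string is hypothetical
logger.notice("✅ Word replacements applied")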

View File

@@ -259,10 +259,6 @@ struct AudioTranscribeView: View {
                         .scaleEffect(0.8)
                     Text(transcriptionManager.processingPhase.message)
                         .font(.headline)
-                    Text(transcriptionManager.messageLog)
-                        .font(.caption)
-                        .foregroundColor(.secondary)
-                        .multilineTextAlignment(.center)
                 }
                 .padding()
             }

View File

@@ -26,7 +26,7 @@ struct TranscriptionCard: View {
                     .foregroundColor(.secondary)
 
                 Spacer()
-                Text(formatDuration(transcription.duration))
+                Text(formatTiming(transcription.duration))
                     .font(.system(size: 14, weight: .medium, design: .default))
                     .padding(.horizontal, 8)
                     .padding(.vertical, 4)
@@ -85,6 +85,28 @@ struct TranscriptionCard: View {
                         .padding(.vertical, 8)
                     AudioPlayerView(url: url)
                 }
+
+                // Metadata section (when expanded)
+                if isExpanded && hasMetadata {
+                    Divider()
+                        .padding(.vertical, 8)
+
+                    VStack(alignment: .leading, spacing: 10) {
+                        metadataRow(icon: "hourglass", label: "Audio Duration", value: formatTiming(transcription.duration))
+                        if let modelName = transcription.transcriptionModelName {
+                            metadataRow(icon: "cpu.fill", label: "Transcription Model", value: modelName)
+                        }
+                        if let aiModel = transcription.aiEnhancementModelName {
+                            metadataRow(icon: "sparkles", label: "Enhancement Model", value: aiModel)
+                        }
+                        if let duration = transcription.transcriptionDuration {
+                            metadataRow(icon: "clock.fill", label: "Transcription Time", value: formatTiming(duration))
+                        }
+                        if let duration = transcription.enhancementDuration {
+                            metadataRow(icon: "clock.fill", label: "Enhancement Time", value: formatTiming(duration))
+                        }
+                    }
+                }
             }
         }
         .padding(16)
@@ -114,9 +136,39 @@ struct TranscriptionCard: View {
         }
     }
 
-    private func formatDuration(_ duration: TimeInterval) -> String {
+    private var hasMetadata: Bool {
+        transcription.transcriptionModelName != nil ||
+        transcription.aiEnhancementModelName != nil ||
+        transcription.transcriptionDuration != nil ||
+        transcription.enhancementDuration != nil
+    }
+
+    private func formatTiming(_ duration: TimeInterval) -> String {
+        if duration < 1 {
+            return String(format: "%.0fms", duration * 1000)
+        }
+        if duration < 60 {
+            return String(format: "%.1fs", duration)
+        }
         let minutes = Int(duration) / 60
-        let seconds = Int(duration) % 60
-        return String(format: "%d:%02d", minutes, seconds)
+        let seconds = duration.truncatingRemainder(dividingBy: 60)
+        return String(format: "%dm %.0fs", minutes, seconds)
     }
+
+    private func metadataRow(icon: String, label: String, value: String) -> some View {
+        HStack(spacing: 12) {
+            Image(systemName: icon)
+                .font(.system(size: 13, weight: .medium))
+                .foregroundColor(.secondary)
+                .frame(width: 20, alignment: .center)
+            Text(label)
+                .font(.system(size: 13, weight: .medium))
+                .foregroundColor(.primary)
+
+            Spacer()
+            Text(value)
+                .font(.system(size: 13, weight: .semibold))
+                .foregroundColor(.secondary)
+        }
+    }
 }
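For reference, the renamed formatTiming(_:) scales its unit with the magnitude of the interval; per the implementation above:

// formatTiming(0.35)  -> "350ms"
// formatTiming(12.34) -> "12.3s"
// formatTiming(95)    -> "1m 35s"  (formatDuration(95) previously produced "1:35")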

View File

@@ -301,7 +301,10 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             transcriptionService = cloudTranscriptionService
         }
 
+        let transcriptionStart = Date()
         var text = try await transcriptionService.transcribe(audioURL: url, model: model)
+        let transcriptionDuration = Date().timeIntervalSince(transcriptionStart)
+
         text = text.trimmingCharacters(in: .whitespacesAndNewlines)
 
         if UserDefaults.standard.bool(forKey: "IsWordReplacementEnabled") {
@@ -325,28 +328,32 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             do {
                 if shouldCancelRecording { return }
                 let textForAI = promptDetectionResult?.processedText ?? text
-                let enhancedText = try await enhancementService.enhance(textForAI)
+                let (enhancedText, enhancementDuration) = try await enhancementService.enhance(textForAI)
                 let newTranscription = Transcription(
                     text: originalText,
                     duration: actualDuration,
                     enhancedText: enhancedText,
-                    audioFileURL: permanentURL?.absoluteString
+                    audioFileURL: permanentURL?.absoluteString,
+                    transcriptionModelName: model.displayName,
+                    aiEnhancementModelName: enhancementService.getAIService()?.currentModel,
+                    transcriptionDuration: transcriptionDuration,
+                    enhancementDuration: enhancementDuration
                 )
                 modelContext.insert(newTranscription)
                 try? modelContext.save()
                 text = enhancedText
             } catch {
                 // Enhancement failed - save error in enhancedText field and show notification
                 let newTranscription = Transcription(
                     text: originalText,
                     duration: actualDuration,
                     enhancedText: "Enhancement failed: \(error.localizedDescription)",
-                    audioFileURL: permanentURL?.absoluteString
+                    audioFileURL: permanentURL?.absoluteString,
+                    transcriptionModelName: model.displayName,
+                    transcriptionDuration: transcriptionDuration
                 )
                 modelContext.insert(newTranscription)
                 try? modelContext.save()
 
                 // Show notification about enhancement failure
                 await MainActor.run {
                     NotificationManager.shared.showNotification(
                         title: "AI enhancement failed",
@@ -358,7 +365,9 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
             let newTranscription = Transcription(
                 text: originalText,
                 duration: actualDuration,
-                audioFileURL: permanentURL?.absoluteString
+                audioFileURL: permanentURL?.absoluteString,
+                transcriptionModelName: model.displayName,
+                transcriptionDuration: transcriptionDuration
             )
             modelContext.insert(newTranscription)
             try? modelContext.save()
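When enhancement fails, the record is still saved: the error message lands in enhancedText and only transcription-side timing is captured, so enhancementDuration stays nil and the history card omits its "Enhancement Time" row. A sketch of the resulting record, with illustrative values only:

let failed = Transcription(
    text: "raw transcription",
    duration: 3.0,
    enhancedText: "Enhancement failed: request timed out",  // hypothetical error text
    transcriptionModelName: "whisper-base",                 // hypothetical model name
    transcriptionDuration: 0.9
)
assert(failed.enhancementDuration == nil)  // no Enhancement Time row will render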