feat: Enhance audio visualization and recording integration - Improved audio meter visualization, removed debug logs, optimized updates
commit 57e5d456a6
parent e45112cfd7
@@ -483,7 +483,7 @@
 				"@executable_path/../Frameworks",
 			);
 			MACOSX_DEPLOYMENT_TARGET = 14.0;
-			MARKETING_VERSION = 0.96;
+			MARKETING_VERSION = 0.97;
 			PRODUCT_BUNDLE_IDENTIFIER = com.prakashjoshipax.VoiceInk;
 			PRODUCT_NAME = "$(TARGET_NAME)";
 			SWIFT_EMIT_LOC_STRINGS = YES;
@@ -516,7 +516,7 @@
 				"@executable_path/../Frameworks",
 			);
 			MACOSX_DEPLOYMENT_TARGET = 14.0;
-			MARKETING_VERSION = 0.96;
+			MARKETING_VERSION = 0.97;
 			PRODUCT_BUNDLE_IDENTIFIER = com.prakashjoshipax.VoiceInk;
 			PRODUCT_NAME = "$(TARGET_NAME)";
 			SWIFT_EMIT_LOC_STRINGS = YES;
@@ -1,236 +0,0 @@
-import Foundation
-import AVFoundation
-import CoreAudio
-import os
-
-class AudioEngine: ObservableObject {
-    private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "AudioEngine")
-    private lazy var engine = AVAudioEngine()
-    private lazy var mixer = AVAudioMixerNode()
-    @Published var isRunning = false
-    @Published var audioLevel: CGFloat = 0.0
-
-    private var lastUpdateTime: TimeInterval = 0
-    private var inputTap: Any?
-    private let updateInterval: TimeInterval = 0.05
-    private let deviceManager = AudioDeviceManager.shared
-    private var deviceObserver: NSObjectProtocol?
-    private var isConfiguring = false
-
-    init() {
-        setupDeviceChangeObserver()
-    }
-
-    private func setupDeviceChangeObserver() {
-        deviceObserver = AudioDeviceConfiguration.createDeviceChangeObserver { [weak self] in
-            guard let self = self else { return }
-            if self.isRunning {
-                self.handleDeviceChange()
-            }
-        }
-    }
-
-    private func handleDeviceChange() {
-        guard !isConfiguring else {
-            logger.warning("Device change already in progress, skipping")
-            return
-        }
-
-        isConfiguring = true
-        logger.info("Handling device change - Current engine state: \(self.isRunning ? "Running" : "Stopped")")
-
-        // Stop the engine first
-        stopAudioEngine()
-
-        // Log device change details
-        let currentDeviceID = deviceManager.getCurrentDevice()
-        if let deviceName = deviceManager.getDeviceName(deviceID: currentDeviceID) {
-            logger.info("Switching to device: \(deviceName) (ID: \(currentDeviceID))")
-        }
-
-        // Wait a bit for the system to process the device change
-        DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) { [weak self] in
-            guard let self = self else { return }
-
-            // Try to start with new device
-            self.startAudioEngine()
-            self.isConfiguring = false
-            logger.info("Device change handling completed")
-        }
-    }
-
-    private func setupAudioEngine() {
-        guard inputTap == nil else { return }
-
-        let bus = 0
-
-        // Get the current device (either selected or fallback)
-        let currentDeviceID = deviceManager.getCurrentDevice()
-
-        if currentDeviceID != 0 {
-            do {
-                logger.info("Setting up audio engine with device ID: \(currentDeviceID)")
-                // Log the device type (helps identify Bluetooth devices)
-                if let deviceName = deviceManager.getDeviceName(deviceID: currentDeviceID) {
-                    let isBluetoothDevice = deviceName.lowercased().contains("bluetooth")
-                    logger.info("Device type: \(isBluetoothDevice ? "Bluetooth" : "Standard") - \(deviceName)")
-                }
-
-                try configureAudioSession(with: currentDeviceID)
-            } catch {
-                logger.error("Audio engine setup failed: \(error.localizedDescription)")
-                logger.error("Device ID: \(currentDeviceID)")
-                if let deviceName = deviceManager.getDeviceName(deviceID: currentDeviceID) {
-                    logger.error("Failed device name: \(deviceName)")
-                }
-                // Don't return here, let it try with default device
-            }
-        } else {
-            logger.info("No specific device available, using system default")
-        }
-
-        // Wait briefly for device configuration to take effect
-        Thread.sleep(forTimeInterval: 0.05)
-
-        // Log input format details
-        let inputFormat = engine.inputNode.inputFormat(forBus: bus)
-        logger.info("""
-            Input format details:
-            - Sample Rate: \(inputFormat.sampleRate)
-            - Channel Count: \(inputFormat.channelCount)
-            - Common Format: \(inputFormat.commonFormat.rawValue)
-
-            - Channel Layout: \(inputFormat.channelLayout?.layoutTag ?? 0)
-            """)
-
-        inputTap = engine.inputNode.installTap(onBus: bus, bufferSize: 1024, format: inputFormat) { [weak self] (buffer, time) in
-            self?.processAudioBuffer(buffer)
-        }
-    }
-
-    private func configureAudioSession(with deviceID: AudioDeviceID) throws {
-        logger.info("Starting audio session configuration for device ID: \(deviceID)")
-        // Get the audio format from the selected device
-        let streamFormat = try AudioDeviceConfiguration.configureAudioSession(with: deviceID)
-        logger.info("Got stream format: \(streamFormat.mSampleRate)Hz, \(streamFormat.mChannelsPerFrame) channels")
-
-        // Configure the input node to use the selected device
-        let inputNode = engine.inputNode
-        guard let audioUnit = inputNode.audioUnit else {
-            logger.error("Failed to get audio unit from input node")
-            throw AudioConfigurationError.failedToGetAudioUnit
-        }
-        logger.info("Got audio unit from input node")
-
-        // Set the device for the audio unit
-        try AudioDeviceConfiguration.configureAudioUnit(audioUnit, with: deviceID)
-        logger.info("Configured audio unit with device")
-
-        // Reset the engine to apply the new configuration
-        engine.stop()
-        try engine.reset()
-        logger.info("Reset audio engine")
-
-        // Use async dispatch instead of thread sleep
-        DispatchQueue.global().async {
-            Thread.sleep(forTimeInterval: 0.05)
-            self.logger.info("Audio configuration delay completed")
-        }
-    }
-
-    func startAudioEngine() {
-        guard !isRunning else { return }
-
-        logger.info("Starting audio engine")
-
-        do {
-            setupAudioEngine()
-            logger.info("Audio engine setup completed")
-
-            try engine.prepare()
-            logger.info("Audio engine prepared")
-
-            try engine.start()
-            isRunning = true
-
-            // Log active device and configuration details
-            let currentDeviceID = deviceManager.getCurrentDevice()
-            if let deviceName = deviceManager.getDeviceName(deviceID: currentDeviceID) {
-                let isBluetoothDevice = deviceName.lowercased().contains("bluetooth")
-                logger.info("""
-                    Audio engine started successfully:
-                    - Device: \(deviceName)
-                    - Device ID: \(currentDeviceID)
-                    - Device Type: \(isBluetoothDevice ? "Bluetooth" : "Standard")
-                    - Engine Status: Running
-                    """)
-            }
-        } catch {
-            logger.error("""
-                Audio engine start failed:
-                - Error: \(error.localizedDescription)
-                - Error Details: \(error)
-                - Current Device ID: \(self.deviceManager.getCurrentDevice())
-                - Engine State: \(self.engine.isRunning ? "Running" : "Stopped")
-                """)
-            // Clean up on failure
-            stopAudioEngine()
-        }
-    }
-
-    func stopAudioEngine() {
-        guard isRunning else { return }
-
-        logger.info("Stopping audio engine")
-        if let tap = inputTap {
-            engine.inputNode.removeTap(onBus: 0)
-            inputTap = nil
-        }
-
-        engine.stop()
-
-        // Complete cleanup of the engine
-        engine = AVAudioEngine() // Create a fresh instance
-        mixer = AVAudioMixerNode() // Reset mixer
-
-        isRunning = false
-        audioLevel = 0.0
-        logger.info("Audio engine stopped and reset")
-    }
-
-    private func processAudioBuffer(_ buffer: AVAudioPCMBuffer) {
-        guard let channelData = buffer.floatChannelData?[0] else { return }
-        let frameCount = buffer.frameLength
-
-        let currentTime = CACurrentMediaTime()
-        guard currentTime - lastUpdateTime >= updateInterval else { return }
-        lastUpdateTime = currentTime
-
-        // Use vDSP for faster processing
-        var sum: Float = 0
-        for frame in 0..<Int(frameCount) {
-            let sample = abs(channelData[frame])
-            sum += sample
-        }
-
-        let average = sum / Float(frameCount)
-        let level = CGFloat(average)
-
-        // Apply higher scaling for built-in microphone
-        let currentDeviceID = deviceManager.getCurrentDevice()
-        let isBuiltInMic = deviceManager.getDeviceName(deviceID: currentDeviceID)?.lowercased().contains("built-in") ?? false
-        let scalingFactor: CGFloat = isBuiltInMic ? 11.0 : 5.0 // Higher scaling for built-in mic
-
-        DispatchQueue.main.async {
-            self.audioLevel = min(max(level * scalingFactor, 0), 1)
-        }
-    }
-
-    deinit {
-        if let observer = deviceObserver {
-            NotificationCenter.default.removeObserver(observer)
-        }
-        stopAudioEngine()
-    }
-}
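Note: the deleted AudioEngine class above averaged absolute sample values in a plain per-frame loop, despite the "Use vDSP for faster processing" comment sitting over it; its metering role moves to AVAudioRecorder's built-in metering in the Recorder changes below. For reference, a minimal sketch of what a genuinely vectorized version of that loop could look like (hypothetical helper, not part of this commit):

    import Accelerate
    import AVFoundation

    // Mean of absolute sample values, computed in one vDSP call instead of
    // a per-frame Swift loop.
    func meanAbsoluteLevel(of buffer: AVAudioPCMBuffer) -> Float {
        guard let samples = buffer.floatChannelData?[0], buffer.frameLength > 0 else { return 0 }
        var mean: Float = 0
        vDSP_meanmagv(samples, 1, &mean, vDSP_Length(buffer.frameLength))
        return mean
    }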
@@ -3,13 +3,16 @@ import AVFoundation
 import CoreAudio
 import os
 
-actor Recorder {
+@MainActor // Change to MainActor since we need to interact with UI
+class Recorder: ObservableObject {
     private var recorder: AVAudioRecorder?
     private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "Recorder")
     private let deviceManager = AudioDeviceManager.shared
     private var deviceObserver: NSObjectProtocol?
     private var isReconfiguring = false
     private let mediaController = MediaController.shared
+    @Published var audioMeter = AudioMeter(averagePower: 0, peakPower: 0)
+    private var levelMonitorTimer: Timer?
 
     enum RecorderError: Error {
         case couldNotStartRecording
@@ -139,11 +142,13 @@ actor Recorder {
         logger.info("Initializing AVAudioRecorder with URL: \(url.path)")
         let recorder = try AVAudioRecorder(url: url, settings: recordSettings)
         recorder.delegate = delegate
+        recorder.isMeteringEnabled = true // Enable metering
 
         logger.info("Attempting to start recording...")
         if recorder.record() {
             logger.info("Recording started successfully")
             self.recorder = recorder
+            startLevelMonitoring()
         } else {
             logger.error("Failed to start recording - recorder.record() returned false")
             logger.error("Current device ID: \(deviceID)")
@@ -170,6 +175,7 @@ actor Recorder {
 
     func stopRecording() {
         logger.info("Stopping recording")
+        stopLevelMonitoring()
         recorder?.stop()
         recorder?.delegate = nil // Remove delegate
         recorder = nil
@@ -186,10 +192,56 @@
         logger.info("Recording stopped successfully")
     }
 
+    private func startLevelMonitoring() {
+        levelMonitorTimer = Timer.scheduledTimer(withTimeInterval: 0.05, repeats: true) { [weak self] _ in
+            guard let self = self else { return }
+            self.updateAudioLevel()
+        }
+    }
+
+    private func stopLevelMonitoring() {
+        levelMonitorTimer?.invalidate()
+        levelMonitorTimer = nil
+        audioMeter = AudioMeter(averagePower: 0, peakPower: 0)
+    }
+
+    private func updateAudioLevel() {
+        guard let recorder = recorder else { return }
+        recorder.updateMeters()
+
+        // Get the power values in decibels
+        let averagePowerDb = recorder.averagePower(forChannel: 0)
+        let peakPowerDb = recorder.peakPower(forChannel: 0)
+
+        // Convert from dB to linear scale using proper conversion
+        let normalizedAverage = pow(10, Double(averagePowerDb) / 30)
+        let normalizedPeak = pow(10, Double(peakPowerDb) / 30)
+
+        // Apply standard scaling factor for all devices
+        let scalingFactor = 2.5
+
+        // Update the audio meter with scaled values
+        let scaledAverage = min(normalizedAverage * scalingFactor, 1.0)
+        let scaledPeak = min(normalizedPeak * scalingFactor, 1.0)
+
+        audioMeter = AudioMeter(
+            averagePower: scaledAverage,
+            peakPower: scaledPeak
+        )
+    }
+
     deinit {
         logger.info("Deinitializing Recorder")
         if let observer = deviceObserver {
             NotificationCenter.default.removeObserver(observer)
         }
+        Task { @MainActor in
+            stopLevelMonitoring()
+        }
     }
 }
+
+struct AudioMeter: Equatable {
+    let averagePower: Double
+    let peakPower: Double
+}
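Note: updateAudioLevel() above maps dBFS readings onto a 0...1 meter with pow(10, dB / 30). The textbook dB-to-amplitude conversion divides by 20; the larger divisor is a deliberate visual compression so quiet input still moves the meter. A small worked example of the mapping (values are illustrative):

    import Foundation

    // AVAudioRecorder reports power in dBFS: 0 dB is full scale, silence
    // approaches -160 dB.
    let db: Double = -30
    let strictAmplitude = pow(10, db / 20)   // ~0.0316 (true amplitude)
    let meterValue = pow(10, db / 30)        // ~0.1    (flattened for display)
    let displayed = min(meterValue * 2.5, 1) // ~0.25 after the 2.5x scaling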
@@ -2,7 +2,7 @@ import SwiftUI
 
 struct MiniRecorderView: View {
     @ObservedObject var whisperState: WhisperState
-    @ObservedObject var audioEngine: AudioEngine
+    @ObservedObject var recorder: Recorder
     @EnvironmentObject var windowManager: MiniWindowManager
     @State private var showPromptPopover = false
 
@@ -84,7 +84,7 @@ struct MiniRecorderView: View {
                 NotchStaticVisualizer(color: .white)
             } else {
                 NotchAudioVisualizer(
-                    audioLevel: audioEngine.audioLevel,
+                    audioMeter: recorder.audioMeter,
                     color: .white,
                     isActive: whisperState.isRecording
                 )
@@ -6,11 +6,11 @@ class MiniWindowManager: ObservableObject {
     private var windowController: NSWindowController?
     private var miniPanel: MiniRecorderPanel?
     private let whisperState: WhisperState
-    private let audioEngine: AudioEngine
+    private let recorder: Recorder
 
-    init(whisperState: WhisperState, audioEngine: AudioEngine) {
+    init(whisperState: WhisperState, recorder: Recorder) {
         self.whisperState = whisperState
-        self.audioEngine = audioEngine
+        self.recorder = recorder
 
         NotificationCenter.default.addObserver(
             self,
@@ -55,7 +55,7 @@ class MiniWindowManager: ObservableObject {
         let metrics = MiniRecorderPanel.calculateWindowMetrics()
         let panel = MiniRecorderPanel(contentRect: metrics)
 
-        let miniRecorderView = MiniRecorderView(whisperState: whisperState, audioEngine: audioEngine)
+        let miniRecorderView = MiniRecorderView(whisperState: whisperState, recorder: recorder)
             .environmentObject(self)
 
         let hostingController = NSHostingController(rootView: miniRecorderView)
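Note: the window managers now hold the Recorder instead of the old AudioEngine and inject it straight into the SwiftUI views. A condensed sketch of that ownership pattern, under assumed simplified names (MeterSource/MeterView are hypothetical, not part of this commit):

    import SwiftUI

    // The owner keeps a reference to an ObservableObject and injects it into
    // the view; every @Published change re-renders the view automatically.
    final class MeterSource: ObservableObject {
        @Published var level: Double = 0
    }

    struct MeterView: View {
        @ObservedObject var source: MeterSource
        var body: some View {
            ProgressView(value: source.level) // redraws whenever level changes
        }
    }

    // let hosting = NSHostingController(rootView: MeterView(source: source))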
@@ -2,7 +2,7 @@ import SwiftUI
 
 struct NotchRecorderView: View {
     @ObservedObject var whisperState: WhisperState
-    @ObservedObject var audioEngine: AudioEngine
+    @ObservedObject var recorder: Recorder
     @EnvironmentObject var windowManager: NotchWindowManager
     @State private var isHovering = false
     @State private var showPromptPopover = false
@@ -92,7 +92,7 @@ struct NotchRecorderView: View {
                 NotchStaticVisualizer(color: .white)
             } else {
                 NotchAudioVisualizer(
-                    audioLevel: audioEngine.audioLevel,
+                    audioMeter: recorder.audioMeter,
                     color: .white,
                     isActive: whisperState.isRecording
                 )
@@ -266,7 +266,7 @@ struct NotchRecordButton: View {
 }
 
 struct NotchAudioVisualizer: View {
-    let audioLevel: CGFloat
+    let audioMeter: AudioMeter
     let color: Color
     let isActive: Bool
 
@@ -275,23 +275,33 @@ struct NotchAudioVisualizer: View {
     private let maxHeight: CGFloat = 18
     private let audioThreshold: CGFloat = 0.01
 
-    @State private var barHeights: [CGFloat]
+    @State private var barHeights: [BarLevel] = []
 
-    init(audioLevel: CGFloat, color: Color, isActive: Bool) {
-        self.audioLevel = audioLevel
+    struct BarLevel {
+        var average: CGFloat
+        var peak: CGFloat
+    }
+
+    init(audioMeter: AudioMeter, color: Color, isActive: Bool) {
+        self.audioMeter = audioMeter
         self.color = color
         self.isActive = isActive
-        _barHeights = State(initialValue: Array(repeating: minHeight, count: 5))
+        _barHeights = State(initialValue: Array(repeating: BarLevel(average: minHeight, peak: minHeight), count: 5))
     }
 
     var body: some View {
         HStack(spacing: 2) {
             ForEach(0..<barCount, id: \.self) { index in
-                NotchVisualizerBar(height: barHeights[index], color: color)
+                NotchVisualizerBar(
+                    averageHeight: barHeights[index].average,
+                    peakHeight: barHeights[index].peak,
+                    color: color
+                )
             }
         }
-        .onReceive(Timer.publish(every: 0.05, on: .main, in: .common).autoconnect()) { _ in
-            if isActive && audioLevel > audioThreshold {
+        .onChange(of: audioMeter) { newMeter in
+
+            if isActive {
                 updateBars()
             } else {
                 resetBars()
@@ -303,47 +313,70 @@ struct NotchAudioVisualizer: View {
         for i in 0..<barCount {
             let targetHeight = calculateTargetHeight(for: i)
             let speed = CGFloat.random(in: 0.4...0.8)
-            barHeights[i] += (targetHeight - barHeights[i]) * speed
+
+            withAnimation(.spring(response: 0.2, dampingFraction: 0.7)) {
+                barHeights[i].average += (targetHeight.average - barHeights[i].average) * speed
+                barHeights[i].peak += (targetHeight.peak - barHeights[i].peak) * speed
+            }
         }
     }
 
     private func resetBars() {
-        for i in 0..<barCount {
-            barHeights[i] = minHeight
+        withAnimation(.spring(response: 0.2, dampingFraction: 0.7)) {
+            for i in 0..<barCount {
+                barHeights[i].average = minHeight
+                barHeights[i].peak = minHeight
+            }
         }
     }
 
-    private func calculateTargetHeight(for index: Int) -> CGFloat {
-        let normalizedLevel = max(0, audioLevel - audioThreshold)
-        let amplifiedLevel = pow(normalizedLevel, 0.6)
-        let baseHeight = amplifiedLevel * maxHeight * 1.7
-        let variation = CGFloat.random(in: -2...2)
+    private func calculateTargetHeight(for index: Int) -> BarLevel {
         let positionFactor = CGFloat(index) / CGFloat(barCount - 1)
         let curve = sin(positionFactor * .pi)
 
-        return max(minHeight, min(baseHeight * curve + variation, maxHeight))
+        let randomFactor = Double.random(in: 0.8...1.2)
+        let averageBase = audioMeter.averagePower * randomFactor
+        let peakBase = audioMeter.peakPower * randomFactor
+
+        let averageHeight = CGFloat(averageBase) * maxHeight * 1.7 * curve
+        let peakHeight = CGFloat(peakBase) * maxHeight * 1.7 * curve
+
+        let finalAverage = max(minHeight, min(averageHeight, maxHeight))
+        let finalPeak = max(minHeight, min(peakHeight, maxHeight))
+
+
+        return BarLevel(
+            average: finalAverage,
+            peak: finalPeak
+        )
    }
 }
 
 struct NotchVisualizerBar: View {
-    let height: CGFloat
+    let averageHeight: CGFloat
+    let peakHeight: CGFloat
     let color: Color
 
     var body: some View {
-        RoundedRectangle(cornerRadius: 1.5)
-            .fill(
-                LinearGradient(
-                    gradient: Gradient(colors: [
-                        color.opacity(0.6),
-                        color.opacity(0.8),
-                        color
-                    ]),
-                    startPoint: .bottom,
-                    endPoint: .top
-                )
-            )
-            .frame(width: 2, height: height)
-            .animation(.spring(response: 0.2, dampingFraction: 0.7, blendDuration: 0), value: height)
+        ZStack(alignment: .bottom) {
+            // Average level bar
+            RoundedRectangle(cornerRadius: 1.5)
+                .fill(
+                    LinearGradient(
+                        gradient: Gradient(colors: [
+                            color.opacity(0.6),
+                            color.opacity(0.8),
+                            color
+                        ]),
+                        startPoint: .bottom,
+                        endPoint: .top
+                    )
+                )
+                .frame(width: 2, height: averageHeight)
+
+        }
+        .animation(.spring(response: 0.2, dampingFraction: 0.7, blendDuration: 0), value: averageHeight)
+        .animation(.spring(response: 0.2, dampingFraction: 0.7, blendDuration: 0), value: peakHeight)
     }
 }
 
@@ -355,7 +388,11 @@ struct NotchStaticVisualizer: View {
     var body: some View {
         HStack(spacing: 2) {
             ForEach(0..<barCount, id: \.self) { index in
-                NotchVisualizerBar(height: barHeights[index] * 18, color: color)
+                NotchVisualizerBar(
+                    averageHeight: barHeights[index] * 18,
+                    peakHeight: barHeights[index] * 18,
+                    color: color
+                )
             }
         }
     }
@@ -377,4 +414,4 @@ struct ProcessingIndicator: View {
         }
     }
 }
-}
+}
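Note: updateBars() above animates each bar with an exponential approach — every update moves the stored height a random fraction (0.4...0.8) of the remaining distance to the target, which gives the meter its springy decay. A standalone trace of that smoothing with a fixed speed of 0.5 (illustrative, not part of the commit):

    import Foundation

    var height: CGFloat = 0
    let target: CGFloat = 18   // maxHeight in the visualizer
    for tick in 1...5 {
        height += (target - height) * 0.5
        print("tick \(tick): \(height)")  // 9.0, 13.5, 15.75, 16.875, 17.4375
    }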
@@ -6,11 +6,11 @@ class NotchWindowManager: ObservableObject {
     private var windowController: NSWindowController?
     private var notchPanel: NotchRecorderPanel?
     private let whisperState: WhisperState
-    private let audioEngine: AudioEngine
+    private let recorder: Recorder
 
-    init(whisperState: WhisperState, audioEngine: AudioEngine) {
+    init(whisperState: WhisperState, recorder: Recorder) {
         self.whisperState = whisperState
-        self.audioEngine = audioEngine
+        self.recorder = recorder
 
         NotificationCenter.default.addObserver(
             self,
@@ -58,8 +58,7 @@ class NotchWindowManager: ObservableObject {
         let metrics = NotchRecorderPanel.calculateWindowMetrics()
         let panel = NotchRecorderPanel(contentRect: metrics.frame)
 
-        // Create the NotchRecorderView and set it as the content
-        let notchRecorderView = NotchRecorderView(whisperState: whisperState, audioEngine: audioEngine)
+        let notchRecorderView = NotchRecorderView(whisperState: whisperState, recorder: recorder)
             .environmentObject(self)
 
         let hostingController = NotchRecorderHostingController(rootView: notchRecorderView)
@@ -68,7 +67,6 @@ class NotchWindowManager: ObservableObject {
         self.notchPanel = panel
         self.windowController = NSWindowController(window: panel)
 
-        // Only use orderFrontRegardless to show without activating
        panel.orderFrontRegardless()
     }
 
@@ -1,54 +1,91 @@
 import SwiftUI
 
 struct VisualizerView: View {
-    @ObservedObject var audioEngine: AudioEngine
-    @State private var levels: [CGFloat] = Array(repeating: 0, count: 50)
-    private let smoothingFactor: CGFloat = 0.3
+    @ObservedObject var recorder: Recorder
+    private let barCount = 50
+    @State private var levels: [BarLevel] = []
+    private let smoothingFactor: Double = 0.3
 
+    struct BarLevel: Equatable {
+        var average: CGFloat
+        var peak: CGFloat
+    }
+
     var body: some View {
         GeometryReader { geometry in
             HStack(alignment: .center, spacing: 4) {
-                ForEach(0..<levels.count, id: \.self) { index in
-                    VisualizerBar(level: levels[index])
-                        .frame(width: (geometry.size.width - CGFloat(levels.count - 1) * 2) / CGFloat(levels.count))
+                ForEach(0..<barCount, id: \.self) { index in
+                    VisualizerBar(level: levels.isEmpty ? BarLevel(average: 0, peak: 0) : levels[index])
+                        .frame(width: (geometry.size.width - CGFloat(barCount - 1) * 4) / CGFloat(barCount))
                 }
             }
             .frame(width: geometry.size.width, height: geometry.size.height)
             .background(Color.black.opacity(0.1))
             .cornerRadius(10)
-            .onReceive(audioEngine.$audioLevel) { newLevel in
-                updateLevels(with: newLevel)
+            .onAppear {
+                levels = Array(repeating: BarLevel(average: 0, peak: 0), count: barCount)
+            }
+            .onReceive(recorder.$audioMeter) { newMeter in
+                updateLevels(with: newMeter)
             }
         }
     }
 
-    private func updateLevels(with newLevel: CGFloat) {
-        // Apply smoothing to make transitions more natural
-        for i in 0..<levels.count {
-            let randomFactor = CGFloat.random(in: 0.8...1.2)
-            let targetLevel = min(max(newLevel * randomFactor, 0), 1)
-            let smoothedLevel = levels[i] + (targetLevel - levels[i]) * smoothingFactor
-
-            withAnimation(.easeInOut(duration: 0.15)) {
-                levels[i] = smoothedLevel
-            }
+    private func updateLevels(with meter: AudioMeter) {
+        // Create new levels with randomization for visual interest
+        var newLevels: [BarLevel] = []
+        for i in 0..<barCount {
+            let randomFactor = Double.random(in: 0.8...1.2)
+            let targetAverage = min(max(meter.averagePower * randomFactor, 0), 1)
+            let targetPeak = min(max(meter.peakPower * randomFactor, 0), 1)
+
+            let currentLevel = levels[i]
+            let smoothedAverage = currentLevel.average + (CGFloat(targetAverage) - currentLevel.average) * CGFloat(smoothingFactor)
+            let smoothedPeak = currentLevel.peak + (CGFloat(targetPeak) - currentLevel.peak) * CGFloat(smoothingFactor)
+
+            newLevels.append(BarLevel(
+                average: smoothedAverage,
+                peak: smoothedPeak
+            ))
         }
+
+        withAnimation(.easeInOut(duration: 0.15)) {
+            levels = newLevels
+        }
     }
 }
 
 struct VisualizerBar: View {
-    let level: CGFloat
+    let level: VisualizerView.BarLevel
 
     var body: some View {
         GeometryReader { geometry in
-            RoundedRectangle(cornerRadius: 2)
-                .fill(
-                    LinearGradient(gradient: Gradient(colors: [.blue, .purple]),
-                                   startPoint: .bottom,
-                                   endPoint: .top)
-                )
-                .frame(height: level * geometry.size.height)
-                .position(x: geometry.size.width / 2, y: geometry.size.height / 2)
+            ZStack(alignment: .bottom) {
+                // Average level bar
+                RoundedRectangle(cornerRadius: 2)
+                    .fill(
+                        LinearGradient(
+                            gradient: Gradient(colors: [.blue.opacity(0.7), .purple.opacity(0.7)]),
+                            startPoint: .bottom,
+                            endPoint: .top
+                        )
+                    )
+                    .frame(height: level.average * geometry.size.height)
+
+                // Peak level indicator
+                RoundedRectangle(cornerRadius: 2)
+                    .fill(
+                        LinearGradient(
+                            gradient: Gradient(colors: [.blue, .purple]),
+                            startPoint: .bottom,
+                            endPoint: .top
+                        )
+                    )
+                    .frame(height: 2)
+                    .offset(y: -level.peak * geometry.size.height + 1)
+                    .opacity(level.peak > 0.01 ? 1 : 0)
+            }
+            .frame(maxHeight: geometry.size.height, alignment: .bottom)
         }
     }
 }
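Note: VisualizerBar now draws two layers — a gradient bar for the running average and a thin 2pt cap offset to the peak position — a standard peak-hold meter layout. A hypothetical preview harness for inspecting it in isolation (assumed helper, not part of the commit):

    import SwiftUI

    struct VisualizerBar_Previews: PreviewProvider {
        static var previews: some View {
            // Average fills 40% of the height; the peak cap floats at 75%.
            VisualizerBar(level: VisualizerView.BarLevel(average: 0.4, peak: 0.75))
                .frame(width: 6, height: 60)
                .padding()
                .background(Color.black)
        }
    }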
@@ -95,7 +95,6 @@ struct VoiceInkApp: App {
                     WindowManager.shared.configureWindow(window)
                 })
                 .onDisappear {
-                    whisperState.audioEngine.stopAudioEngine()
                     whisperState.unloadModel()
                 }
             } else {
@@ -58,15 +58,14 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         case couldNotLocateModel
     }
 
-    private let modelsDirectory: URL
-    private let recordingsDirectory: URL
-    private var transcriptionStartTime: Date?
-    private var enhancementService: AIEnhancementService?
+    let modelsDirectory: URL
+    let recordingsDirectory: URL
+    private let enhancementService: AIEnhancementService?
     private let licenseViewModel: LicenseViewModel
+    private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "WhisperState")
+    private var transcriptionStartTime: Date?
     private var notchWindowManager: NotchWindowManager?
     private var miniWindowManager: MiniWindowManager?
-    var audioEngine: AudioEngine
-    private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "WhisperState")
 
     init(modelContext: ModelContext, enhancementService: AIEnhancementService? = nil) {
         self.modelContext = modelContext
@@ -74,7 +73,6 @@
         self.recordingsDirectory = FileManager.default.urls(for: .applicationSupportDirectory, in: .userDomainMask)[0]
             .appendingPathComponent("com.prakashjoshipax.VoiceInk")
             .appendingPathComponent("Recordings")
-        self.audioEngine = AudioEngine()
         self.enhancementService = enhancementService
         self.licenseViewModel = LicenseViewModel()
 
@@ -150,7 +148,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         await recorder.stopRecording()
         isRecording = false
         isVisualizerActive = false
-        audioEngine.stopAudioEngine()
         if let recordedFile {
             let duration = Date().timeIntervalSince(transcriptionStartTime ?? Date())
             await transcribeAudio(recordedFile, duration: duration)
@@ -166,10 +163,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
                                                  create: true)
                     .appending(path: "output.wav")
 
-                if !self.audioEngine.isRunning {
-                    self.audioEngine.startAudioEngine()
-                }
-
                 try await self.recorder.startRecording(toOutputFile: file, delegate: self)
 
                 self.isRecording = true
@@ -192,7 +185,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
                     self.messageLog += "\(error.localizedDescription)\n"
                     self.isRecording = false
                     self.isVisualizerActive = false
-                    self.audioEngine.stopAudioEngine()
                 }
             }
         } else {
@@ -479,30 +471,17 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
                 await toggleRecord()
             }
         } else {
-            // Serialize audio operations to prevent deadlocks
+            // Start recording first, then show UI
             Task {
-                do {
-                    // First start the audio engine
-                    await MainActor.run {
-                        audioEngine.startAudioEngine()
-                    }
-
-                    // Small delay to ensure audio system is ready
-                    try await Task.sleep(nanoseconds: 50_000_000) // 50ms
-
-                    // Now play the sound
-                    SoundManager.shared.playStartSound()
-
-                    // Show UI
-                    await MainActor.run {
-                        showRecorderPanel()
-                        isMiniRecorderVisible = true
-                    }
-
-                    // Finally start recording
-                    await toggleRecord()
-                } catch {
-                    logger.error("Error during recorder initialization: \(error)")
+                // Start recording immediately
+                await toggleRecord()
+
+                // Play sound and show UI after recording has started
+                SoundManager.shared.playStartSound()
+
+                await MainActor.run {
+                    showRecorderPanel()
+                    isMiniRecorderVisible = true
                 }
             }
         }
@@ -512,25 +491,21 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         logger.info("Showing recorder panel, type: \(self.recorderType)")
         if recorderType == "notch" {
             if notchWindowManager == nil {
-                notchWindowManager = NotchWindowManager(whisperState: self, audioEngine: audioEngine)
+                notchWindowManager = NotchWindowManager(whisperState: self, recorder: recorder)
                 logger.info("Created new notch window manager")
             }
             notchWindowManager?.show()
         } else {
             if miniWindowManager == nil {
-                miniWindowManager = MiniWindowManager(whisperState: self, audioEngine: audioEngine)
+                miniWindowManager = MiniWindowManager(whisperState: self, recorder: recorder)
                 logger.info("Created new mini window manager")
             }
             miniWindowManager?.show()
         }
-        // Audio engine is now started separately in handleToggleMiniRecorder
-        // SoundManager.shared.playStartSound() - Moved to handleToggleMiniRecorder
         logger.info("Recorder panel shown successfully")
     }
 
     private func hideRecorderPanel() {
-        audioEngine.stopAudioEngine()
-
         if isRecording {
             Task {
                 await toggleRecord()
@@ -542,30 +517,20 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         if isMiniRecorderVisible {
             await dismissMiniRecorder()
         } else {
-            // Start a parallel task for both UI and recording
-            Task {
-                // Play start sound first
-                SoundManager.shared.playStartSound()
-
-                // Start audio engine immediately - this can happen in parallel
-                audioEngine.startAudioEngine()
-
-                // Show UI (this is quick now that we removed animations)
-                await MainActor.run {
-                    showRecorderPanel() // Modified version that doesn't start audio engine
-                    isMiniRecorderVisible = true
-                }
-
-                // Start recording
-                await toggleRecord()
-            }
+            // Start recording first
+            await toggleRecord()
+
+            // Play sound and show UI after recording has started
+            SoundManager.shared.playStartSound()
+
+            await MainActor.run {
+                showRecorderPanel()
+                isMiniRecorderVisible = true
+            }
         }
     }
 
     private func cleanupResources() async {
-        audioEngine.stopAudioEngine()
         try? await Task.sleep(nanoseconds: 100_000_000)
 
         if !isRecording && !isProcessing {
            await whisperContext?.releaseResources()
            whisperContext = nil
@@ -616,7 +581,6 @@ class WhisperState: NSObject, ObservableObject, AVAudioRecorderDelegate {
         whisperContext = nil
         isModelLoaded = false
 
-        audioEngine.stopAudioEngine()
         if let recordedFile = recordedFile {
             try? FileManager.default.removeItem(at: recordedFile)
             self.recordedFile = nil
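Note: both toggle paths in WhisperState now follow the same order — start capture, then play the cue, then show the panel — so the first audio frames are not lost while the UI comes up. Condensed, the sequence looks like this (names mirror the diff; the wrapper function is illustrative, not part of the commit):

    // Illustrative wrapper showing the reordered start path.
    func startCaptureFlow() async {
        await toggleRecord()                    // 1. capture starts immediately
        SoundManager.shared.playStartSound()    // 2. cue plays once audio is flowing
        await MainActor.run {                   // 3. panel appears last, on the main actor
            showRecorderPanel()
            isMiniRecorderVisible = true
        }
    }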