Merge pull request #486 from Beingpax/switch-to-core-audio
Replace audio recorder with CoreAudio AUHAL
This commit is contained in:
commit
377168ac15
@ -469,7 +469,7 @@
|
||||
"CODE_SIGN_IDENTITY[sdk=macosx*]" = "Apple Development";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 167;
|
||||
CURRENT_PROJECT_VERSION = 168;
|
||||
DEVELOPMENT_ASSET_PATHS = "\"VoiceInk/Preview Content\"";
|
||||
DEVELOPMENT_TEAM = V6J6A3VWY2;
|
||||
ENABLE_HARDENED_RUNTIME = YES;
|
||||
@ -503,7 +503,7 @@
|
||||
"CODE_SIGN_IDENTITY[sdk=macosx*]" = "Apple Development";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 167;
|
||||
CURRENT_PROJECT_VERSION = 168;
|
||||
DEVELOPMENT_ASSET_PATHS = "\"VoiceInk/Preview Content\"";
|
||||
DEVELOPMENT_TEAM = V6J6A3VWY2;
|
||||
ENABLE_HARDENED_RUNTIME = YES;
|
||||
|
||||
@ -1,304 +0,0 @@
|
||||
import Foundation
|
||||
@preconcurrency import AVFoundation
|
||||
import CoreAudio
|
||||
import os
|
||||
|
||||
@MainActor
class AudioEngineRecorder: ObservableObject {
    private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "AudioEngineRecorder")

    private var audioEngine: AVAudioEngine?
    private var inputNode: AVAudioInputNode?

    // Shared with the audio-processing queue; every access is serialized by
    // fileWriteLock, hence the nonisolated(unsafe) opt-out from actor checking.
    nonisolated(unsafe) private var audioFile: AVAudioFile?
    nonisolated(unsafe) private var recordingFormat: AVAudioFormat?
    nonisolated(unsafe) private var converter: AVAudioConverter?

    private var isRecording = false
    private var recordingURL: URL?

    /// Latest RMS level in dBFS; -160.0 is the silence floor.
    @Published var currentAveragePower: Float = -160.0
    /// Latest peak level in dBFS; -160.0 is the silence floor.
    @Published var currentPeakPower: Float = -160.0

    private let tapBufferSize: AVAudioFrameCount = 4096
    private let tapBusNumber: AVAudioNodeBus = 0

    // Buffers are handed off the render tap onto this serial queue so format
    // conversion and file I/O never run on the audio thread.
    private let audioProcessingQueue = DispatchQueue(label: "com.prakashjoshipax.VoiceInk.audioProcessing", qos: .userInitiated)
    private let fileWriteLock = NSLock()

    /// Invoked when recording cannot be started even after retries.
    var onRecordingError: ((Error) -> Void)?

    private var validationTimer: Timer?
    private var hasReceivedValidBuffer = false

    /// Starts capturing the default input into `url` as 16 kHz mono Int16 PCM.
    /// A validation timer retries up to 2 times if no audio arrives shortly
    /// after start.
    /// - Parameters:
    ///   - url: Destination file; an existing file at this path is removed.
    ///   - retryCount: Internal retry counter; callers use the default 0.
    /// - Throws: `AudioEngineRecorderError` when any setup stage fails.
    func startRecording(toOutputFile url: URL, retryCount: Int = 0) throws {
        stopRecording()
        hasReceivedValidBuffer = false

        let engine = AVAudioEngine()
        audioEngine = engine

        let input = engine.inputNode
        inputNode = input

        let inputFormat = input.outputFormat(forBus: tapBusNumber)

        guard inputFormat.sampleRate > 0, inputFormat.channelCount > 0 else {
            logger.error("Invalid input format: sample rate or channel count is zero")
            throw AudioEngineRecorderError.invalidInputFormat
        }

        // 16 kHz mono Int16: the target format written to disk.
        guard let desiredFormat = AVAudioFormat(
            commonFormat: .pcmFormatInt16,
            sampleRate: 16000.0,
            channels: 1,
            interleaved: false
        ) else {
            logger.error("Failed to create desired recording format")
            throw AudioEngineRecorderError.invalidRecordingFormat
        }

        recordingURL = url

        let createdAudioFile: AVAudioFile
        do {
            if FileManager.default.fileExists(atPath: url.path) {
                try FileManager.default.removeItem(at: url)
            }

            createdAudioFile = try AVAudioFile(
                forWriting: url,
                settings: desiredFormat.settings,
                commonFormat: desiredFormat.commonFormat,
                interleaved: desiredFormat.isInterleaved
            )
        } catch {
            logger.error("Failed to create audio file: \(error.localizedDescription)")
            throw AudioEngineRecorderError.failedToCreateFile(error)
        }

        guard let audioConverter = AVAudioConverter(from: inputFormat, to: desiredFormat) else {
            logger.error("Failed to create audio format converter")
            throw AudioEngineRecorderError.failedToCreateConverter
        }

        // Publish the writer state to the processing queue under the lock.
        fileWriteLock.lock()
        recordingFormat = desiredFormat
        audioFile = createdAudioFile
        converter = audioConverter
        fileWriteLock.unlock()

        input.installTap(onBus: tapBusNumber, bufferSize: tapBufferSize, format: inputFormat) { [weak self] (buffer, time) in
            guard let self = self else { return }

            // Hop off the render thread before converting/writing.
            self.audioProcessingQueue.async {
                self.processAudioBuffer(buffer)
            }
        }

        engine.prepare()

        do {
            try engine.start()
            isRecording = true
            startValidationTimer(url: url, retryCount: retryCount)
        } catch {
            logger.error("Failed to start audio engine: \(error.localizedDescription)")
            input.removeTap(onBus: tapBusNumber)
            throw AudioEngineRecorderError.failedToStartEngine(error)
        }
    }

    /// After 1.5 s, checks that at least one buffer reached the file. If not,
    /// tears the session down and retries (max 2 attempts) before surfacing
    /// `recordingValidationFailed` via `onRecordingError`.
    private func startValidationTimer(url: URL, retryCount: Int) {
        validationTimer = Timer.scheduledTimer(withTimeInterval: 1.5, repeats: false) { [weak self] _ in
            guard let self = self else { return }

            let validationPassed = self.hasReceivedValidBuffer

            if !validationPassed {
                self.logger.warning("Recording validation failed")
                self.stopRecording()

                if retryCount < 2 {
                    self.logger.info("Retrying recording (attempt \(retryCount + 1)/2)...")
                    DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
                        do {
                            try self.startRecording(toOutputFile: url, retryCount: retryCount + 1)
                        } catch {
                            self.logger.error("Retry failed: \(error.localizedDescription)")
                            self.onRecordingError?(error)
                        }
                    }
                } else {
                    self.logger.error("Recording failed after 2 retry attempts")
                    self.onRecordingError?(AudioEngineRecorderError.recordingValidationFailed)
                }
            } else {
                self.logger.info("Recording validation successful")
            }
        }
    }

    /// Stops the engine, drains in-flight buffers, closes the file, and
    /// resets all state. Safe to call when not recording.
    func stopRecording() {
        guard isRecording else { return }

        validationTimer?.invalidate()
        validationTimer = nil

        inputNode?.removeTap(onBus: tapBusNumber)
        audioEngine?.stop()
        // Barrier: wait for any buffer already queued to finish writing
        // before the writer state is torn down.
        audioProcessingQueue.sync { }

        fileWriteLock.lock()
        audioFile = nil
        converter = nil
        recordingFormat = nil
        fileWriteLock.unlock()

        audioEngine = nil
        inputNode = nil
        recordingURL = nil
        isRecording = false
        hasReceivedValidBuffer = false

        // FIX: reset meters to the silence floor (-160 dBFS). The previous
        // reset value of 0.0 dBFS means full-scale audio, so idle meters
        // appeared maxed out and disagreed with the initial values above.
        currentAveragePower = -160.0
        currentPeakPower = -160.0
    }

    nonisolated private func processAudioBuffer(_ buffer: AVAudioPCMBuffer) {
        updateMeters(from: buffer)
        writeBufferToFile(buffer)
    }

    /// Converts one tap buffer to the target format and appends it to the
    /// file. Runs on audioProcessingQueue; writer state is read under the lock.
    nonisolated private func writeBufferToFile(_ buffer: AVAudioPCMBuffer) {
        fileWriteLock.lock()
        defer { fileWriteLock.unlock() }

        guard let audioFile = audioFile,
              let converter = converter,
              let format = recordingFormat else { return }

        guard buffer.frameLength > 0 else {
            logTapError(message: "Empty buffer received")
            return
        }

        let inputSampleRate = buffer.format.sampleRate
        let outputSampleRate = format.sampleRate
        let ratio = outputSampleRate / inputSampleRate
        // FIX: clamp to at least one frame so very short buffers combined
        // with downsampling (e.g. 48 kHz -> 16 kHz) cannot truncate the
        // capacity to zero.
        let outputCapacity = max(1, AVAudioFrameCount(Double(buffer.frameLength) * ratio))

        guard let convertedBuffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: outputCapacity) else {
            logTapError(message: "Failed to create converted buffer")
            return
        }

        var error: NSError?
        var hasProvidedBuffer = false

        // Feed the source buffer exactly once; report .noDataNow afterwards so
        // the converter drains what it has instead of waiting for more input.
        converter.convert(to: convertedBuffer, error: &error) { inNumPackets, outStatus in
            if hasProvidedBuffer {
                outStatus.pointee = .noDataNow
                return nil
            } else {
                hasProvidedBuffer = true
                outStatus.pointee = .haveData
                return buffer
            }
        }

        if let error = error {
            logTapError(message: "Audio conversion failed: \(error.localizedDescription)")
            return
        }

        do {
            try audioFile.write(from: convertedBuffer)
            // First successful write marks the session as validated.
            Task { @MainActor in
                if !self.hasReceivedValidBuffer {
                    self.hasReceivedValidBuffer = true
                }
            }
        } catch {
            logTapError(message: "File write failed: \(error.localizedDescription)")
        }
    }

    nonisolated private func logTapError(message: String) {
        logger.error("\(message)")
    }

    /// Computes RMS/peak levels in dBFS from channel 0 only and publishes
    /// them on the main actor.
    nonisolated private func updateMeters(from buffer: AVAudioPCMBuffer) {
        guard let channelData = buffer.floatChannelData else { return }

        let channelCount = Int(buffer.format.channelCount)
        let frameLength = Int(buffer.frameLength)

        guard channelCount > 0, frameLength > 0 else { return }

        let channel = channelData[0]
        var sum: Float = 0.0
        var peak: Float = 0.0

        for frame in 0..<frameLength {
            let sample = channel[frame]
            let absSample = abs(sample)

            if absSample > peak {
                peak = absSample
            }

            sum += sample * sample
        }

        let rms = sqrt(sum / Float(frameLength))

        // Floor at 1e-6 to avoid log10(0) == -infinity.
        let averagePowerDb = 20.0 * log10(max(rms, 0.000001))
        let peakPowerDb = 20.0 * log10(max(peak, 0.000001))

        Task { @MainActor in
            self.currentAveragePower = averagePowerDb
            self.currentPeakPower = peakPowerDb
        }
    }

    var isCurrentlyRecording: Bool { isRecording }
    var currentRecordingURL: URL? { recordingURL }
}
|
||||
|
||||
// MARK: - Error Types

/// Failure modes of `AudioEngineRecorder`, exposing user-presentable
/// messages through `LocalizedError`.
enum AudioEngineRecorderError: LocalizedError {
    case invalidInputFormat
    case invalidRecordingFormat
    case failedToCreateFile(Error)
    case failedToCreateConverter
    case failedToStartEngine(Error)
    case bufferConversionFailed
    case audioConversionError(Error)
    case fileWriteFailed(Error)
    case recordingValidationFailed

    var errorDescription: String? {
        let message: String
        switch self {
        case .invalidInputFormat:
            message = "Invalid audio input format from device"
        case .invalidRecordingFormat:
            message = "Failed to create recording format"
        case .failedToCreateFile(let underlying):
            message = "Failed to create audio file: \(underlying.localizedDescription)"
        case .failedToCreateConverter:
            message = "Failed to create audio format converter"
        case .failedToStartEngine(let underlying):
            message = "Failed to start audio engine: \(underlying.localizedDescription)"
        case .bufferConversionFailed:
            message = "Failed to create buffer for audio conversion"
        case .audioConversionError(let underlying):
            message = "Audio format conversion failed: \(underlying.localizedDescription)"
        case .fileWriteFailed(let underlying):
            message = "Failed to write audio data to file: \(underlying.localizedDescription)"
        case .recordingValidationFailed:
            message = "Recording failed to start - no valid audio received from device"
        }
        return message
    }
}
|
||||
900
VoiceInk/CoreAudioRecorder.swift
Normal file
900
VoiceInk/CoreAudioRecorder.swift
Normal file
@ -0,0 +1,900 @@
|
||||
import Foundation
|
||||
import CoreAudio
|
||||
import AudioToolbox
|
||||
import AVFoundation
|
||||
import os
|
||||
|
||||
// MARK: - Core Audio Recorder (AUHAL-based, does not change system default device)
|
||||
final class CoreAudioRecorder {
|
||||
|
||||
// MARK: - Properties
|
||||
|
||||
private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "CoreAudioRecorder")
|
||||
|
||||
private var audioUnit: AudioUnit?
|
||||
private var audioFile: ExtAudioFileRef?
|
||||
|
||||
private var isRecording = false
|
||||
private var currentDeviceID: AudioDeviceID = 0
|
||||
private var recordingURL: URL?
|
||||
|
||||
// Device format (what the hardware provides)
|
||||
private var deviceFormat = AudioStreamBasicDescription()
|
||||
// Output format (16kHz mono PCM Int16 for transcription)
|
||||
private var outputFormat = AudioStreamBasicDescription()
|
||||
|
||||
// Conversion buffer
|
||||
private var conversionBuffer: UnsafeMutablePointer<Int16>?
|
||||
private var conversionBufferSize: UInt32 = 0
|
||||
|
||||
// Audio metering (thread-safe)
|
||||
private let meterLock = NSLock()
|
||||
private var _averagePower: Float = -160.0
|
||||
private var _peakPower: Float = -160.0
|
||||
|
||||
/// Most recent average (RMS) power in dBFS, read under `meterLock` so it is
/// safe to poll from any thread.
var averagePower: Float {
    meterLock.lock()
    let value = _averagePower
    meterLock.unlock()
    return value
}

/// Most recent peak power in dBFS, read under `meterLock` so it is safe to
/// poll from any thread.
var peakPower: Float {
    meterLock.lock()
    let value = _peakPower
    meterLock.unlock()
    return value
}
|
||||
|
||||
// Pre-allocated render buffer (to avoid malloc in real-time callback)
|
||||
private var renderBuffer: UnsafeMutablePointer<Float32>?
|
||||
private var renderBufferSize: UInt32 = 0
|
||||
|
||||
// MARK: - Initialization
|
||||
|
||||
// No setup at construction time: the AUHAL pipeline and file are created
// lazily in startRecording(toOutputFile:deviceID:).
init() {}

// Ensure the AudioUnit, open file, and scratch buffers are released even if
// the owner never calls stopRecording() explicitly.
deinit {
    stopRecording()
}
|
||||
|
||||
// MARK: - Public Interface
|
||||
|
||||
/// Starts recording from the specified device to the given URL (WAV format).
/// Any in-flight session is stopped first; throws if the device is invalid,
/// has disappeared, or any stage of AUHAL pipeline setup fails.
func startRecording(toOutputFile url: URL, deviceID: AudioDeviceID) throws {
    stopRecording()

    guard deviceID != 0 else {
        logger.error("Cannot start recording - no valid audio device (deviceID is 0)")
        throw CoreAudioRecorderError.failedToSetDevice(status: 0)
    }

    // Re-check the device right before setup: it may have been unplugged.
    guard isDeviceAvailable(deviceID) else {
        logger.error("Cannot start recording - device \(deviceID) is no longer available")
        throw CoreAudioRecorderError.deviceNotAvailable
    }

    currentDeviceID = deviceID
    recordingURL = url

    logger.notice("🎙️ Starting recording from device \(deviceID)")
    logDeviceDetails(deviceID: deviceID)

    // Pipeline setup, in dependency order:
    try createAudioUnit()         // 1. AUHAL instance, input-only
    try setInputDevice(deviceID)  // 2. bind device (does NOT change system default)
    try configureFormats()        // 3. device/callback formats + scratch buffers
    try setupInputCallback()      // 4. render callback -> handleInputBuffer
    try createOutputFile(at: url) // 5. WAV file sink
    try startAudioUnit()          // 6. initialize + start

    isRecording = true
}
|
||||
|
||||
/// Stops the current recording: tears down the AudioUnit, closes the output
/// file, frees scratch buffers, and resets session state and meters.
/// Safe to call repeatedly or when nothing is recording.
func stopRecording() {
    guard isRecording || audioUnit != nil else { return }

    // Stop and dispose the AudioUnit first so the render callback stops
    // firing before the file and buffers go away.
    if let unit = audioUnit {
        AudioOutputUnitStop(unit)
        AudioComponentInstanceDispose(unit)
        audioUnit = nil
    }

    // Close the output file.
    if let file = audioFile {
        ExtAudioFileDispose(file)
        audioFile = nil
    }

    // Release the Int16 conversion scratch buffer.
    conversionBuffer?.deallocate()
    conversionBuffer = nil
    conversionBufferSize = 0

    // Release the Float32 render scratch buffer.
    renderBuffer?.deallocate()
    renderBuffer = nil
    renderBufferSize = 0

    isRecording = false
    currentDeviceID = 0
    recordingURL = nil

    // Drop meters back to the silence floor.
    meterLock.lock()
    _averagePower = -160.0
    _peakPower = -160.0
    meterLock.unlock()
}
|
||||
|
||||
/// Whether a capture session is currently active.
var isCurrentlyRecording: Bool { return isRecording }
/// URL of the file being written, or nil when idle.
var currentRecordingURL: URL? { return recordingURL }
/// Device currently being captured from (0 when idle).
var currentDevice: AudioDeviceID { return currentDeviceID }
|
||||
|
||||
/// Switches to a new input device mid-recording without stopping the file write
///
/// Sequence: stop -> uninitialize -> rebind device -> re-read hardware format ->
/// reset callback format -> grow scratch buffers if needed -> reinitialize ->
/// restart. The ExtAudioFile is never touched, so the output file stays
/// continuous across the switch.
/// - Throws: `CoreAudioRecorderError` if not recording or any AUHAL call fails.
func switchDevice(to newDeviceID: AudioDeviceID) throws {
    guard isRecording, let unit = audioUnit else {
        throw CoreAudioRecorderError.audioUnitNotInitialized
    }

    // Don't switch if it's the same device
    guard newDeviceID != currentDeviceID else { return }

    let oldDeviceID = currentDeviceID
    logger.notice("🎙️ Switching recording device from \(oldDeviceID) to \(newDeviceID)")

    // Step 1: Stop the AudioUnit (but keep file open)
    // Stop/uninitialize failures are logged but not fatal: the unit may
    // already be in the target state.
    var status = AudioOutputUnitStop(unit)
    if status != noErr {
        logger.warning("🎙️ Warning: AudioOutputUnitStop returned \(status)")
    }

    // Step 2: Uninitialize to allow reconfiguration
    status = AudioUnitUninitialize(unit)
    if status != noErr {
        logger.warning("🎙️ Warning: AudioUnitUninitialize returned \(status)")
    }

    // Step 3: Set the new device
    var device = newDeviceID
    status = AudioUnitSetProperty(
        unit,
        kAudioOutputUnitProperty_CurrentDevice,
        kAudioUnitScope_Global,
        0,
        &device,
        UInt32(MemoryLayout<AudioDeviceID>.size)
    )

    if status != noErr {
        // Try to recover by restarting with old device
        // NOTE(review): the recovery-path return codes are deliberately
        // ignored — this is a best-effort restore before surfacing the
        // original failure to the caller.
        logger.error("Failed to set new device: \(status). Attempting recovery...")
        var recoveryDevice = oldDeviceID
        AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &recoveryDevice, UInt32(MemoryLayout<AudioDeviceID>.size))
        AudioUnitInitialize(unit)
        AudioOutputUnitStart(unit)
        throw CoreAudioRecorderError.failedToSetDevice(status: status)
    }

    // Step 4: Get new device format
    // (input scope, element 1 = the hardware side of the AUHAL input element)
    var formatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
    var newDeviceFormat = AudioStreamBasicDescription()
    status = AudioUnitGetProperty(
        unit,
        kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Input,
        1,
        &newDeviceFormat,
        &formatSize
    )

    if status != noErr {
        throw CoreAudioRecorderError.failedToGetDeviceFormat(status: status)
    }

    // Step 5: Configure callback format for new device
    // Packed interleaved Float32 at the device's native rate/channel count.
    var callbackFormat = AudioStreamBasicDescription(
        mSampleRate: newDeviceFormat.mSampleRate,
        mFormatID: kAudioFormatLinearPCM,
        mFormatFlags: kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked,
        mBytesPerPacket: UInt32(MemoryLayout<Float32>.size) * newDeviceFormat.mChannelsPerFrame,
        mFramesPerPacket: 1,
        mBytesPerFrame: UInt32(MemoryLayout<Float32>.size) * newDeviceFormat.mChannelsPerFrame,
        mChannelsPerFrame: newDeviceFormat.mChannelsPerFrame,
        mBitsPerChannel: 32,
        mReserved: 0
    )

    status = AudioUnitSetProperty(
        unit,
        kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Output,
        1,
        &callbackFormat,
        UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
    )

    if status != noErr {
        throw CoreAudioRecorderError.failedToSetFormat(status: status)
    }

    // Step 6: Reallocate buffers if needed
    // Buffers only grow; the AudioUnit is stopped here, so the render
    // callback cannot be using them while we reallocate.
    let maxFrames: UInt32 = 4096
    let bufferSamples = maxFrames * newDeviceFormat.mChannelsPerFrame
    if bufferSamples > renderBufferSize {
        renderBuffer?.deallocate()
        renderBuffer = UnsafeMutablePointer<Float32>.allocate(capacity: Int(bufferSamples))
        renderBufferSize = bufferSamples
    }

    // Reallocate conversion buffer if new sample rate requires more space
    let maxOutputFrames = UInt32(Double(maxFrames) * (outputFormat.mSampleRate / newDeviceFormat.mSampleRate)) + 1
    if maxOutputFrames > conversionBufferSize {
        conversionBuffer?.deallocate()
        conversionBuffer = UnsafeMutablePointer<Int16>.allocate(capacity: Int(maxOutputFrames))
        conversionBufferSize = maxOutputFrames
    }

    // Update stored format
    deviceFormat = newDeviceFormat
    currentDeviceID = newDeviceID

    // Step 7: Reinitialize and restart
    status = AudioUnitInitialize(unit)
    if status != noErr {
        throw CoreAudioRecorderError.failedToInitialize(status: status)
    }

    status = AudioOutputUnitStart(unit)
    if status != noErr {
        throw CoreAudioRecorderError.failedToStart(status: status)
    }

    logger.notice("🎙️ Successfully switched to device \(newDeviceID)")
}
|
||||
|
||||
// MARK: - AudioUnit Setup
|
||||
|
||||
/// Instantiates the AUHAL output unit and configures it for capture only:
/// IO enabled on element 1 (input), disabled on element 0 (output).
private func createAudioUnit() throws {
    var componentDescription = AudioComponentDescription(
        componentType: kAudioUnitType_Output,
        componentSubType: kAudioUnitSubType_HALOutput,
        componentManufacturer: kAudioUnitManufacturer_Apple,
        componentFlags: 0,
        componentFlagsMask: 0
    )

    guard let halComponent = AudioComponentFindNext(nil, &componentDescription) else {
        logger.error("AudioUnit not found - HAL Output component unavailable")
        throw CoreAudioRecorderError.audioUnitNotFound
    }

    var newUnit: AudioUnit?
    var result = AudioComponentInstanceNew(halComponent, &newUnit)
    guard result == noErr, let createdUnit = newUnit else {
        logger.error("Failed to create AudioUnit instance: \(result)")
        throw CoreAudioRecorderError.failedToCreateAudioUnit(status: result)
    }

    self.audioUnit = createdUnit

    // Element 1 carries input; switch IO on there.
    var flagOn: UInt32 = 1
    result = AudioUnitSetProperty(
        createdUnit,
        kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Input,
        1, // Element 1 = input
        &flagOn,
        UInt32(MemoryLayout<UInt32>.size)
    )
    guard result == noErr else {
        logger.error("Failed to enable audio input: \(result)")
        throw CoreAudioRecorderError.failedToEnableInput(status: result)
    }

    // Element 0 carries output; we never render to hardware, so switch it off.
    var flagOff: UInt32 = 0
    result = AudioUnitSetProperty(
        createdUnit,
        kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Output,
        0, // Element 0 = output
        &flagOff,
        UInt32(MemoryLayout<UInt32>.size)
    )
    guard result == noErr else {
        logger.error("Failed to disable audio output: \(result)")
        throw CoreAudioRecorderError.failedToDisableOutput(status: result)
    }
}
|
||||
|
||||
/// Binds the AUHAL instance to one specific capture device. This targets
/// only our unit — the system-wide default input device is untouched.
private func setInputDevice(_ deviceID: AudioDeviceID) throws {
    guard let audioUnit = audioUnit else {
        throw CoreAudioRecorderError.audioUnitNotInitialized
    }

    var targetDevice = deviceID
    let result = AudioUnitSetProperty(
        audioUnit,
        kAudioOutputUnitProperty_CurrentDevice,
        kAudioUnitScope_Global,
        0,
        &targetDevice,
        UInt32(MemoryLayout<AudioDeviceID>.size)
    )

    guard result == noErr else {
        logger.error("Failed to set input device \(deviceID): \(result)")
        throw CoreAudioRecorderError.failedToSetDevice(status: result)
    }
}
|
||||
|
||||
/// Reads the device's native stream format, fixes the output format at
/// 16 kHz mono Int16 PCM, installs a Float32 callback format at the device
/// rate, and pre-allocates the scratch buffers used by the render callback.
/// Must run after setInputDevice(_:) and before AudioUnitInitialize.
private func configureFormats() throws {
    guard let audioUnit = audioUnit else {
        throw CoreAudioRecorderError.audioUnitNotInitialized
    }

    // Get the device's native format (input scope, element 1)
    var formatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
    var status = AudioUnitGetProperty(
        audioUnit,
        kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Input,
        1,
        &deviceFormat,
        &formatSize
    )

    if status != noErr {
        logger.error("Failed to get device format: \(status)")
        throw CoreAudioRecorderError.failedToGetDeviceFormat(status: status)
    }

    // Configure output format: 16kHz, mono, PCM Int16
    // (2 bytes per frame = one Int16 sample, packed signed integer)
    outputFormat = AudioStreamBasicDescription(
        mSampleRate: 16000.0,
        mFormatID: kAudioFormatLinearPCM,
        mFormatFlags: kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
        mBytesPerPacket: 2,
        mFramesPerPacket: 1,
        mBytesPerFrame: 2,
        mChannelsPerFrame: 1,
        mBitsPerChannel: 16,
        mReserved: 0
    )

    // Set callback format (Float32 for processing, then convert to Int16 for file)
    // Kept at the device's own sample rate and channel count; only the
    // sample type is forced to packed interleaved Float32.
    var callbackFormat = AudioStreamBasicDescription(
        mSampleRate: deviceFormat.mSampleRate,
        mFormatID: kAudioFormatLinearPCM,
        mFormatFlags: kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked,
        mBytesPerPacket: UInt32(MemoryLayout<Float32>.size) * deviceFormat.mChannelsPerFrame,
        mFramesPerPacket: 1,
        mBytesPerFrame: UInt32(MemoryLayout<Float32>.size) * deviceFormat.mChannelsPerFrame,
        mChannelsPerFrame: deviceFormat.mChannelsPerFrame,
        mBitsPerChannel: 32,
        mReserved: 0
    )

    status = AudioUnitSetProperty(
        audioUnit,
        kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Output,
        1,
        &callbackFormat,
        UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
    )

    if status != noErr {
        logger.error("Failed to set audio format: \(status)")
        throw CoreAudioRecorderError.failedToSetFormat(status: status)
    }

    // Log format details
    let devSampleRate = deviceFormat.mSampleRate
    let devChannels = deviceFormat.mChannelsPerFrame
    let devBits = deviceFormat.mBitsPerChannel
    let outSampleRate = outputFormat.mSampleRate
    let outChannels = outputFormat.mChannelsPerFrame
    let outBits = outputFormat.mBitsPerChannel
    logger.notice("🎙️ Device format: sampleRate=\(devSampleRate), channels=\(devChannels), bitsPerChannel=\(devBits)")
    logger.notice("🎙️ Output format: sampleRate=\(outSampleRate), channels=\(outChannels), bitsPerChannel=\(outBits)")
    if devSampleRate != outSampleRate {
        logger.notice("🎙️ Converting: \(Int(devSampleRate))Hz → \(Int(outSampleRate))Hz")
    }

    // Pre-allocate buffers for real-time callback (avoid malloc in callback)
    // Sized for up to 4096 frames of interleaved Float32 across all channels.
    let maxFrames: UInt32 = 4096
    let bufferSamples = maxFrames * deviceFormat.mChannelsPerFrame
    renderBuffer = UnsafeMutablePointer<Float32>.allocate(capacity: Int(bufferSamples))
    renderBufferSize = bufferSamples

    // Pre-allocate conversion buffer (output is always smaller due to downsampling)
    // NOTE(review): "+ 1" guards against rounding; assumes device sample rate
    // >= 16 kHz so the ratio is <= 1 — TODO confirm for low-rate devices.
    let maxOutputFrames = UInt32(Double(maxFrames) * (outputFormat.mSampleRate / deviceFormat.mSampleRate)) + 1
    conversionBuffer = UnsafeMutablePointer<Int16>.allocate(capacity: Int(maxOutputFrames))
    conversionBufferSize = maxOutputFrames
}
|
||||
|
||||
/// Registers `inputCallback` as the AUHAL input callback, handing `self`
/// across as an unretained opaque context pointer. Unretained is safe here:
/// deinit calls stopRecording(), which disposes the unit before this object
/// can go away.
private func setupInputCallback() throws {
    guard let audioUnit = audioUnit else {
        throw CoreAudioRecorderError.audioUnitNotInitialized
    }

    var callback = AURenderCallbackStruct(
        inputProc: inputCallback,
        inputProcRefCon: Unmanaged.passUnretained(self).toOpaque()
    )

    let result = AudioUnitSetProperty(
        audioUnit,
        kAudioOutputUnitProperty_SetInputCallback,
        kAudioUnitScope_Global,
        0,
        &callback,
        UInt32(MemoryLayout<AURenderCallbackStruct>.size)
    )

    guard result == noErr else {
        logger.error("Failed to set input callback: \(result)")
        throw CoreAudioRecorderError.failedToSetCallback(status: result)
    }
}
|
||||
|
||||
/// Creates the WAV output file via ExtAudioFile and sets the client data
/// format (the format we hand to ExtAudioFileWrite) to `outputFormat`.
/// - Throws: `CoreAudioRecorderError.failedToCreateFile` /
///   `.failedToSetFileFormat` with the underlying OSStatus.
private func createOutputFile(at url: URL) throws {
    // Remove existing file if any
    if FileManager.default.fileExists(atPath: url.path) {
        try FileManager.default.removeItem(at: url)
    }

    // Create ExtAudioFile for writing
    var fileRef: ExtAudioFileRef?
    var status = ExtAudioFileCreateWithURL(
        url as CFURL,
        kAudioFileWAVEType,
        &outputFormat,
        nil,
        AudioFileFlags.eraseFile.rawValue,
        &fileRef
    )

    // FIX: bind the file handle with a guard instead of the later `fileRef!`
    // force unwrap.
    guard status == noErr, let file = fileRef else {
        logger.error("Failed to create audio file at \(url.path): \(status)")
        throw CoreAudioRecorderError.failedToCreateFile(status: status)
    }

    // Set client format (what we'll write)
    status = ExtAudioFileSetProperty(
        file,
        kExtAudioFileProperty_ClientDataFormat,
        UInt32(MemoryLayout<AudioStreamBasicDescription>.size),
        &outputFormat
    )

    if status != noErr {
        logger.error("Failed to set file format: \(status)")
        // FIX: dispose the file before throwing; previously it was stored in
        // `audioFile` and left open on this failure path.
        ExtAudioFileDispose(file)
        throw CoreAudioRecorderError.failedToSetFileFormat(status: status)
    }

    // Only publish the handle once it is fully configured.
    audioFile = file
}
|
||||
|
||||
/// Initializes the fully configured AUHAL unit and starts the capture
/// callback firing.
private func startAudioUnit() throws {
    guard let audioUnit = audioUnit else {
        throw CoreAudioRecorderError.audioUnitNotInitialized
    }

    var result = AudioUnitInitialize(audioUnit)
    guard result == noErr else {
        logger.error("Failed to initialize AudioUnit: \(result)")
        throw CoreAudioRecorderError.failedToInitialize(status: result)
    }

    result = AudioOutputUnitStart(audioUnit)
    guard result == noErr else {
        logger.error("Failed to start AudioUnit: \(result)")
        throw CoreAudioRecorderError.failedToStart(status: result)
    }
}
|
||||
|
||||
// MARK: - Input Callback
|
||||
|
||||
/// C-convention render callback: recovers the recorder from the opaque
/// context pointer and forwards to instance-level handling. The output
/// buffer list argument is unused for an input callback.
private let inputCallback: AURenderCallback = { inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, _ in
    let recorder = Unmanaged<CoreAudioRecorder>.fromOpaque(inRefCon).takeUnretainedValue()
    return recorder.handleInputBuffer(
        ioActionFlags: ioActionFlags,
        inTimeStamp: inTimeStamp,
        inBusNumber: inBusNumber,
        inNumberFrames: inNumberFrames
    )
}
|
||||
|
||||
/// Real-time input path: pulls one buffer of Float32 audio from the AUHAL
/// unit into the pre-allocated render buffer, updates the level meters, and
/// converts/writes it to the output file. Runs on the audio I/O thread, so
/// it performs no allocation and returns early rather than failing loudly.
private func handleInputBuffer(
    ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
    inTimeStamp: UnsafePointer<AudioTimeStamp>,
    inBusNumber: UInt32,
    inNumberFrames: UInt32
) -> OSStatus {

    // Bail quietly when torn down mid-flight (stopRecording may race a
    // final callback).
    guard let audioUnit = audioUnit, isRecording, let renderBuf = renderBuffer else {
        return noErr
    }

    // Use pre-allocated buffer for input data
    let channelCount = deviceFormat.mChannelsPerFrame
    let requiredSamples = inNumberFrames * channelCount

    // Safety check - shouldn't happen with 4096 max frames
    guard requiredSamples <= renderBufferSize else {
        return noErr
    }

    let bytesPerFrame = UInt32(MemoryLayout<Float32>.size) * channelCount
    let bufferSize = inNumberFrames * bytesPerFrame

    // Single interleaved buffer pointing at our scratch storage.
    var bufferList = AudioBufferList(
        mNumberBuffers: 1,
        mBuffers: AudioBuffer(
            mNumberChannels: channelCount,
            mDataByteSize: bufferSize,
            mData: renderBuf
        )
    )

    // Render audio from the input
    let status = AudioUnitRender(
        audioUnit,
        ioActionFlags,
        inTimeStamp,
        inBusNumber,
        inNumberFrames,
        &bufferList
    )

    if status != noErr {
        return status
    }

    // Calculate audio meters from input buffer
    calculateMeters(from: &bufferList, frameCount: inNumberFrames)

    // Convert and write to file
    convertAndWriteToFile(inputBuffer: &bufferList, frameCount: inNumberFrames)

    return noErr
}
|
||||
|
||||
/// Computes RMS and peak levels in dBFS over all interleaved samples in the
/// buffer and stores them under `meterLock` for cross-thread reads.
private func calculateMeters(from bufferList: inout AudioBufferList, frameCount: UInt32) {
    guard frameCount > 0, let raw = bufferList.mBuffers.mData else { return }

    let channels = Int(deviceFormat.mChannelsPerFrame)
    let sampleCount = Int(frameCount) * channels
    guard sampleCount > 0 else { return }

    let samples = raw.assumingMemoryBound(to: Float32.self)

    var energy: Float = 0.0
    var maxMagnitude: Float = 0.0
    for index in 0..<sampleCount {
        let magnitude = abs(samples[index])
        energy += magnitude * magnitude
        if magnitude > maxMagnitude {
            maxMagnitude = magnitude
        }
    }

    let rms = sqrt(energy / Float(sampleCount))
    // Floor at 1e-6 to keep log10 finite on silence.
    let avgDb = 20.0 * log10(max(rms, 0.000001))
    let peakDb = 20.0 * log10(max(maxMagnitude, 0.000001))

    meterLock.lock()
    _averagePower = avgDb
    _peakPower = peakDb
    meterLock.unlock()
}
|
||||
|
||||
/// Converts one render callback's worth of Float32 multi-channel input into
/// 16-bit mono PCM and appends it to the open output file.
///
/// - Parameters:
///   - inputBuffer: Buffer list whose first buffer holds the samples just
///     rendered from the input AUHAL. Treated as interleaved Float32 laid
///     out per `deviceFormat` — NOTE(review): confirm interleaving matches
///     the stream description used at setup.
///   - frameCount: Number of input frames contained in `inputBuffer`.
///
/// Intended for the real-time render path: all converted samples go through
/// the pre-allocated `conversionBuffer`; silently drops the block when the
/// buffer is missing or too small.
private func convertAndWriteToFile(inputBuffer: inout AudioBufferList, frameCount: UInt32) {
    // Nothing to do if no output file is currently open.
    guard let file = audioFile else { return }

    let inputChannels = deviceFormat.mChannelsPerFrame
    let inputSampleRate = deviceFormat.mSampleRate
    let outputSampleRate = outputFormat.mSampleRate

    // Get input samples
    guard let inputData = inputBuffer.mBuffers.mData else { return }
    let inputSamples = inputData.assumingMemoryBound(to: Float32.self)

    // Calculate output frame count after sample rate conversion
    // (truncating; e.g. 48 kHz -> 16 kHz yields frameCount / 3).
    let ratio = outputSampleRate / inputSampleRate
    let outputFrameCount = UInt32(Double(frameCount) * ratio)

    // Bail out if there is nothing to write or the pre-allocated
    // conversion buffer cannot hold the result.
    guard outputFrameCount > 0,
          let outputBuffer = conversionBuffer,
          outputFrameCount <= conversionBufferSize else { return }

    // Convert Float32 multi-channel → Int16 mono (with sample rate conversion if needed)
    if inputSampleRate == outputSampleRate {
        // Direct conversion, just format change and channel mixing
        for i in 0..<Int(frameCount) {
            var sample: Float32 = 0
            // Mix all channels to mono by averaging
            for ch in 0..<Int(inputChannels) {
                sample += inputSamples[i * Int(inputChannels) + ch]
            }
            sample /= Float32(inputChannels)

            // Convert to Int16 with clipping
            let scaled = sample * 32767.0
            let clipped = max(-32768.0, min(32767.0, scaled))
            outputBuffer[i] = Int16(clipped)
        }
    } else {
        // Sample rate conversion needed - use linear interpolation
        // between the two nearest input frames.
        for i in 0..<Int(outputFrameCount) {
            let inputIndex = Double(i) / ratio
            let inputIndexInt = Int(inputIndex)
            let frac = Float32(inputIndex - Double(inputIndexInt))

            var sample: Float32 = 0
            // Clamp both taps so the last output frames never read past
            // the end of the input block.
            let idx1 = min(inputIndexInt, Int(frameCount) - 1)
            let idx2 = min(inputIndexInt + 1, Int(frameCount) - 1)

            // Mix channels and interpolate
            for ch in 0..<Int(inputChannels) {
                let s1 = inputSamples[idx1 * Int(inputChannels) + ch]
                let s2 = inputSamples[idx2 * Int(inputChannels) + ch]
                sample += s1 + frac * (s2 - s1)
            }
            sample /= Float32(inputChannels)

            // Convert to Int16
            let scaled = sample * 32767.0
            let clipped = max(-32768.0, min(32767.0, scaled))
            outputBuffer[i] = Int16(clipped)
        }
    }

    // Write to file (mono Int16 ⇒ 2 bytes per frame)
    var outputBufferList = AudioBufferList(
        mNumberBuffers: 1,
        mBuffers: AudioBuffer(
            mNumberChannels: 1,
            mDataByteSize: outputFrameCount * 2,
            mData: outputBuffer
        )
    )

    let writeStatus = ExtAudioFileWrite(file, outputFrameCount, &outputBufferList)
    if writeStatus != noErr {
        logger.error("🎙️ ExtAudioFileWrite failed with status: \(writeStatus)")
    }
}
|
||||
|
||||
// MARK: - Device Info Logging
|
||||
|
||||
/// Logs human-readable details for `deviceID` — name, UID, transport,
/// manufacturer, and (when available) the buffer frame size with an
/// estimated latency — to aid field diagnostics.
private func logDeviceDetails(deviceID: AudioDeviceID) {
    let name = getDeviceStringProperty(deviceID: deviceID, selector: kAudioDevicePropertyDeviceNameCFString) ?? "Unknown"
    let uid = getDeviceStringProperty(deviceID: deviceID, selector: kAudioDevicePropertyDeviceUID) ?? "Unknown"
    let transport = getTransportType(deviceID: deviceID)
    let maker = getDeviceStringProperty(deviceID: deviceID, selector: kAudioDevicePropertyDeviceManufacturerCFString) ?? "Unknown"

    logger.notice("🎙️ Device info: name=\(name), uid=\(uid)")
    logger.notice("🎙️ Device details: transport=\(transport), manufacturer=\(maker)")

    guard let frames = getBufferFrameSize(deviceID: deviceID) else { return }
    // Rough latency estimate only — assumes a 48 kHz sample rate.
    let approxLatencyMs = (Double(frames) / 48000.0) * 1000.0
    logger.notice("🎙️ Buffer size: \(frames) frames, ~latency: \(String(format: "%.1f", approxLatencyMs))ms")
}
|
||||
|
||||
/// Reads a CFString-valued Core Audio property from `deviceID` and bridges
/// it to a Swift `String`.
///
/// - Parameters:
///   - deviceID: The audio object to query.
///   - selector: Which string property to read (e.g. device name or UID).
/// - Returns: The property value, or `nil` if the query fails.
private func getDeviceStringProperty(deviceID: AudioDeviceID, selector: AudioObjectPropertySelector) -> String? {
    var address = AudioObjectPropertyAddress(
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )

    var value: CFString?
    var size = UInt32(MemoryLayout<CFString>.size)
    let result = AudioObjectGetPropertyData(deviceID, &address, 0, nil, &size, &value)

    guard result == noErr, let text = value else { return nil }
    return text as String
}
|
||||
|
||||
/// Returns a human-readable name for the device's transport type
/// (Built-in, USB, Bluetooth, …), or "Unknown" if the query fails.
private func getTransportType(deviceID: AudioDeviceID) -> String {
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioDevicePropertyTransportType,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )

    var rawTransport: UInt32 = 0
    var size = UInt32(MemoryLayout<UInt32>.size)
    let result = AudioObjectGetPropertyData(deviceID, &address, 0, nil, &size, &rawTransport)
    guard result == noErr else { return "Unknown" }

    // Table of known transport constants → display names.
    let names: [UInt32: String] = [
        kAudioDeviceTransportTypeBuiltIn: "Built-in",
        kAudioDeviceTransportTypeUSB: "USB",
        kAudioDeviceTransportTypeBluetooth: "Bluetooth",
        kAudioDeviceTransportTypeBluetoothLE: "Bluetooth LE",
        kAudioDeviceTransportTypeAggregate: "Aggregate",
        kAudioDeviceTransportTypeVirtual: "Virtual",
        kAudioDeviceTransportTypePCI: "PCI",
        kAudioDeviceTransportTypeFireWire: "FireWire",
        kAudioDeviceTransportTypeDisplayPort: "DisplayPort",
        kAudioDeviceTransportTypeHDMI: "HDMI",
        kAudioDeviceTransportTypeAVB: "AVB",
        kAudioDeviceTransportTypeThunderbolt: "Thunderbolt",
    ]
    return names[rawTransport] ?? "Other (\(rawTransport))"
}
|
||||
|
||||
/// Reads the device's current I/O buffer size.
///
/// - Returns: The buffer size in frames, or `nil` if the query fails.
private func getBufferFrameSize(deviceID: AudioDeviceID) -> UInt32? {
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioDevicePropertyBufferFrameSize,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )

    var frames: UInt32 = 0
    var size = UInt32(MemoryLayout<UInt32>.size)
    guard AudioObjectGetPropertyData(deviceID, &address, 0, nil, &size, &frames) == noErr else {
        return nil
    }
    return frames
}
|
||||
|
||||
/// Checks whether the device is still present and usable, using Apple's
/// `kAudioDevicePropertyDeviceIsAlive` property.
///
/// - Returns: `true` only when the query succeeds and the device reports
///   itself alive.
private func isDeviceAvailable(_ deviceID: AudioDeviceID) -> Bool {
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioDevicePropertyDeviceIsAlive,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )

    var aliveFlag: UInt32 = 0
    var size = UInt32(MemoryLayout<UInt32>.size)
    let result = AudioObjectGetPropertyData(deviceID, &address, 0, nil, &size, &aliveFlag)
    return result == noErr && aliveFlag == 1
}
|
||||
}
|
||||
|
||||
// MARK: - Error Types
|
||||
|
||||
/// Failures that can occur while configuring or running the CoreAudio
/// AUHAL-based recorder. Cases that wrap an `OSStatus` carry the raw
/// CoreAudio result code for diagnostics.
enum CoreAudioRecorderError: LocalizedError {
    case audioUnitNotFound
    case audioUnitNotInitialized
    case deviceNotAvailable
    case failedToCreateAudioUnit(status: OSStatus)
    case failedToEnableInput(status: OSStatus)
    case failedToDisableOutput(status: OSStatus)
    case failedToSetDevice(status: OSStatus)
    case failedToGetDeviceFormat(status: OSStatus)
    case failedToSetFormat(status: OSStatus)
    case failedToSetCallback(status: OSStatus)
    case failedToCreateFile(status: OSStatus)
    case failedToSetFileFormat(status: OSStatus)
    case failedToInitialize(status: OSStatus)
    case failedToStart(status: OSStatus)

    /// Human-readable message surfaced via `LocalizedError`.
    var errorDescription: String? {
        switch self {
        case .audioUnitNotFound: return "HAL Output AudioUnit not found"
        case .audioUnitNotInitialized: return "AudioUnit not initialized"
        case .deviceNotAvailable: return "Audio device is no longer available"
        case let .failedToCreateAudioUnit(status): return "Failed to create AudioUnit: \(status)"
        case let .failedToEnableInput(status): return "Failed to enable input: \(status)"
        case let .failedToDisableOutput(status): return "Failed to disable output: \(status)"
        case let .failedToSetDevice(status): return "Failed to set input device: \(status)"
        case let .failedToGetDeviceFormat(status): return "Failed to get device format: \(status)"
        case let .failedToSetFormat(status): return "Failed to set audio format: \(status)"
        case let .failedToSetCallback(status): return "Failed to set input callback: \(status)"
        case let .failedToCreateFile(status): return "Failed to create audio file: \(status)"
        case let .failedToSetFileFormat(status): return "Failed to set file format: \(status)"
        case let .failedToInitialize(status): return "Failed to initialize AudioUnit: \(status)"
        case let .failedToStart(status): return "Failed to start AudioUnit: \(status)"
        }
    }
}
|
||||
@ -16,4 +16,5 @@ extension Notification.Name {
|
||||
static let transcriptionCompleted = Notification.Name("transcriptionCompleted")
|
||||
static let enhancementToggleChanged = Notification.Name("enhancementToggleChanged")
|
||||
static let openFileForTranscription = Notification.Name("openFileForTranscription")
|
||||
static let audioDeviceSwitchRequired = Notification.Name("audioDeviceSwitchRequired")
|
||||
}
|
||||
|
||||
@ -5,10 +5,11 @@ import os
|
||||
|
||||
@MainActor
|
||||
class Recorder: NSObject, ObservableObject {
|
||||
private var recorder: AudioEngineRecorder?
|
||||
private var recorder: CoreAudioRecorder?
|
||||
private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "Recorder")
|
||||
private let deviceManager = AudioDeviceManager.shared
|
||||
private var deviceObserver: NSObjectProtocol?
|
||||
private var deviceSwitchObserver: NSObjectProtocol?
|
||||
private var isReconfiguring = false
|
||||
private let mediaController = MediaController.shared
|
||||
private let playbackController = PlaybackController.shared
|
||||
@ -25,8 +26,9 @@ class Recorder: NSObject, ObservableObject {
|
||||
override init() {
|
||||
super.init()
|
||||
setupDeviceChangeObserver()
|
||||
setupDeviceSwitchObserver()
|
||||
}
|
||||
|
||||
|
||||
private func setupDeviceChangeObserver() {
|
||||
deviceObserver = AudioDeviceConfiguration.createDeviceChangeObserver { [weak self] in
|
||||
Task {
|
||||
@ -34,26 +36,71 @@ class Recorder: NSObject, ObservableObject {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private func setupDeviceSwitchObserver() {
|
||||
deviceSwitchObserver = NotificationCenter.default.addObserver(
|
||||
forName: .audioDeviceSwitchRequired,
|
||||
object: nil,
|
||||
queue: .main
|
||||
) { [weak self] notification in
|
||||
Task {
|
||||
await self?.handleDeviceSwitchRequired(notification)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func handleDeviceChange() async {
|
||||
guard !isReconfiguring else { return }
|
||||
guard recorder != nil else { return }
|
||||
|
||||
|
||||
isReconfiguring = true
|
||||
|
||||
|
||||
try? await Task.sleep(nanoseconds: 200_000_000)
|
||||
|
||||
|
||||
await MainActor.run {
|
||||
NotificationCenter.default.post(name: .toggleMiniRecorder, object: nil)
|
||||
}
|
||||
|
||||
|
||||
isReconfiguring = false
|
||||
}
|
||||
|
||||
private func configureAudioSession(with deviceID: AudioDeviceID) async throws {
|
||||
try AudioDeviceConfiguration.setDefaultInputDevice(deviceID)
|
||||
|
||||
private func handleDeviceSwitchRequired(_ notification: Notification) async {
|
||||
guard !isReconfiguring else { return }
|
||||
guard let recorder = recorder else { return }
|
||||
guard let userInfo = notification.userInfo,
|
||||
let newDeviceID = userInfo["newDeviceID"] as? AudioDeviceID else {
|
||||
logger.error("Device switch notification missing newDeviceID")
|
||||
return
|
||||
}
|
||||
|
||||
// Prevent concurrent device switches and handleDeviceChange() interference
|
||||
isReconfiguring = true
|
||||
defer { isReconfiguring = false }
|
||||
|
||||
logger.notice("🎙️ Device switch required: switching to device \(newDeviceID)")
|
||||
|
||||
do {
|
||||
try recorder.switchDevice(to: newDeviceID)
|
||||
|
||||
// Notify user about the switch
|
||||
if let deviceName = deviceManager.availableDevices.first(where: { $0.id == newDeviceID })?.name {
|
||||
await MainActor.run {
|
||||
NotificationManager.shared.showNotification(
|
||||
title: "Switched to: \(deviceName)",
|
||||
type: .info
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
logger.notice("🎙️ Successfully switched recording to device \(newDeviceID)")
|
||||
} catch {
|
||||
logger.error("❌ Failed to switch device: \(error.localizedDescription)")
|
||||
|
||||
// If switch fails, stop recording and notify user
|
||||
await handleRecordingError(error)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func startRecording(toOutputFile url: URL) async throws {
|
||||
deviceManager.isRecordingActive = true
|
||||
|
||||
@ -75,26 +122,12 @@ class Recorder: NSObject, ObservableObject {
|
||||
hasDetectedAudioInCurrentSession = false
|
||||
|
||||
let deviceID = deviceManager.getCurrentDevice()
|
||||
do {
|
||||
try await configureAudioSession(with: deviceID)
|
||||
} catch {
|
||||
logger.warning("⚠️ Failed to configure audio session for device \(deviceID), attempting to continue: \(error.localizedDescription)")
|
||||
}
|
||||
|
||||
do {
|
||||
let engineRecorder = AudioEngineRecorder()
|
||||
recorder = engineRecorder
|
||||
let coreAudioRecorder = CoreAudioRecorder()
|
||||
recorder = coreAudioRecorder
|
||||
|
||||
// Set up error callback to handle runtime recording failures
|
||||
engineRecorder.onRecordingError = { [weak self] error in
|
||||
Task { @MainActor in
|
||||
await self?.handleRecordingError(error)
|
||||
}
|
||||
}
|
||||
|
||||
try engineRecorder.startRecording(toOutputFile: url)
|
||||
|
||||
logger.info("✅ AudioEngineRecorder started successfully")
|
||||
try coreAudioRecorder.startRecording(toOutputFile: url, deviceID: deviceID)
|
||||
|
||||
audioRestorationTask?.cancel()
|
||||
audioRestorationTask = nil
|
||||
@ -142,7 +175,7 @@ class Recorder: NSObject, ObservableObject {
|
||||
throw RecorderError.couldNotStartRecording
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func stopRecording() {
|
||||
audioLevelCheckTask?.cancel()
|
||||
audioMeterUpdateTask?.cancel()
|
||||
@ -175,8 +208,8 @@ class Recorder: NSObject, ObservableObject {
|
||||
private func updateAudioMeter() {
|
||||
guard let recorder = recorder else { return }
|
||||
|
||||
let averagePower = recorder.currentAveragePower
|
||||
let peakPower = recorder.currentPeakPower
|
||||
let averagePower = recorder.averagePower
|
||||
let peakPower = recorder.peakPower
|
||||
|
||||
let minVisibleDb: Float = -60.0
|
||||
let maxVisibleDb: Float = 0.0
|
||||
@ -217,6 +250,9 @@ class Recorder: NSObject, ObservableObject {
|
||||
if let observer = deviceObserver {
|
||||
NotificationCenter.default.removeObserver(observer)
|
||||
}
|
||||
if let observer = deviceSwitchObserver {
|
||||
NotificationCenter.default.removeObserver(observer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -3,12 +3,11 @@ import AVFoundation
|
||||
import CoreAudio
|
||||
import os
|
||||
|
||||
/// Audio device configuration queries (does NOT modify system default device)
|
||||
class AudioDeviceConfiguration {
|
||||
private static let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "AudioDeviceConfiguration")
|
||||
|
||||
|
||||
|
||||
|
||||
/// Gets the current system default input device (for reference only)
|
||||
static func getDefaultInputDevice() -> AudioDeviceID? {
|
||||
var defaultDeviceID = AudioDeviceID(0)
|
||||
var propertySize = UInt32(MemoryLayout<AudioDeviceID>.size)
|
||||
@ -31,36 +30,8 @@ class AudioDeviceConfiguration {
|
||||
}
|
||||
return defaultDeviceID
|
||||
}
|
||||
|
||||
static func setDefaultInputDevice(_ deviceID: AudioDeviceID) throws {
|
||||
var deviceIDCopy = deviceID
|
||||
let propertySize = UInt32(MemoryLayout<AudioDeviceID>.size)
|
||||
var address = AudioObjectPropertyAddress(
|
||||
mSelector: kAudioHardwarePropertyDefaultInputDevice,
|
||||
mScope: kAudioObjectPropertyScopeGlobal,
|
||||
mElement: kAudioObjectPropertyElementMain
|
||||
)
|
||||
|
||||
let setDeviceResult = AudioObjectSetPropertyData(
|
||||
AudioObjectID(kAudioObjectSystemObject),
|
||||
&address,
|
||||
0,
|
||||
nil,
|
||||
propertySize,
|
||||
&deviceIDCopy
|
||||
)
|
||||
|
||||
if setDeviceResult != noErr {
|
||||
logger.error("Failed to set input device: \(setDeviceResult)")
|
||||
throw AudioConfigurationError.failedToSetInputDevice(status: setDeviceResult)
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a device change observer
|
||||
/// - Parameters:
|
||||
/// - handler: The closure to execute when device changes
|
||||
/// - queue: The queue to execute the handler on (defaults to main queue)
|
||||
/// - Returns: The observer token
|
||||
|
||||
/// Creates a device change observer that calls handler on the specified queue
|
||||
static func createDeviceChangeObserver(
|
||||
handler: @escaping () -> Void,
|
||||
queue: OperationQueue = .main
|
||||
@ -72,15 +43,4 @@ class AudioDeviceConfiguration {
|
||||
using: { _ in handler() }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
enum AudioConfigurationError: LocalizedError {
|
||||
case failedToSetInputDevice(status: OSStatus)
|
||||
|
||||
var errorDescription: String? {
|
||||
switch self {
|
||||
case .failedToSetInputDevice(let status):
|
||||
return "Failed to set input device: \(status)"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -10,6 +10,7 @@ struct PrioritizedDevice: Codable, Identifiable {
|
||||
}
|
||||
|
||||
enum AudioInputMode: String, CaseIterable {
|
||||
case systemDefault = "System Default"
|
||||
case custom = "Custom Device"
|
||||
case prioritized = "Prioritized"
|
||||
}
|
||||
@ -20,84 +21,77 @@ class AudioDeviceManager: ObservableObject {
|
||||
@Published var selectedDeviceID: AudioDeviceID?
|
||||
@Published var inputMode: AudioInputMode = .custom
|
||||
@Published var prioritizedDevices: [PrioritizedDevice] = []
|
||||
var fallbackDeviceID: AudioDeviceID?
|
||||
|
||||
var isRecordingActive: Bool = false
|
||||
|
||||
static let shared = AudioDeviceManager()
|
||||
|
||||
init() {
|
||||
setupFallbackDevice()
|
||||
loadPrioritizedDevices()
|
||||
|
||||
if let savedMode = UserDefaults.standard.audioInputModeRawValue,
|
||||
let mode = AudioInputMode(rawValue: savedMode) {
|
||||
inputMode = mode
|
||||
} else {
|
||||
inputMode = .custom
|
||||
inputMode = .systemDefault
|
||||
}
|
||||
|
||||
loadAvailableDevices { [weak self] in
|
||||
self?.migrateFromSystemDefaultIfNeeded()
|
||||
self?.initializeSelectedDevice()
|
||||
}
|
||||
|
||||
setupDeviceChangeNotifications()
|
||||
}
|
||||
|
||||
private func migrateFromSystemDefaultIfNeeded() {
|
||||
if let savedModeRaw = UserDefaults.standard.audioInputModeRawValue,
|
||||
savedModeRaw == "System Default" {
|
||||
logger.info("Migrating from System Default mode to Custom mode")
|
||||
|
||||
if let fallbackID = fallbackDeviceID {
|
||||
selectedDeviceID = fallbackID
|
||||
if let device = availableDevices.first(where: { $0.id == fallbackID }) {
|
||||
UserDefaults.standard.selectedAudioDeviceUID = device.uid
|
||||
logger.info("Migrated to Custom mode with device: \(device.name)")
|
||||
}
|
||||
}
|
||||
|
||||
UserDefaults.standard.audioInputModeRawValue = AudioInputMode.custom.rawValue
|
||||
}
|
||||
}
|
||||
|
||||
func setupFallbackDevice() {
|
||||
let deviceID: AudioDeviceID? = getDeviceProperty(
|
||||
deviceID: AudioObjectID(kAudioObjectSystemObject),
|
||||
selector: kAudioHardwarePropertyDefaultInputDevice
|
||||
/// Returns the current system default input device from macOS
|
||||
func getSystemDefaultDevice() -> AudioDeviceID? {
|
||||
var deviceID = AudioDeviceID(0)
|
||||
var propertySize = UInt32(MemoryLayout<AudioDeviceID>.size)
|
||||
var address = AudioObjectPropertyAddress(
|
||||
mSelector: kAudioHardwarePropertyDefaultInputDevice,
|
||||
mScope: kAudioObjectPropertyScopeGlobal,
|
||||
mElement: kAudioObjectPropertyElementMain
|
||||
)
|
||||
|
||||
if let deviceID = deviceID {
|
||||
fallbackDeviceID = deviceID
|
||||
if let name = getDeviceName(deviceID: deviceID) {
|
||||
logger.info("Fallback device set to: \(name) (ID: \(deviceID))")
|
||||
}
|
||||
} else {
|
||||
logger.error("Failed to get fallback device")
|
||||
|
||||
let status = AudioObjectGetPropertyData(
|
||||
AudioObjectID(kAudioObjectSystemObject),
|
||||
&address,
|
||||
0,
|
||||
nil,
|
||||
&propertySize,
|
||||
&deviceID
|
||||
)
|
||||
|
||||
guard status == noErr, deviceID != 0 else {
|
||||
logger.error("Failed to get system default device: \(status)")
|
||||
return nil
|
||||
}
|
||||
return deviceID
|
||||
}
|
||||
|
||||
func getSystemDefaultDeviceName() -> String? {
|
||||
guard let deviceID = getSystemDefaultDevice() else { return nil }
|
||||
return getDeviceName(deviceID: deviceID)
|
||||
}
|
||||
|
||||
private func initializeSelectedDevice() {
|
||||
if inputMode == .prioritized {
|
||||
switch inputMode {
|
||||
case .systemDefault:
|
||||
logger.notice("🎙️ Using System Default mode")
|
||||
case .prioritized:
|
||||
selectHighestPriorityAvailableDevice()
|
||||
return
|
||||
}
|
||||
|
||||
if let savedUID = UserDefaults.standard.selectedAudioDeviceUID {
|
||||
if let device = availableDevices.first(where: { $0.uid == savedUID }) {
|
||||
selectedDeviceID = device.id
|
||||
logger.info("Loaded saved device UID: \(savedUID), mapped to ID: \(device.id)")
|
||||
if let name = getDeviceName(deviceID: device.id) {
|
||||
logger.info("Using saved device: \(name)")
|
||||
case .custom:
|
||||
if let savedUID = UserDefaults.standard.selectedAudioDeviceUID {
|
||||
if let device = availableDevices.first(where: { $0.uid == savedUID }) {
|
||||
selectedDeviceID = device.id
|
||||
} else {
|
||||
logger.warning("🎙️ Saved device UID \(savedUID) is no longer available")
|
||||
UserDefaults.standard.removeObject(forKey: UserDefaults.Keys.selectedAudioDeviceUID)
|
||||
fallbackToDefaultDevice()
|
||||
}
|
||||
} else {
|
||||
logger.warning("Saved device UID \(savedUID) is no longer available")
|
||||
UserDefaults.standard.removeObject(forKey: UserDefaults.Keys.selectedAudioDeviceUID)
|
||||
fallbackToDefaultDevice()
|
||||
}
|
||||
} else {
|
||||
fallbackToDefaultDevice()
|
||||
}
|
||||
}
|
||||
|
||||
@ -106,7 +100,7 @@ class AudioDeviceManager: ObservableObject {
|
||||
}
|
||||
|
||||
private func fallbackToDefaultDevice() {
|
||||
logger.info("Current device unavailable, selecting new device...")
|
||||
logger.notice("🎙️ Current device unavailable, selecting new device...")
|
||||
|
||||
guard let newDeviceID = findBestAvailableDevice() else {
|
||||
logger.error("No input devices available!")
|
||||
@ -116,21 +110,18 @@ class AudioDeviceManager: ObservableObject {
|
||||
}
|
||||
|
||||
let newDeviceName = getDeviceName(deviceID: newDeviceID) ?? "Unknown Device"
|
||||
logger.info("Auto-selecting new device: \(newDeviceName)")
|
||||
logger.notice("🎙️ Auto-selecting new device: \(newDeviceName)")
|
||||
selectDevice(id: newDeviceID)
|
||||
}
|
||||
|
||||
func findBestAvailableDevice() -> AudioDeviceID? {
|
||||
if let device = availableDevices.first(where: { isBuiltInDevice($0.id) }) {
|
||||
logger.info("Found built-in device: \(device.name)")
|
||||
return device.id
|
||||
}
|
||||
|
||||
if let device = availableDevices.first {
|
||||
logger.warning("No built-in device found, using first available: \(device.name)")
|
||||
logger.warning("🎙️ No built-in device found, using: \(device.name)")
|
||||
return device.id
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -188,8 +179,14 @@ class AudioDeviceManager: ObservableObject {
|
||||
guard let self = self else { return }
|
||||
self.availableDevices = devices.map { ($0.id, $0.uid, $0.name) }
|
||||
if let currentID = self.selectedDeviceID, !devices.contains(where: { $0.id == currentID }) {
|
||||
self.logger.warning("Currently selected device is no longer available")
|
||||
self.fallbackToDefaultDevice()
|
||||
self.logger.warning("🎙️ Currently selected device is no longer available")
|
||||
if !self.isRecordingActive {
|
||||
if self.inputMode == .prioritized {
|
||||
self.selectHighestPriorityAvailableDevice()
|
||||
} else {
|
||||
self.fallbackToDefaultDevice()
|
||||
}
|
||||
}
|
||||
}
|
||||
completion?()
|
||||
}
|
||||
@ -244,25 +241,11 @@ class AudioDeviceManager: ObservableObject {
|
||||
}
|
||||
|
||||
func selectDevice(id: AudioDeviceID) {
|
||||
logger.info("Selecting device with ID: \(id)")
|
||||
if let name = getDeviceName(deviceID: id) {
|
||||
logger.info("Selected device name: \(name)")
|
||||
}
|
||||
|
||||
if let deviceToSelect = availableDevices.first(where: { $0.id == id }) {
|
||||
let uid = deviceToSelect.uid
|
||||
DispatchQueue.main.async {
|
||||
self.selectedDeviceID = id
|
||||
UserDefaults.standard.selectedAudioDeviceUID = uid
|
||||
self.logger.info("Device selection saved with UID: \(uid)")
|
||||
|
||||
do {
|
||||
try AudioDeviceConfiguration.setDefaultInputDevice(id)
|
||||
self.logger.info("✅ Set device as system default immediately")
|
||||
} catch {
|
||||
self.logger.error("Failed to set device as system default: \(error.localizedDescription)")
|
||||
}
|
||||
|
||||
self.notifyDeviceChange()
|
||||
}
|
||||
} else {
|
||||
@ -279,14 +262,6 @@ class AudioDeviceManager: ObservableObject {
|
||||
self.selectedDeviceID = id
|
||||
UserDefaults.standard.audioInputModeRawValue = AudioInputMode.custom.rawValue
|
||||
UserDefaults.standard.selectedAudioDeviceUID = uid
|
||||
|
||||
do {
|
||||
try AudioDeviceConfiguration.setDefaultInputDevice(id)
|
||||
self.logger.info("✅ Set device as system default immediately")
|
||||
} catch {
|
||||
self.logger.error("Failed to set device as system default: \(error.localizedDescription)")
|
||||
}
|
||||
|
||||
self.notifyDeviceChange()
|
||||
}
|
||||
} else {
|
||||
@ -299,22 +274,18 @@ class AudioDeviceManager: ObservableObject {
|
||||
inputMode = mode
|
||||
UserDefaults.standard.audioInputModeRawValue = mode.rawValue
|
||||
|
||||
if selectedDeviceID == nil {
|
||||
if inputMode == .custom {
|
||||
switch mode {
|
||||
case .systemDefault:
|
||||
break
|
||||
case .custom:
|
||||
if selectedDeviceID == nil {
|
||||
if let firstDevice = availableDevices.first {
|
||||
selectDevice(id: firstDevice.id)
|
||||
}
|
||||
} else if inputMode == .prioritized {
|
||||
selectHighestPriorityAvailableDevice()
|
||||
}
|
||||
} else {
|
||||
if let currentDeviceID = selectedDeviceID {
|
||||
do {
|
||||
try AudioDeviceConfiguration.setDefaultInputDevice(currentDeviceID)
|
||||
logger.info("✅ Set current device as system default when mode changed")
|
||||
} catch {
|
||||
logger.error("Failed to set device as system default: \(error.localizedDescription)")
|
||||
}
|
||||
case .prioritized:
|
||||
if selectedDeviceID == nil {
|
||||
selectHighestPriorityAvailableDevice()
|
||||
}
|
||||
}
|
||||
|
||||
@ -323,13 +294,13 @@ class AudioDeviceManager: ObservableObject {
|
||||
|
||||
func getCurrentDevice() -> AudioDeviceID {
|
||||
switch inputMode {
|
||||
case .systemDefault:
|
||||
return getSystemDefaultDevice() ?? findBestAvailableDevice() ?? 0
|
||||
case .custom:
|
||||
if let id = selectedDeviceID, isDeviceAvailable(id) {
|
||||
return id
|
||||
} else {
|
||||
// Use smart device finding instead of stale fallback
|
||||
return findBestAvailableDevice() ?? 0
|
||||
}
|
||||
return findBestAvailableDevice() ?? 0
|
||||
case .prioritized:
|
||||
let sortedDevices = prioritizedDevices.sorted { $0.priority < $1.priority }
|
||||
for device in sortedDevices {
|
||||
@ -337,7 +308,6 @@ class AudioDeviceManager: ObservableObject {
|
||||
return available.id
|
||||
}
|
||||
}
|
||||
// Use smart device finding instead of stale fallback
|
||||
return findBestAvailableDevice() ?? 0
|
||||
}
|
||||
}
|
||||
@ -346,14 +316,12 @@ class AudioDeviceManager: ObservableObject {
|
||||
if let data = UserDefaults.standard.prioritizedDevicesData,
|
||||
let devices = try? JSONDecoder().decode([PrioritizedDevice].self, from: data) {
|
||||
prioritizedDevices = devices
|
||||
logger.info("Loaded \(devices.count) prioritized devices")
|
||||
}
|
||||
}
|
||||
|
||||
func savePrioritizedDevices() {
|
||||
if let data = try? JSONEncoder().encode(prioritizedDevices) {
|
||||
UserDefaults.standard.prioritizedDevicesData = data
|
||||
logger.info("Saved \(self.prioritizedDevices.count) prioritized devices")
|
||||
}
|
||||
}
|
||||
|
||||
@ -398,15 +366,7 @@ class AudioDeviceManager: ObservableObject {
|
||||
for device in sortedDevices {
|
||||
if let availableDevice = availableDevices.first(where: { $0.uid == device.id }) {
|
||||
selectedDeviceID = availableDevice.id
|
||||
logger.info("Selected prioritized device: \(device.name) (Priority: \(device.priority))")
|
||||
|
||||
do {
|
||||
try AudioDeviceConfiguration.setDefaultInputDevice(availableDevice.id)
|
||||
logger.info("✅ Set prioritized device as system default immediately")
|
||||
} catch {
|
||||
logger.error("Failed to set prioritized device: \(error.localizedDescription)")
|
||||
continue
|
||||
}
|
||||
logger.notice("🎙️ Selected prioritized device: \(device.name)")
|
||||
notifyDeviceChange()
|
||||
return
|
||||
}
|
||||
@ -439,24 +399,58 @@ class AudioDeviceManager: ObservableObject {
|
||||
|
||||
if status != noErr {
|
||||
logger.error("Failed to add device change listener: \(status)")
|
||||
} else {
|
||||
logger.info("Successfully added device change listener")
|
||||
}
|
||||
}
|
||||
|
||||
private func handleDeviceListChange() {
|
||||
logger.info("Device list change detected")
|
||||
|
||||
// Don't change devices while recording is active
|
||||
// This prevents audio engine errors during recording startup
|
||||
if isRecordingActive {
|
||||
logger.info("Recording is active - deferring device change handling")
|
||||
return
|
||||
}
|
||||
logger.notice("🎙️ Device list change detected")
|
||||
|
||||
loadAvailableDevices { [weak self] in
|
||||
guard let self = self else { return }
|
||||
|
||||
if self.inputMode == .systemDefault {
|
||||
self.notifyDeviceChange()
|
||||
return
|
||||
}
|
||||
|
||||
if self.isRecordingActive {
|
||||
guard let currentID = self.selectedDeviceID else { return }
|
||||
|
||||
if !self.isDeviceAvailable(currentID) {
|
||||
self.logger.warning("🎙️ Recording device \(currentID) no longer available - requesting switch")
|
||||
|
||||
let newDeviceID: AudioDeviceID?
|
||||
if self.inputMode == .prioritized {
|
||||
let sortedDevices = self.prioritizedDevices.sorted { $0.priority < $1.priority }
|
||||
let priorityDeviceID = sortedDevices.compactMap { device in
|
||||
self.availableDevices.first(where: { $0.uid == device.id })?.id
|
||||
}.first
|
||||
|
||||
if let deviceID = priorityDeviceID {
|
||||
newDeviceID = deviceID
|
||||
} else {
|
||||
self.logger.warning("🎙️ No priority devices available, using fallback")
|
||||
newDeviceID = self.findBestAvailableDevice()
|
||||
}
|
||||
} else {
|
||||
newDeviceID = self.findBestAvailableDevice()
|
||||
}
|
||||
|
||||
if let deviceID = newDeviceID {
|
||||
self.selectedDeviceID = deviceID
|
||||
NotificationCenter.default.post(
|
||||
name: .audioDeviceSwitchRequired,
|
||||
object: nil,
|
||||
userInfo: ["newDeviceID": deviceID]
|
||||
)
|
||||
} else {
|
||||
self.logger.error("No audio input devices available!")
|
||||
NotificationCenter.default.post(name: .toggleMiniRecorder, object: nil)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if self.inputMode == .prioritized {
|
||||
self.selectHighestPriorityAvailableDevice()
|
||||
} else if self.inputMode == .custom,
|
||||
|
||||
114
VoiceInk/Services/LogExporter.swift
Normal file
114
VoiceInk/Services/LogExporter.swift
Normal file
@ -0,0 +1,114 @@
|
||||
import Foundation
|
||||
import OSLog
|
||||
|
||||
/// Utility class for exporting app logs since launch for diagnostic purposes
|
||||
final class LogExporter {
|
||||
static let shared = LogExporter()
|
||||
|
||||
private let logger = Logger(subsystem: "com.prakashjoshipax.voiceink", category: "LogExporter")
|
||||
private let subsystem = "com.prakashjoshipax.voiceink"
|
||||
|
||||
/// Timestamp when the app was launched
|
||||
let launchDate: Date
|
||||
|
||||
private init() {
|
||||
self.launchDate = Date()
|
||||
logger.notice("🎙️ LogExporter initialized, launch timestamp recorded")
|
||||
}
|
||||
|
||||
/// Exports logs since app launch to a file and returns the file URL
|
||||
func exportLogs() async throws -> URL {
|
||||
logger.notice("🎙️ Starting log export since \(self.launchDate)")
|
||||
|
||||
let logs = try await fetchLogsSinceLaunch()
|
||||
|
||||
let fileURL = try saveLogsToFile(logs)
|
||||
|
||||
logger.notice("🎙️ Log export completed: \(fileURL.path)")
|
||||
|
||||
return fileURL
|
||||
}
|
||||
|
||||
/// Fetches logs from OSLogStore since app launch
|
||||
private func fetchLogsSinceLaunch() async throws -> [String] {
|
||||
let store = try OSLogStore(scope: .currentProcessIdentifier)
|
||||
|
||||
// Get logs since launch
|
||||
let position = store.position(date: launchDate)
|
||||
|
||||
// Create predicate to filter by our subsystem
|
||||
let predicate = NSPredicate(format: "subsystem == %@", subsystem)
|
||||
|
||||
let entries = try store.getEntries(at: position, matching: predicate)
|
||||
|
||||
var logLines: [String] = []
|
||||
|
||||
// Add header
|
||||
let dateFormatter = DateFormatter()
|
||||
dateFormatter.dateFormat = "yyyy-MM-dd HH:mm:ss.SSS"
|
||||
|
||||
logLines.append("=== VoiceInk Diagnostic Logs ===")
|
||||
logLines.append("Export Date: \(dateFormatter.string(from: Date()))")
|
||||
logLines.append("App Launch: \(dateFormatter.string(from: launchDate))")
|
||||
logLines.append("Subsystem: \(subsystem)")
|
||||
logLines.append("================================")
|
||||
logLines.append("")
|
||||
|
||||
for entry in entries {
|
||||
guard let logEntry = entry as? OSLogEntryLog else { continue }
|
||||
|
||||
let timestamp = dateFormatter.string(from: logEntry.date)
|
||||
let level = logLevelString(logEntry.level)
|
||||
let category = logEntry.category
|
||||
let message = logEntry.composedMessage
|
||||
|
||||
logLines.append("[\(timestamp)] [\(level)] [\(category)] \(message)")
|
||||
}
|
||||
|
||||
if logLines.count <= 6 { // Only header lines
|
||||
logLines.append("No logs found since app launch.")
|
||||
}
|
||||
|
||||
return logLines
|
||||
}
|
||||
|
||||
/// Converts OSLogEntryLog.Level to a readable string
|
||||
private func logLevelString(_ level: OSLogEntryLog.Level) -> String {
|
||||
switch level {
|
||||
case .undefined:
|
||||
return "UNDEFINED"
|
||||
case .debug:
|
||||
return "DEBUG"
|
||||
case .info:
|
||||
return "INFO"
|
||||
case .notice:
|
||||
return "NOTICE"
|
||||
case .error:
|
||||
return "ERROR"
|
||||
case .fault:
|
||||
return "FAULT"
|
||||
@unknown default:
|
||||
return "UNKNOWN"
|
||||
}
|
||||
}
|
||||
|
||||
/// Saves logs to a file in the Downloads folder
|
||||
private func saveLogsToFile(_ logs: [String]) throws -> URL {
|
||||
let dateFormatter = DateFormatter()
|
||||
dateFormatter.dateFormat = "yyyy-MM-dd_HH-mm-ss"
|
||||
let timestamp = dateFormatter.string(from: Date())
|
||||
|
||||
let fileName = "VoiceInk_Logs_\(timestamp).log"
|
||||
|
||||
// Get Downloads folder
|
||||
guard let downloadsURL = FileManager.default.urls(for: .downloadsDirectory, in: .userDomainMask).first else {
|
||||
throw NSError(domain: "LogExporter", code: 1, userInfo: [NSLocalizedDescriptionKey: "Downloads directory unavailable"])
|
||||
}
|
||||
let fileURL = downloadsURL.appendingPathComponent(fileName)
|
||||
|
||||
let content = logs.joined(separator: "\n")
|
||||
try content.write(to: fileURL, atomically: true, encoding: .utf8)
|
||||
|
||||
return fileURL
|
||||
}
|
||||
}
|
||||
@ -101,11 +101,11 @@ class SystemInfoService {
|
||||
|
||||
private func getCurrentAudioDevice() -> String {
|
||||
let audioManager = AudioDeviceManager.shared
|
||||
if let deviceID = audioManager.selectedDeviceID ?? audioManager.fallbackDeviceID,
|
||||
let deviceName = audioManager.getDeviceName(deviceID: deviceID) {
|
||||
let deviceID = audioManager.getCurrentDevice()
|
||||
if deviceID != 0, let deviceName = audioManager.getDeviceName(deviceID: deviceID) {
|
||||
return deviceName
|
||||
}
|
||||
return "System Default"
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
private func getAvailableAudioDevices() -> String {
|
||||
|
||||
@ -17,10 +17,13 @@ struct AudioInputSettingsView: View {
|
||||
private var mainContent: some View {
|
||||
VStack(spacing: 40) {
|
||||
inputModeSection
|
||||
|
||||
if audioDeviceManager.inputMode == .custom {
|
||||
|
||||
switch audioDeviceManager.inputMode {
|
||||
case .systemDefault:
|
||||
systemDefaultSection
|
||||
case .custom:
|
||||
customDeviceSection
|
||||
} else if audioDeviceManager.inputMode == .prioritized {
|
||||
case .prioritized:
|
||||
prioritizedDevicesSection
|
||||
}
|
||||
}
|
||||
@ -54,25 +57,50 @@ struct AudioInputSettingsView: View {
|
||||
}
|
||||
}
|
||||
|
||||
private var systemDefaultSection: some View {
|
||||
VStack(alignment: .leading, spacing: 20) {
|
||||
Text("Current Device")
|
||||
.font(.title2)
|
||||
.fontWeight(.semibold)
|
||||
|
||||
HStack {
|
||||
Image(systemName: "display")
|
||||
.foregroundStyle(.secondary)
|
||||
|
||||
Text(audioDeviceManager.getSystemDefaultDeviceName() ?? "No device available")
|
||||
.foregroundStyle(.primary)
|
||||
|
||||
Spacer()
|
||||
|
||||
Label("Active", systemImage: "wave.3.right")
|
||||
.font(.caption)
|
||||
.foregroundStyle(.green)
|
||||
.padding(.horizontal, 10)
|
||||
.padding(.vertical, 4)
|
||||
.background(
|
||||
Capsule()
|
||||
.fill(.green.opacity(0.1))
|
||||
)
|
||||
}
|
||||
.padding()
|
||||
.background(CardBackground(isSelected: false))
|
||||
}
|
||||
}
|
||||
|
||||
private var customDeviceSection: some View {
|
||||
VStack(alignment: .leading, spacing: 20) {
|
||||
HStack {
|
||||
Text("Available Devices")
|
||||
.font(.title2)
|
||||
.fontWeight(.semibold)
|
||||
|
||||
|
||||
Spacer()
|
||||
|
||||
|
||||
Button(action: { audioDeviceManager.loadAvailableDevices() }) {
|
||||
Label("Refresh", systemImage: "arrow.clockwise")
|
||||
}
|
||||
.buttonStyle(.borderless)
|
||||
}
|
||||
|
||||
Text("Note: Selecting a device here will override your Mac\'s system-wide default microphone.")
|
||||
.font(.caption)
|
||||
.foregroundColor(.secondary)
|
||||
.padding(.bottom, 8)
|
||||
|
||||
VStack(spacing: 12) {
|
||||
ForEach(audioDeviceManager.availableDevices, id: \.id) { device in
|
||||
@ -106,14 +134,10 @@ struct AudioInputSettingsView: View {
|
||||
Text("Prioritized Devices")
|
||||
.font(.title2)
|
||||
.fontWeight(.semibold)
|
||||
Text("Devices will be used in order of priority. If a device is unavailable, the next one will be tried. If no prioritized device is available, the system default microphone will be used.")
|
||||
Text("Devices will be used in order of priority. If a device is unavailable, the next one will be tried. If no prioritized device is available, the built-in microphone will be used.")
|
||||
.font(.subheadline)
|
||||
.foregroundStyle(.secondary)
|
||||
.fixedSize(horizontal: false, vertical: true)
|
||||
Text("Warning: Using a prioritized device will override your Mac\'s system-wide default microphone if it becomes active.")
|
||||
.font(.caption)
|
||||
.foregroundColor(.secondary)
|
||||
.padding(.top, 4)
|
||||
}
|
||||
|
||||
if audioDeviceManager.prioritizedDevices.isEmpty {
|
||||
@ -241,9 +265,10 @@ struct InputModeCard: View {
|
||||
let mode: AudioInputMode
|
||||
let isSelected: Bool
|
||||
let action: () -> Void
|
||||
|
||||
|
||||
private var icon: String {
|
||||
switch mode {
|
||||
case .systemDefault: return "display"
|
||||
case .custom: return "mic.circle.fill"
|
||||
case .prioritized: return "list.number"
|
||||
}
|
||||
@ -251,6 +276,7 @@ struct InputModeCard: View {
|
||||
|
||||
private var description: String {
|
||||
switch mode {
|
||||
case .systemDefault: return "Use your Mac's default input"
|
||||
case .custom: return "Select a specific input device"
|
||||
case .prioritized: return "Set up device priority order"
|
||||
}
|
||||
|
||||
75
VoiceInk/Views/Settings/DiagnosticsSettingsView.swift
Normal file
75
VoiceInk/Views/Settings/DiagnosticsSettingsView.swift
Normal file
@ -0,0 +1,75 @@
|
||||
import SwiftUI
|
||||
|
||||
struct DiagnosticsSettingsView: View {
|
||||
@State private var isExportingLogs = false
|
||||
@State private var exportedLogURL: URL?
|
||||
@State private var showLogExportError = false
|
||||
@State private var logExportError: String = ""
|
||||
|
||||
var body: some View {
|
||||
VStack(alignment: .leading, spacing: 12) {
|
||||
Text("Export logs to help troubleshoot issues.")
|
||||
.settingsDescription()
|
||||
|
||||
HStack(spacing: 12) {
|
||||
Button {
|
||||
exportDiagnosticLogs()
|
||||
} label: {
|
||||
HStack {
|
||||
if isExportingLogs {
|
||||
ProgressView()
|
||||
.controlSize(.small)
|
||||
} else {
|
||||
Image(systemName: "doc.text.magnifyingglass")
|
||||
}
|
||||
Text("Export Diagnostic Logs")
|
||||
}
|
||||
.frame(maxWidth: .infinity)
|
||||
}
|
||||
.controlSize(.large)
|
||||
.disabled(isExportingLogs)
|
||||
|
||||
if let url = exportedLogURL {
|
||||
Button {
|
||||
NSWorkspace.shared.activateFileViewerSelecting([url])
|
||||
} label: {
|
||||
Label("Show in Finder", systemImage: "folder")
|
||||
}
|
||||
.controlSize(.large)
|
||||
}
|
||||
}
|
||||
|
||||
if exportedLogURL != nil {
|
||||
Text("Logs exported to Downloads folder.")
|
||||
.font(.caption)
|
||||
.foregroundColor(.green)
|
||||
}
|
||||
}
|
||||
.alert("Export Failed", isPresented: $showLogExportError) {
|
||||
Button("OK", role: .cancel) { }
|
||||
} message: {
|
||||
Text(logExportError)
|
||||
}
|
||||
}
|
||||
|
||||
private func exportDiagnosticLogs() {
|
||||
isExportingLogs = true
|
||||
exportedLogURL = nil
|
||||
|
||||
Task {
|
||||
do {
|
||||
let url = try await LogExporter.shared.exportLogs()
|
||||
await MainActor.run {
|
||||
exportedLogURL = url
|
||||
isExportingLogs = false
|
||||
}
|
||||
} catch {
|
||||
await MainActor.run {
|
||||
logExportError = error.localizedDescription
|
||||
showLogExportError = true
|
||||
isExportingLogs = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -397,11 +397,11 @@ struct SettingsView: View {
|
||||
|
||||
Button {
|
||||
ImportExportService.shared.exportSettings(
|
||||
enhancementService: enhancementService,
|
||||
whisperPrompt: whisperState.whisperPrompt,
|
||||
hotkeyManager: hotkeyManager,
|
||||
menuBarManager: menuBarManager,
|
||||
mediaController: MediaController.shared,
|
||||
enhancementService: enhancementService,
|
||||
whisperPrompt: whisperState.whisperPrompt,
|
||||
hotkeyManager: hotkeyManager,
|
||||
menuBarManager: menuBarManager,
|
||||
mediaController: MediaController.shared,
|
||||
playbackController: PlaybackController.shared,
|
||||
soundManager: SoundManager.shared,
|
||||
whisperState: whisperState
|
||||
@ -414,6 +414,14 @@ struct SettingsView: View {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SettingsSection(
|
||||
icon: "ant.circle",
|
||||
title: "Diagnostics",
|
||||
subtitle: "Export logs for troubleshooting"
|
||||
) {
|
||||
DiagnosticsSettingsView()
|
||||
}
|
||||
}
|
||||
.padding(.horizontal, 20)
|
||||
.padding(.vertical, 6)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user