vOOice/VoiceInk/Services/CloudTranscription/ElevenLabsTranscriptionService.swift
2025-07-09 10:20:02 +05:45

105 lines
4.5 KiB
Swift

import Foundation
/// Uploads recorded audio to the ElevenLabs speech-to-text endpoint and
/// returns the transcribed text.
class ElevenLabsTranscriptionService {

    /// Transcribes the audio file at `audioURL` using the ElevenLabs API.
    /// - Parameters:
    ///   - audioURL: Local file URL of the recorded audio to upload.
    ///   - model: Transcription model whose `name` is sent as the ElevenLabs `model_id`.
    /// - Returns: The `text` field of the API's JSON response.
    /// - Throws: `CloudTranscriptionError.missingAPIKey` when no key is stored,
    ///   `.audioFileNotFound` when the file cannot be read,
    ///   `.networkError` for a non-HTTP response,
    ///   `.apiRequestFailed` for non-2xx status codes, and
    ///   `.noTranscriptionReturned` when a 2xx body cannot be decoded.
    func transcribe(audioURL: URL, model: any TranscriptionModel) async throws -> String {
        let config = try getAPIConfig(for: model)
        let boundary = "Boundary-\(UUID().uuidString)"

        var request = URLRequest(url: config.url)
        request.httpMethod = "POST"
        request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
        request.setValue("application/json", forHTTPHeaderField: "Accept")
        // ElevenLabs authenticates via the xi-api-key header, not a Bearer token.
        request.setValue(config.apiKey, forHTTPHeaderField: "xi-api-key")

        let body = try createElevenLabsRequestBody(audioURL: audioURL, modelName: config.modelName, boundary: boundary)
        let (data, response) = try await URLSession.shared.upload(for: request, from: body)

        guard let httpResponse = response as? HTTPURLResponse else {
            throw CloudTranscriptionError.networkError(URLError(.badServerResponse))
        }
        guard (200...299).contains(httpResponse.statusCode) else {
            // Surface the server's error body verbatim to aid debugging.
            let errorMessage = String(data: data, encoding: .utf8) ?? "No error message"
            throw CloudTranscriptionError.apiRequestFailed(statusCode: httpResponse.statusCode, message: errorMessage)
        }

        do {
            return try JSONDecoder().decode(TranscriptionResponse.self, from: data).text
        } catch {
            // A 2xx body we cannot decode is treated as "no transcription".
            throw CloudTranscriptionError.noTranscriptionReturned
        }
    }

    /// Resolves the endpoint URL, stored API key, and model identifier for a request.
    /// - Throws: `CloudTranscriptionError.missingAPIKey` when UserDefaults has no
    ///   non-empty "ElevenLabsAPIKey" entry.
    private func getAPIConfig(for model: any TranscriptionModel) throws -> APIConfig {
        guard let apiKey = UserDefaults.standard.string(forKey: "ElevenLabsAPIKey"), !apiKey.isEmpty else {
            throw CloudTranscriptionError.missingAPIKey
        }
        let apiURL = URL(string: "https://api.elevenlabs.io/v1/speech-to-text")!
        return APIConfig(url: apiURL, apiKey: apiKey, modelName: model.name)
    }

    /// Appends one plain-text multipart/form-data field to `body`.
    private func appendFormField(_ name: String, value: String, boundary: String, to body: inout Data) {
        body.append(Data("--\(boundary)\r\nContent-Disposition: form-data; name=\"\(name)\"\r\n\r\n\(value)\r\n".utf8))
    }

    /// Builds the multipart/form-data request body: the audio file plus the
    /// `model_id`, `tag_audio_events`, `temperature`, and (optionally)
    /// `language_code` fields expected by the ElevenLabs speech-to-text API.
    /// - Throws: `CloudTranscriptionError.audioFileNotFound` when the file at
    ///   `audioURL` cannot be read.
    private func createElevenLabsRequestBody(audioURL: URL, modelName: String, boundary: String) throws -> Data {
        guard let audioData = try? Data(contentsOf: audioURL) else {
            throw CloudTranscriptionError.audioFileNotFound
        }
        let crlf = "\r\n"
        var body = Data()

        // File part. NOTE(review): content type is hard-coded to audio/wav —
        // confirm the recorder always produces WAV before uploading other formats.
        body.append(Data("--\(boundary)\(crlf)".utf8))
        body.append(Data("Content-Disposition: form-data; name=\"file\"; filename=\"\(audioURL.lastPathComponent)\"\(crlf)".utf8))
        body.append(Data("Content-Type: audio/wav\(crlf)\(crlf)".utf8))
        body.append(audioData)
        body.append(Data(crlf.utf8))

        appendFormField("model_id", value: modelName, boundary: boundary, to: &body)
        // Disable audio event tagging; pin temperature to 0 for determinism.
        appendFormField("tag_audio_events", value: "false", boundary: boundary, to: &body)
        appendFormField("temperature", value: "0", boundary: boundary, to: &body)

        // Only send an explicit language code when the user picked one;
        // "auto" (or empty) lets the API detect the language itself.
        let selectedLanguage = UserDefaults.standard.string(forKey: "SelectedLanguage") ?? "auto"
        if selectedLanguage != "auto", !selectedLanguage.isEmpty {
            appendFormField("language_code", value: selectedLanguage, boundary: boundary, to: &body)
        }

        // Closing boundary terminates the multipart payload.
        body.append(Data("--\(boundary)--\(crlf)".utf8))
        return body
    }

    /// Resolved per-request configuration.
    private struct APIConfig {
        let url: URL
        let apiKey: String
        let modelName: String
    }

    /// Minimal decode target for the ElevenLabs speech-to-text response.
    /// (Removed the `x_groq` field — a copy-paste leftover from a Groq
    /// service; ElevenLabs responses never contain that key.)
    private struct TranscriptionResponse: Decodable {
        let text: String
        let language: String?
        let duration: Double?
    }
}