diff --git a/VoiceInk/Views/APIKeyManagementView.swift b/VoiceInk/Views/APIKeyManagementView.swift
index fa078b8..1169451 100644
--- a/VoiceInk/Views/APIKeyManagementView.swift
+++ b/VoiceInk/Views/APIKeyManagementView.swift
@@ -259,97 +259,6 @@ struct APIKeyManagementView: View {
                 .background(Color.secondary.opacity(0.05))
                 .cornerRadius(8)
             }
-            
-            // Ollama Information
-            DisclosureGroup {
-                VStack(alignment: .leading, spacing: 12) {
-                    // Important Warning about Model Size
-                    HStack(alignment: .top, spacing: 8) {
-                        Image(systemName: "exclamationmark.triangle.fill")
-                            .frame(width: 20)
-                            .foregroundColor(.orange)
-                        VStack(alignment: .leading, spacing: 2) {
-                            Text("Important: Model Selection")
-                                .font(.subheadline)
-                                .bold()
-                                .foregroundColor(.orange)
-                            Text("Smaller models (< 7B parameters) significantly impact transcription enhancement quality. For optimal results, use models with 14B+ parameters. Also reasoning models don't work with transcript enhancement. So avoid them.")
-                                .font(.caption)
-                                .foregroundColor(.secondary)
-                        }
-                    }
-                    .padding(8)
-                    .background(Color.orange.opacity(0.1))
-                    .cornerRadius(6)
-                    
-                    // Local Processing
-                    HStack(alignment: .top) {
-                        Image(systemName: "cpu")
-                            .frame(width: 20)
-                            .foregroundColor(.secondary)
-                        VStack(alignment: .leading, spacing: 2) {
-                            Text("Local Processing")
-                                .font(.subheadline)
-                                .bold()
-                            Text("Ollama runs entirely on your system, processing all text locally without sending data to external servers.")
-                                .font(.caption)
-                                .foregroundColor(.secondary)
-                        }
-                    }
-                    
-                    // System Requirements
-                    HStack(alignment: .top) {
-                        Image(systemName: "memorychip")
-                            .frame(width: 20)
-                            .foregroundColor(.secondary)
-                        VStack(alignment: .leading, spacing: 2) {
-                            Text("System Requirements")
-                                .font(.subheadline)
-                                .bold()
-                            Text("Local processing requires significant system resources. Larger, more capable models need more RAM (32GB+ recommended for optimal performance).")
-                                .font(.caption)
-                                .foregroundColor(.secondary)
-                        }
-                    }
-                    
-                    // Use Cases
-                    HStack(alignment: .top) {
-                        Image(systemName: "checkmark.shield")
-                            .frame(width: 20)
-                            .foregroundColor(.secondary)
-                        VStack(alignment: .leading, spacing: 2) {
-                            Text("Best For")
-                                .font(.subheadline)
-                                .bold()
-                            Text("• Privacy-focused users who need data to stay local\n• Systems with powerful hardware\n• Users who can prioritize quality over processing speed")
-                                .font(.caption)
-                                .foregroundColor(.secondary)
-                        }
-                    }
-                    
-                    // Recommendation Note
-                    HStack(alignment: .top) {
-                        Image(systemName: "lightbulb")
-                            .frame(width: 20)
-                            .foregroundColor(.secondary)
-                        VStack(alignment: .leading, spacing: 2) {
-                            Text("Recommendation")
-                                .font(.subheadline)
-                                .bold()
-                            Text("For optimal transcription enhancement, either use cloud providers or ensure you're using a larger local model (14B+ parameters). Smaller models may produce poor or inconsistent results.")
-                                .font(.caption)
-                                .foregroundColor(.secondary)
-                        }
-                    }
-                }
-            } label: {
-                Label("Important Information About Local AI", systemImage: "info.circle.fill")
-                    .font(.subheadline)
-                    .foregroundColor(.secondary)
-            }
-            .padding(8)
-            .background(Color.secondary.opacity(0.05))
-            .cornerRadius(8)
         }
         .padding(16)
         .background(Color.secondary.opacity(0.03))