update providers_config.json models and refactor IntroFlow.kt UI

jonasgaudian
2026-02-15 20:53:21 +01:00
parent f4fcffe90a
commit 7d18f8eb04
4 changed files with 103 additions and 106 deletions

providers_config.json

@@ -58,17 +58,17 @@
"websiteUrl": "https://platform.openai.com/",
"isCustom": false,
"models": [
+ {
+ "modelId": "gpt-5.2",
+ "displayName": "GPT-5.2",
+ "provider": "openai",
+ "description": "Balanced performance with enhanced reasoning and creativity."
+ },
{
"modelId": "gpt-5.1-instant",
"displayName": "GPT-5.1 Instant",
"provider": "openai",
"description": "The standard high-speed efficiency model replacing older 'Nano' tiers."
- },
- {
- "modelId": "gpt-5-nano",
- "displayName": "GPT-5 Nano",
- "provider": "openai",
- "description": "Fast and cheap model sufficient for most tasks."
}
]
},
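
Every model entry edited in this file carries the same four fields (modelId, displayName, provider, description). A minimal Kotlin sketch of a matching data class, assuming the config is deserialized with kotlinx.serialization (the class name is hypothetical and not taken from this commit):

    import kotlinx.serialization.Serializable

    // Hypothetical class; the field names mirror the JSON entries above.
    @Serializable
    data class ModelEntry(
        val modelId: String,      // API identifier, e.g. "gpt-5.2"
        val displayName: String,  // human-readable name for the UI
        val provider: String,     // back-reference to the provider key, e.g. "openai"
        val description: String   // one-line blurb describing the model
    )

If kotlinx.serialization is indeed in play, a models array like the one above would decode with Json.decodeFromString<List<ModelEntry>>(modelsJson).
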
@@ -120,15 +120,15 @@
"key": "gemini",
"displayName": "Google Gemini",
"baseUrl": "https://generativelanguage.googleapis.com/",
- "endpoint": "v1beta/models/gemini-3-flash-preview:generateContent",
+ "endpoint": "v1beta/models/gemini-2.5-pro:generateContent",
"websiteUrl": "https://ai.google/",
"isCustom": false,
"models": [
{
- "modelId": "gemini-3-flash-preview",
- "displayName": "Gemini 3 Flash",
+ "modelId": "gemini-2.5-pro",
+ "displayName": "Gemini 2.5 Pro",
"provider": "gemini",
- "description": "Current default: Massive context, grounded, and extremely fast."
+ "description": "Stable release: State-of-the-art reasoning with 1M context."
},
{
"modelId": "gemini-3-pro-preview",
@@ -155,12 +155,6 @@
"websiteUrl": "https://groq.com/",
"isCustom": false,
"models": [
- {
- "modelId": "llama-4-scout-17b",
- "displayName": "Llama 4 Scout",
- "provider": "groq",
- "description": "Powerful Llama 4 model running at extreme speed."
- },
{
"modelId": "meta-llama/llama-4-maverick",
"displayName": "Llama 4 Maverick",
@@ -216,10 +210,10 @@
"description": "World's fastest inference (2000+ tokens/sec) on Wafer-Scale Engines."
},
{
- "modelId": "llama3.1-8b",
- "displayName": "Llama 3.1 8B",
+ "modelId": "llama-4-scout",
+ "displayName": "Llama 4 Scout",
"provider": "cerebras",
- "description": "Instant speed for simple tasks."
+ "description": "High-quality 17B active param model running at 2,600 tokens/sec."
}
]
},
@@ -238,10 +232,10 @@
"description": "Hosted via the Hugging Face serverless router (Free tier limits apply)."
},
{
- "modelId": "microsoft/Phi-3.5-mini-instruct",
- "displayName": "Phi 3.5 Mini",
+ "modelId": "Qwen/Qwen2.5-72B-Instruct",
+ "displayName": "Qwen 2.5 72B",
"provider": "huggingface",
- "description": "Highly capable small model from Microsoft."
+ "description": "High-quality open model with excellent reasoning and multilingual capabilities."
}
]
}
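
The Gemini hunk above changes two values that have to stay in sync: the provider-level endpoint and the matching modelId in its models list. Presumably the request URL is simply baseUrl joined with endpoint; a small sketch using the updated values (the helper name is hypothetical, and the actual request code is not part of this commit):

    // Hypothetical helper: join the two config fields into one request URL.
    fun requestUrl(baseUrl: String, endpoint: String): String =
        baseUrl.trimEnd('/') + "/" + endpoint.trimStart('/')

    fun main() {
        // Values from the updated Gemini provider above.
        println(
            requestUrl(
                "https://generativelanguage.googleapis.com/",
                "v1beta/models/gemini-2.5-pro:generateContent"
            )
        )
        // -> https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent
    }

The updated file can also be sanity-checked without the app's own config classes by walking it with kotlinx.serialization's JsonElement API. This sketch assumes the top level is a plain array of provider objects, which the hunks above suggest but do not confirm:

    import kotlinx.serialization.json.Json
    import kotlinx.serialization.json.jsonArray
    import kotlinx.serialization.json.jsonObject
    import kotlinx.serialization.json.jsonPrimitive

    // Prints each provider key together with the modelIds listed under it.
    // Assumes a top-level JSON array; adjust the navigation if the real file
    // wraps the providers in another object.
    fun printProviderModels(configJson: String) {
        val providers = Json.parseToJsonElement(configJson).jsonArray
        for (provider in providers) {
            val obj = provider.jsonObject
            val key = obj["key"]?.jsonPrimitive?.content ?: continue
            val models = obj["models"]?.jsonArray ?: continue
            val modelIds = models.map { it.jsonObject["modelId"]?.jsonPrimitive?.content }
            println("$key: ${modelIds.joinToString()}")
        }
    }
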