migrate to gitea

jonasgaudian
2026-02-13 00:15:36 +01:00
commit 269cc9e417
407 changed files with 66841 additions and 0 deletions

@@ -0,0 +1,249 @@
{
  "providers": [
    {
      "key": "together",
      "displayName": "Together AI",
      "baseUrl": "https://api.together.xyz/v1/",
      "endpoint": "chat/completions",
      "websiteUrl": "https://www.together.ai/",
      "isCustom": false,
      "models": [
        {
          "modelId": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
          "displayName": "Llama 3.3 70B Turbo",
          "provider": "together",
          "description": "Fast, accurate, and cost-effective open-source model."
        },
        {
          "modelId": "meta-llama/Llama-4-Maverick-17B-Instruct",
          "displayName": "Llama 4 Maverick 17B",
          "provider": "together",
          "description": "Next-gen efficient architecture; outperforms older 70B models."
        },
        {
          "modelId": "deepseek-ai/DeepSeek-V3",
          "displayName": "DeepSeek V3",
          "provider": "together",
          "description": "Top-tier open-source model specializing in code and logic."
        }
      ]
    },
    {
      "key": "mistral",
      "displayName": "Mistral AI",
      "baseUrl": "https://api.mistral.ai/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://mistral.ai",
      "isCustom": false,
      "models": [
        {
          "modelId": "ministral-8b-latest",
          "displayName": "Ministral 8B",
          "provider": "mistral",
          "description": "Extremely efficient edge model for low-latency tasks."
        },
        {
          "modelId": "mistral-large-latest",
          "displayName": "Mistral Large 3",
          "provider": "mistral",
          "description": "Flagship model with top-tier reasoning and multilingual capabilities."
        }
      ]
    },
    {
      "key": "openai",
      "displayName": "OpenAI",
      "baseUrl": "https://api.openai.com/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://platform.openai.com/",
      "isCustom": false,
      "models": [
        {
          "modelId": "gpt-5.1-instant",
          "displayName": "GPT-5.1 Instant",
          "provider": "openai",
          "description": "The standard high-speed efficiency model replacing older 'Nano' tiers."
        },
        {
          "modelId": "gpt-5-nano",
          "displayName": "GPT-5 Nano",
          "provider": "openai",
          "description": "Fast and cheap model sufficient for most tasks."
        }
      ]
    },
    {
      "key": "anthropic",
      "displayName": "Anthropic",
      "baseUrl": "https://api.anthropic.com/",
      "endpoint": "v1/messages",
      "websiteUrl": "https://www.anthropic.com/",
      "isCustom": false,
      "models": [
        {
          "modelId": "claude-sonnet-5-20260203",
          "displayName": "Claude Sonnet 5",
          "provider": "anthropic",
          "description": "Latest stable workhorse (Feb 2026), balancing speed and top-tier reasoning."
        },
        {
          "modelId": "claude-4.5-haiku",
          "displayName": "Claude 4.5 Haiku",
          "provider": "anthropic",
          "description": "Fastest Claude model for pure speed and simple tasks."
        }
      ]
    },
    {
      "key": "deepseek",
      "displayName": "DeepSeek",
      "baseUrl": "https://api.deepseek.com/",
      "endpoint": "chat/completions",
      "websiteUrl": "https://www.deepseek.com/",
      "isCustom": false,
      "models": [
        {
          "modelId": "deepseek-reasoner",
          "displayName": "DeepSeek R1",
          "provider": "deepseek",
          "description": "Reasoning-focused model (Chain of Thought) for complex math/code."
        },
        {
          "modelId": "deepseek-chat",
          "displayName": "DeepSeek V3",
          "provider": "deepseek",
          "description": "General purpose chat model, specialized in code and reasoning."
        }
      ]
    },
    {
      "key": "gemini",
      "displayName": "Google Gemini",
      "baseUrl": "https://generativelanguage.googleapis.com/",
      "endpoint": "v1beta/models/gemini-3-flash-preview:generateContent",
      "websiteUrl": "https://ai.google/",
      "isCustom": false,
      "models": [
        {
          "modelId": "gemini-3-flash-preview",
          "displayName": "Gemini 3 Flash",
          "provider": "gemini",
          "description": "Current default: Massive context, grounded, and extremely fast."
        },
        {
          "modelId": "gemini-3-pro-preview",
          "displayName": "Gemini 3 Pro",
          "provider": "gemini",
          "description": "Top-tier reasoning model for complex agentic workflows."
        }
      ]
    },
    {
      "key": "openrouter",
      "displayName": "OpenRouter",
      "baseUrl": "https://openrouter.ai/api/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://openrouter.ai",
      "isCustom": false,
      "models": []
    },
    {
      "key": "groq",
      "displayName": "Groq",
      "baseUrl": "https://api.groq.com/openai/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://groq.com/",
      "isCustom": false,
      "models": [
        {
          "modelId": "llama-4-scout-17b",
          "displayName": "Llama 4 Scout",
          "provider": "groq",
          "description": "Powerful Llama 4 model running at extreme speed."
        },
        {
          "modelId": "llama-3.3-70b-versatile",
          "displayName": "Llama 3.3 70B",
          "provider": "groq",
          "description": "Previous gen flagship, highly reliable and fast on Groq chips."
        }
      ]
    },
    {
      "key": "xai",
      "displayName": "xAI Grok",
      "baseUrl": "https://api.x.ai/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://x.ai",
      "isCustom": false,
      "models": [
        {
          "modelId": "grok-4-1-fast-reasoning",
          "displayName": "Grok 4.1 Fast",
          "provider": "xai",
          "description": "Fast, flexible, and capable of reasoning."
        }
      ]
    },
    {
      "key": "nvidia",
      "displayName": "NVIDIA NIM",
      "baseUrl": "https://integrate.api.nvidia.com/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://build.nvidia.com/explore",
      "isCustom": false,
      "models": [
        {
          "modelId": "meta/llama-3.3-70b-instruct",
          "displayName": "Llama 3.3 70B",
          "provider": "nvidia",
          "description": "Standard high-performance open model accelerated by NVIDIA."
        }
      ]
    },
    {
      "key": "cerebras",
      "displayName": "Cerebras",
      "baseUrl": "https://api.cerebras.ai/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://inference.cerebras.ai/",
      "isCustom": false,
      "models": [
        {
          "modelId": "llama-3.3-70b",
          "displayName": "Llama 3.3 70B (Instant)",
          "provider": "cerebras",
          "description": "World's fastest inference (2000+ tokens/sec) on Wafer-Scale Engines."
        },
        {
          "modelId": "llama3.1-8b",
          "displayName": "Llama 3.1 8B",
          "provider": "cerebras",
          "description": "Instant speed for simple tasks."
        }
      ]
    },
    {
      "key": "huggingface",
      "displayName": "Hugging Face",
      "baseUrl": "https://router.huggingface.co/",
      "endpoint": "v1/chat/completions",
      "websiteUrl": "https://huggingface.co/settings/tokens",
      "isCustom": false,
      "models": [
        {
          "modelId": "meta-llama/Llama-3.3-70B-Instruct",
          "displayName": "Llama 3.3 70B",
          "provider": "huggingface",
          "description": "Hosted via the Hugging Face serverless router (Free tier limits apply)."
        },
        {
          "modelId": "microsoft/Phi-3.5-mini-instruct",
          "displayName": "Phi 3.5 Mini",
          "provider": "huggingface",
          "description": "Highly capable small model from Microsoft."
        }
      ]
    }
  ]
}
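
The application that consumes this file is not part of the diff shown here, so the following is only a minimal sketch, assuming a Node/TypeScript consumer: it loads the providers file, builds the request URL by concatenating baseUrl and endpoint (note that the Together entry keeps the /v1/ segment in baseUrl while most other entries keep it in endpoint), and sends an OpenAI-style chat completion. The Provider and ModelEntry interfaces mirror the fields above; requestUrl, chat, and the KEY_API_KEY environment variables are hypothetical names, and the Bearer-token auth only applies to the OpenAI-compatible providers (Anthropic and Gemini authenticate differently).

// Sketch only: field names come from the providers config above; helper names
// and the ${KEY}_API_KEY environment variables are hypothetical.
import { readFileSync } from "node:fs";

interface ModelEntry {
  modelId: string;
  displayName: string;
  provider: string;
  description: string;
}

interface Provider {
  key: string;
  displayName: string;
  baseUrl: string;
  endpoint: string;
  websiteUrl: string;
  isCustom: boolean;
  models: ModelEntry[];
}

const { providers } = JSON.parse(
  readFileSync("providers.json", "utf8")
) as { providers: Provider[] };

// baseUrl and endpoint concatenate directly into the request URL.
function requestUrl(p: Provider): string {
  return p.baseUrl + p.endpoint;
}

// OpenAI-compatible chat completion; Anthropic (x-api-key header) and Gemini
// (key query parameter) use different auth and payloads, not covered here.
async function chat(p: Provider, modelId: string, prompt: string) {
  const res = await fetch(requestUrl(p), {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env[`${p.key.toUpperCase()}_API_KEY`] ?? ""}`,
    },
    body: JSON.stringify({
      model: modelId,
      messages: [{ role: "user", content: prompt }],
    }),
  });
  return res.json();
}

// Example: query the first Groq model listed in the config.
const groq = providers.find((p) => p.key === "groq")!;
chat(groq, groq.models[0].modelId, "Hello!").then(console.log);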