update minimax to official api
README.md
CHANGED
@@ -19,7 +19,7 @@ AnyCoder is an AI-powered code generator that helps you create applications by d
 
 ## Features
 
-- **Multi-Model Support**: Choose from Moonshot Kimi-K2, Kimi K2 Turbo (Preview), DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax
+- **Multi-Model Support**: Choose from Moonshot Kimi-K2, Kimi K2 Turbo (Preview), DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M2, Qwen3-235B-A22B, Qwen3-30B-A3B-Instruct-2507, Qwen3-30B-A3B-Thinking-2507, SmolLM3-3B, GLM-4.1V-9B-Thinking, Gemini 2.5 Flash and Gemini 2.5 Pro (OpenAI-compatible)
 - Claude-Opus-4.1 (via Poe)
 - **Flexible Input**: Describe your app in text, upload a UI design image (for multimodal models), provide a reference file (PDF, TXT, MD, CSV, DOCX, or image), or enter a website URL for redesign
 - **Web Search Integration**: Enable real-time web search (Tavily, with advanced search depth) to enhance code generation with up-to-date information and best practices
@@ -48,6 +48,7 @@ export DASHSCOPE_API_KEY="your_dashscope_api_key" # Required for Qwen3-30B mode
 export POE_API_KEY="your_poe_api_key" # Required for GPT-5, Grok-4, and Grok-Code-Fast-1 via Poe
 export GEMINI_API_KEY="your_gemini_api_key" # Required for Gemini models
 export MOONSHOT_API_KEY="your_moonshot_api_key" # Required for Kimi models
+export MINIMAX_API_KEY="your_minimax_api_key" # Required for MiniMax M2 model
 ```
 
 ## Usage
@@ -77,7 +78,7 @@ python app.py
 - DeepSeek V3.1 Terminus
 - DeepSeek V3.2-Exp
 - DeepSeek R1
-- MiniMax
+- MiniMax M2
 - Qwen3-235B-A22B
 - Qwen3-4B-Instruct-2507
 - Qwen3-4B-Thinking-2507
@@ -125,6 +126,7 @@ python app.py
 - `HF_TOKEN`: Your Hugging Face API token (required)
 - `GEMINI_API_KEY`: Your Google Gemini API key (required to use Gemini models)
 - `MOONSHOT_API_KEY`: Your Moonshot AI API key (required to use Kimi models)
+- `MINIMAX_API_KEY`: Your MiniMax API key (required to use MiniMax M2 model)
 
 ## Project Structure
 
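The commit adds `MINIMAX_API_KEY` to the environment variables documented in the README. Below is a minimal, illustrative pre-launch check (not part of the repository; the key names are taken from the README above, everything else is an assumption) that the relevant keys are exported before running `python app.py`:

```python
# check_env.py -- illustrative helper, not part of the AnyCoder repo.
# Verifies that the provider keys listed in the README are exported.
# Which optional keys you actually need depends on the models you use.
import os
import sys

REQUIRED = ["HF_TOKEN"]                 # required per the README
OPTIONAL = [
    "MOONSHOT_API_KEY",   # Kimi models
    "GEMINI_API_KEY",     # Gemini models
    "POE_API_KEY",        # GPT-5 / Grok models via Poe
    "MINIMAX_API_KEY",    # MiniMax M2 (added in this commit)
]

missing_required = [k for k in REQUIRED if not os.getenv(k)]
missing_optional = [k for k in OPTIONAL if not os.getenv(k)]

if missing_required:
    sys.exit(f"Missing required keys: {', '.join(missing_required)}")
if missing_optional:
    print("Not set (models that need them will be unavailable):",
          ", ".join(missing_optional))
print("Environment looks OK for the configured providers.")
```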
app.py
CHANGED
@@ -2427,13 +2427,13 @@ AVAILABLE_MODELS = [
         "description": "Qwen3 Max Preview model via DashScope International API"
     },
     {
-        "name": "MiniMax M2
-        "id": "
-        "description": "MiniMax M2
+        "name": "MiniMax M2",
+        "id": "MiniMax-M2",
+        "description": "MiniMax M2 model via MiniMax API for code generation and general tasks"
     }
 ]
 # Default model selection
-DEFAULT_MODEL_NAME = "MiniMax M2
+DEFAULT_MODEL_NAME = "MiniMax M2"
 DEFAULT_MODEL = None
 for _m in AVAILABLE_MODELS:
     if _m.get("name") == DEFAULT_MODEL_NAME:
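This hunk registers the official `MiniMax M2` entry in `AVAILABLE_MODELS` and points `DEFAULT_MODEL_NAME` at it; the diff cuts off inside the lookup loop. A sketch of how that resolution pattern typically completes, assuming the loop assigns the matching entry and falls back to the first registered model when the name is absent (only the lines shown above are confirmed by the diff; the loop body and the fallback are assumptions):

```python
# Sketch of the default-model resolution shown (partially) above.
# The registry entry and DEFAULT_MODEL_NAME come from the diff;
# the loop body and the AVAILABLE_MODELS[0] fallback are assumptions.
AVAILABLE_MODELS = [
    # ... other entries ...
    {
        "name": "MiniMax M2",
        "id": "MiniMax-M2",
        "description": "MiniMax M2 model via MiniMax API for code generation and general tasks",
    },
]

DEFAULT_MODEL_NAME = "MiniMax M2"
DEFAULT_MODEL = None
for _m in AVAILABLE_MODELS:
    if _m.get("name") == DEFAULT_MODEL_NAME:
        DEFAULT_MODEL = _m
        break
if DEFAULT_MODEL is None:          # assumed fallback if the name is missing
    DEFAULT_MODEL = AVAILABLE_MODELS[0]
```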
@@ -2522,11 +2522,11 @@ def get_inference_client(model_id, provider="auto"):
             api_key=os.getenv("OPENROUTER_API_KEY"),
             base_url="https://openrouter.ai/api/v1",
         )
-    elif model_id == "
-        # Use
+    elif model_id == "MiniMax-M2":
+        # Use MiniMax API client for MiniMax M2 model
         return OpenAI(
-            api_key=os.getenv("
-            base_url="https://
+            api_key=os.getenv("MINIMAX_API_KEY"),
+            base_url="https://api.minimax.io/v1",
         )
     elif model_id == "step-3":
         # Use StepFun API client for Step-3 model
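The new branch in `get_inference_client` returns a stock `openai.OpenAI` client pointed at `https://api.minimax.io/v1` and authenticated with `MINIMAX_API_KEY`, so MiniMax M2 is consumed through the OpenAI-compatible chat-completions interface. A minimal usage sketch under that assumption (the prompt, parameters, and call site are illustrative and not taken from app.py):

```python
# Illustrative use of the client returned for model_id == "MiniMax-M2".
# Assumes MINIMAX_API_KEY is exported and that the endpoint is
# OpenAI-compatible, as the OpenAI(...) + base_url pairing above implies.
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("MINIMAX_API_KEY"),
    base_url="https://api.minimax.io/v1",
)

resp = client.chat.completions.create(
    model="MiniMax-M2",                  # id registered in AVAILABLE_MODELS
    messages=[
        {"role": "system", "content": "You are a helpful coding assistant."},
        {"role": "user", "content": "Write a hello-world Flask app."},
    ],
    temperature=0.7,                     # illustrative parameter
)
print(resp.choices[0].message.content)
```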