| """ | |
| Jade Code IDE - LLM Providers | |
| Suporte a múltiplos providers: Groq, Cerebras, OpenRouter | |
| """ | |
| from abc import ABC, abstractmethod | |
| from typing import Optional, List, Dict, Any | |
| import httpx | |
| class LLMProvider(ABC): | |
| """Classe base para providers de LLM.""" | |
| def __init__(self, api_key: str): | |
| self.api_key = api_key | |
| def chat(self, messages: List[Dict], model: str, tools: Optional[List] = None) -> Dict: | |
| """Envia mensagem pro LLM e retorna resposta.""" | |
| pass | |
| def list_models(self) -> List[Dict]: | |
| """Lista modelos disponíveis.""" | |
| pass | |


class GroqProvider(LLMProvider):
    """Provider for the Groq API."""

    BASE_URL = "https://api.groq.com/openai/v1"

    MODELS = [
        {"id": "moonshotai/kimi-k2-instruct-0905", "name": "Kimi K2 Instruct", "context": 128000},
        {"id": "meta-llama/llama-4-maverick-17b-128e-instruct", "name": "Llama 4 Maverick 17B", "context": 128000},
        {"id": "llama-3.3-70b-versatile", "name": "Llama 3.3 70B", "context": 128000},
    ]

    def chat(self, messages: List[Dict], model: str, tools: Optional[List] = None) -> Dict:
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": model,
            "messages": messages,
            "temperature": 0.3,
            "max_tokens": 4096,
        }
        if tools:
            payload["tools"] = tools
            payload["tool_choice"] = "auto"
        with httpx.Client(timeout=60.0) as client:
            response = client.post(
                f"{self.BASE_URL}/chat/completions",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            return response.json()

    def list_models(self) -> List[Dict]:
        return self.MODELS
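
# Responses follow the OpenAI chat completions shape: the assistant
# reply sits at response["choices"][0]["message"], and when the model
# invokes a tool that message carries a "tool_calls" list instead of
# plain "content".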


class CerebrasProvider(LLMProvider):
    """Provider for the Cerebras API (extremely fast)."""

    BASE_URL = "https://api.cerebras.ai/v1"

    MODELS = [
        {"id": "gpt-oss-120b", "name": "GPT OSS 120B", "context": 128000},
        {"id": "qwen-3-235b-a22b-instruct-2507", "name": "Qwen 3 235B", "context": 128000},
        {"id": "zai-glm-4.6", "name": "GLM 4.6", "context": 128000},
        {"id": "llama3.1-70b", "name": "Llama 3.1 70B", "context": 128000},
    ]

    def chat(self, messages: List[Dict], model: str, tools: Optional[List] = None) -> Dict:
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": model,
            "messages": messages,
            "temperature": 0.3,
            "max_tokens": 4096,
        }
        # Cerebras also supports tool calling
        if tools:
            payload["tools"] = tools
            payload["tool_choice"] = "auto"
        with httpx.Client(timeout=60.0) as client:
            response = client.post(
                f"{self.BASE_URL}/chat/completions",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            return response.json()

    def list_models(self) -> List[Dict]:
        return self.MODELS


class OpenRouterProvider(LLMProvider):
    """Provider for OpenRouter (access to multiple models)."""

    BASE_URL = "https://openrouter.ai/api/v1"

    MODELS = [
        {"id": "xiaomi/mimo-v2-flash:free", "name": "Xiaomi Mimo V2 Flash", "context": 128000},
        {"id": "nex-agi/deepseek-v3.1-nex-n1:free", "name": "DeepSeek V3.1 Nex", "context": 128000},
        {"id": "qwen/qwen3-coder:free", "name": "Qwen 3 Coder", "context": 128000},
        {"id": "anthropic/claude-3.5-sonnet", "name": "Claude 3.5 Sonnet", "context": 200000},
        {"id": "google/gemini-2.0-flash-exp:free", "name": "Gemini 2.0 Flash (Free)", "context": 1000000},
    ]

    def chat(self, messages: List[Dict], model: str, tools: Optional[List] = None) -> Dict:
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # Optional OpenRouter headers for app attribution on their rankings
            "HTTP-Referer": "https://jade-code-ide.github.io",
            "X-Title": "Jade Code IDE",
        }
        payload = {
            "model": model,
            "messages": messages,
            "temperature": 0.3,
            "max_tokens": 4096,
        }
        if tools:
            payload["tools"] = tools
            payload["tool_choice"] = "auto"
        with httpx.Client(timeout=120.0) as client:
            response = client.post(
                f"{self.BASE_URL}/chat/completions",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            return response.json()

    def list_models(self) -> List[Dict]:
        return self.MODELS
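
# A minimal sketch of pulling tool calls out of a provider response.
# `extract_tool_calls` is a hypothetical helper, not part of any
# provider API; it assumes the OpenAI-compatible response shape that
# all three providers above return.
def extract_tool_calls(response: Dict) -> List[Dict]:
    """Return [{"name": ..., "arguments": {...}}] for each tool call, if any."""
    import json  # local import to keep this sketch self-contained

    message = response["choices"][0]["message"]
    calls = []
    for call in message.get("tool_calls") or []:
        calls.append({
            "name": call["function"]["name"],
            # OpenAI-style APIs return arguments as a JSON-encoded string
            "arguments": json.loads(call["function"]["arguments"]),
        })
    return calls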


# === Factory ===

def get_provider(provider_name: str, api_key: str) -> LLMProvider:
    """Return the right provider for the given name."""
    providers = {
        "groq": GroqProvider,
        "cerebras": CerebrasProvider,
        "openrouter": OpenRouterProvider,
    }
    provider_class = providers.get(provider_name.lower())
    if not provider_class:
        raise ValueError(f"Provider '{provider_name}' not supported. Use: {list(providers.keys())}")
    return provider_class(api_key)


def list_all_providers() -> List[Dict]:
    """List all available providers."""
    return [
        {"id": "groq", "name": "Groq", "description": "Fast and free"},
        {"id": "cerebras", "name": "Cerebras", "description": "Ultra fast"},
        {"id": "openrouter", "name": "OpenRouter", "description": "Access to multiple models"},
    ]
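

# Minimal usage sketch, assuming a Groq key in the GROQ_API_KEY
# environment variable (the variable name is an assumption, not part
# of this module).
if __name__ == "__main__":
    import os

    provider = get_provider("groq", os.environ["GROQ_API_KEY"])
    result = provider.chat(
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        model=provider.list_models()[0]["id"],
    )
    print(result["choices"][0]["message"]["content"])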