""" |
|
|
Centralized Gemini API configuration. |
|
|
Allows users to configure model settings from .env file. |
|
|
""" |
|
|
|
|
|
import os |
|
|
from typing import Optional |
|
|
from dotenv import load_dotenv |
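
# Load environment variables from a .env file into the process environment
# so the os.getenv lookups below can read them.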
load_dotenv()


class GeminiConfig:
    """Centralized configuration for Gemini API."""
    DEFAULT_MODEL = "gemini-2.5-flash"
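
    # Values read from the environment (populated by load_dotenv above).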
    MODEL_NAME: str = os.getenv("GEMINI_MODEL", DEFAULT_MODEL)
    API_KEY: str = os.getenv("GEMINI_API_KEY", "")
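
    # Temperature presets: lower values give more deterministic output,
    # higher values more varied output.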
    TEMPERATURE_PRECISE = 0.0
    TEMPERATURE_LOW = 0.1
    TEMPERATURE_MEDIUM = 0.2
    TEMPERATURE_HIGH = 0.7
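
    # Output token limits for small, medium, and large responses.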
    MAX_OUTPUT_TOKENS_SMALL = 8192
    MAX_OUTPUT_TOKENS_MEDIUM = 16384
    MAX_OUTPUT_TOKENS_LARGE = 32768
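
    # Retry settings intended for callers that wrap API calls; RETRY_DELAY is
    # presumably in seconds.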
    MAX_RETRIES = 3
    RETRY_DELAY = 1.0

    @classmethod
    def validate(cls) -> bool:
        """Validate that required configuration is present."""
        if not cls.API_KEY:
            raise ValueError(
                "GEMINI_API_KEY not found in environment variables. "
                "Please set it in your .env file."
            )
        return True

    @classmethod
    def get_model_name(cls) -> str:
        """Get the configured model name."""
        return cls.MODEL_NAME

    @classmethod
    def get_api_key(cls) -> str:
        """Get the API key, validating that it is set."""
        cls.validate()
        return cls.API_KEY

    @classmethod
    def get_base_config(cls, temperature: float = TEMPERATURE_LOW,
                        max_tokens: int = MAX_OUTPUT_TOKENS_MEDIUM) -> dict:
        """
        Get base configuration for Gemini API calls.

        Args:
            temperature: Temperature setting (0.0-1.0)
            max_tokens: Maximum output tokens

        Returns:
            Configuration dictionary
        """
        return {
            "temperature": temperature,
            "max_output_tokens": max_tokens,
            "top_p": 0.95,
        }

    @classmethod
    def get_json_config(cls, schema: dict,
                        temperature: float = TEMPERATURE_PRECISE,
                        max_tokens: int = MAX_OUTPUT_TOKENS_MEDIUM) -> dict:
        """
        Get configuration for JSON schema-enforced responses.

        Args:
            schema: JSON schema dictionary
            temperature: Temperature setting (default: 0.0 for precision)
            max_tokens: Maximum output tokens

        Returns:
            Configuration dictionary with schema enforcement
        """
        config = cls.get_base_config(temperature, max_tokens)
        config.update({
            "response_mime_type": "application/json",
            "response_schema": schema
        })
        return config
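

# Example usage (a minimal sketch; assumes the google-generativeai SDK, which
# this module does not itself import, so adapt it to whichever Gemini client
# the project actually uses):
#
#   import google.generativeai as genai
#
#   genai.configure(api_key=GeminiConfig.get_api_key())
#   model = genai.GenerativeModel(
#       model_name=GeminiConfig.get_model_name(),
#       generation_config=GeminiConfig.get_base_config(),
#   )
#   response = model.generate_content("Summarize the config module.")
#
#   For schema-enforced JSON, pass GeminiConfig.get_json_config(schema) as the
#   generation_config instead.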