diff --git a/README.md b/README.md index e5a348d8f..f3081be76 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,22 @@ AIHawk's core architecture remains **open source**, allowing developers to inspe +--- + +## Supported LLM Providers + +AIHawk supports multiple LLM providers. Set `LLM_MODEL_TYPE` in `config.py` and provide the corresponding API key in `data_folder/secrets.yaml`: + +| Provider | `LLM_MODEL_TYPE` | Example Model | +|----------|-------------------|---------------| +| OpenAI | `openai` | `gpt-4o-mini` | +| Anthropic Claude | `claude` | `claude-sonnet-4-20250514` | +| Google Gemini | `gemini` | `gemini-pro` | +| Ollama | `ollama` | `llama3` | +| HuggingFace | `huggingface` | `meta-llama/Meta-Llama-3-8B-Instruct` | +| Perplexity | `perplexity` | `llama-3.1-sonar-small-128k-online` | +| [MiniMax](https://www.minimax.io) | `minimax` | `MiniMax-M2` | + --- diff --git a/config.py b/config.py index 78d53e0cd..f35a065e9 100644 --- a/config.py +++ b/config.py @@ -16,6 +16,7 @@ JOB_MAX_APPLICATIONS = 5 JOB_MIN_APPLICATIONS = 1 +# Supported LLM providers: openai, claude, ollama, gemini, huggingface, perplexity, minimax LLM_MODEL_TYPE = 'openai' LLM_MODEL = 'gpt-4o-mini' # Only required for OLLAMA models diff --git a/src/libs/llm_manager.py b/src/libs/llm_manager.py index c7db55f2e..f173a7047 100644 --- a/src/libs/llm_manager.py +++ b/src/libs/llm_manager.py @@ -31,6 +31,7 @@ FINISH_REASON, GEMINI, HUGGINGFACE, + MINIMAX, ID, INPUT_TOKENS, INTERESTS, @@ -181,6 +182,23 @@ def invoke(self, prompt: str) -> BaseMessage: return response +class MiniMaxModel(AIModel): + def __init__(self, api_key: str, llm_model: str): + from langchain_openai import ChatOpenAI + + self.model = ChatOpenAI( + model_name=llm_model, + openai_api_key=api_key, + openai_api_base="https://api.minimax.io/v1", + temperature=0.4, + ) + + def invoke(self, prompt: str) -> BaseMessage: + logger.debug("Invoking MiniMax API") + response = self.model.invoke(prompt) + return response + + class 
AIAdapter: def __init__(self, config: dict, api_key: str): self.model = self._create_model(config, api_key) @@ -205,6 +223,8 @@ def _create_model(self, config: dict, api_key: str) -> AIModel: return HuggingFaceModel(api_key, llm_model) elif llm_model_type == PERPLEXITY: return PerplexityModel(api_key, llm_model) + elif llm_model_type == MINIMAX: + return MiniMaxModel(api_key, llm_model) else: raise ValueError(f"Unsupported model type: {llm_model_type}") @@ -213,7 +233,7 @@ def invoke(self, prompt: str) -> str: class LLMLogger: - def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel]): + def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel, MiniMaxModel]): self.llm = llm logger.debug(f"LLMLogger successfully initialized with LLM: {llm}") @@ -325,7 +345,7 @@ def log_request(prompts, parsed_reply: Dict[str, Dict]): class LoggerChatModel: - def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel]): + def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel, MiniMaxModel]): self.llm = llm logger.debug(f"LoggerChatModel successfully initialized with LLM: {llm}") diff --git a/src/utils/constants.py b/src/utils/constants.py index 54f34214e..28362f1de 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -75,3 +75,4 @@ GEMINI = "gemini" HUGGINGFACE = "huggingface" PERPLEXITY = "perplexity" +MINIMAX = "minimax"