16 changes: 16 additions & 0 deletions README.md
@@ -9,6 +9,22 @@ AIHawk's core architecture remains **open source**, allowing developers to inspe



---

## Supported LLM Providers

AIHawk supports multiple LLM providers. Set `LLM_MODEL_TYPE` in `config.py` and provide the corresponding API key in `data_folder/secrets.yaml`:

| Provider | `LLM_MODEL_TYPE` | Example Model |
|----------|-------------------|---------------|
| OpenAI | `openai` | `gpt-4o-mini` |
| Anthropic Claude | `claude` | `claude-sonnet-4-20250514` |
| Google Gemini | `gemini` | `gemini-pro` |
| Ollama | `ollama` | `llama3` |
| HuggingFace | `huggingface` | `meta-llama/Meta-Llama-3-8B-Instruct` |
| Perplexity | `perplexity` | `llama-3.1-sonar-small-128k-online` |
| [MiniMax](https://www.minimax.io) | `minimax` | `MiniMax-M2.7` |
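
A minimal sketch of a MiniMax setup in `config.py`, using the example model name from the table above (the exact key name expected in `data_folder/secrets.yaml` is not shown here and may differ):

```python
# config.py -- minimal sketch; 'MiniMax-M2.7' is the example model from the table above
LLM_MODEL_TYPE = 'minimax'
LLM_MODEL = 'MiniMax-M2.7'
# The corresponding API key goes in data_folder/secrets.yaml.
```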

---


1 change: 1 addition & 0 deletions config.py
@@ -16,6 +16,7 @@
JOB_MAX_APPLICATIONS = 5
JOB_MIN_APPLICATIONS = 1

# Supported LLM providers: openai, claude, ollama, gemini, huggingface, perplexity, minimax
LLM_MODEL_TYPE = 'openai'
LLM_MODEL = 'gpt-4o-mini'
# Only required for OLLAMA models
24 changes: 22 additions & 2 deletions src/libs/llm_manager.py
@@ -31,6 +31,7 @@
    FINISH_REASON,
    GEMINI,
    HUGGINGFACE,
    MINIMAX,
    ID,
    INPUT_TOKENS,
    INTERESTS,
@@ -181,6 +182,23 @@ def invoke(self, prompt: str) -> BaseMessage:
        return response


class MiniMaxModel(AIModel):
    def __init__(self, api_key: str, llm_model: str):
        # MiniMax is treated as an OpenAI-compatible endpoint, so ChatOpenAI is
        # reused with a custom base URL rather than a dedicated client.
        from langchain_openai import ChatOpenAI

        self.model = ChatOpenAI(
            model_name=llm_model,
            openai_api_key=api_key,
            openai_api_base="https://api.minimax.io/v1",
            temperature=0.4,
        )

    def invoke(self, prompt: str) -> BaseMessage:
        logger.debug("Invoking MiniMax API")
        response = self.model.invoke(prompt)
        return response


class AIAdapter:
    def __init__(self, config: dict, api_key: str):
        self.model = self._create_model(config, api_key)
@@ -205,6 +223,8 @@ def _create_model(self, config: dict, api_key: str) -> AIModel:
            return HuggingFaceModel(api_key, llm_model)
        elif llm_model_type == PERPLEXITY:
            return PerplexityModel(api_key, llm_model)
        elif llm_model_type == MINIMAX:
            return MiniMaxModel(api_key, llm_model)
        else:
            raise ValueError(f"Unsupported model type: {llm_model_type}")

@@ -213,7 +233,7 @@ def invoke(self, prompt: str) -> str:


class LLMLogger:
    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel]):
    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel, MiniMaxModel]):
        self.llm = llm
        logger.debug(f"LLMLogger successfully initialized with LLM: {llm}")

@@ -325,7 +345,7 @@ def log_request(prompts, parsed_reply: Dict[str, Dict]):


class LoggerChatModel:
    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel]):
    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel, GeminiModel, MiniMaxModel]):
        self.llm = llm
        logger.debug(f"LoggerChatModel successfully initialized with LLM: {llm}")

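A minimal usage sketch of the new provider path, assuming the config dict passed to `AIAdapter` carries the model type and model name under keys matching the constants above; the key names and import path here are illustrative, and the bot's actual wiring may differ:

```python
# Hypothetical wiring -- key names and import path are assumptions, not taken from this PR.
from src.libs.llm_manager import AIAdapter

config = {
    "llm_model_type": "minimax",   # routed to MiniMaxModel by _create_model
    "llm_model": "MiniMax-M2.7",   # example model name from the README table
}
adapter = AIAdapter(config, api_key="YOUR_MINIMAX_API_KEY")  # key read from data_folder/secrets.yaml in the real flow
print(adapter.invoke("Summarize this job posting in one sentence."))
```
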
1 change: 1 addition & 0 deletions src/utils/constants.py
@@ -75,3 +75,4 @@
GEMINI = "gemini"
HUGGINGFACE = "huggingface"
PERPLEXITY = "perplexity"
MINIMAX = "minimax"