-
Notifications
You must be signed in to change notification settings - Fork 359
Expand file tree
/
Copy path__main__.py
More file actions
65 lines (60 loc) · 1.77 KB
/
__main__.py
File metadata and controls
65 lines (60 loc) · 1.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from .env BEFORE importing
# integuru.main, which may read them at import time — do not reorder.
load_dotenv()

import asyncio

import click

from integuru.main import call_agent
@click.command()
@click.option(
    "--model", default="gpt-4o", help="The LLM model to use (default is gpt-4o)"
)
@click.option("--prompt", required=True, help="The prompt for the model")
@click.option(
    "--har-path",
    default="./network_requests.har",
    help="The HAR file path (default is ./network_requests.har)",
)
@click.option(
    "--cookie-path",
    default="./cookies.json",
    help="The cookie file path (default is ./cookies.json)",
)
@click.option(
    "--max_steps", default=20, type=int, help="The max_steps (default is 20)"
)
@click.option(
    "--input_variables",
    multiple=True,
    type=(str, str),
    help="Input variables in the format key value",
)
@click.option(
    "--generate-code",
    is_flag=True,
    default=False,
    help="Whether to generate the full integration code",
)
@click.option(
    "--use-local-llm",
    is_flag=True,
    default=False,
    help="Whether to use a local LLM instead of sending data to OpenAI",
)
def cli(
    model, prompt, har_path, cookie_path, max_steps, input_variables, generate_code, use_local_llm
):
    """Run the Integuru agent over a recorded HAR file and cookie dump.

    All parameters arrive from the click options above; ``--input_variables``
    is a repeatable ``key value`` pair option that click delivers as a tuple
    of 2-tuples.
    """
    # call_agent expects a plain dict, not click's tuple of (key, value) pairs.
    input_vars = dict(input_variables)
    # call_agent is a coroutine; drive it to completion on a fresh event loop.
    asyncio.run(
        call_agent(
            model,
            prompt,
            har_path,
            cookie_path,
            input_variables=input_vars,
            max_steps=max_steps,
            to_generate_code=generate_code,
            use_local_llm=use_local_llm,
        )
    )


if __name__ == "__main__":
    # Defining `cli` at module level (instead of inside this guard, as before)
    # makes it importable and testable; the guard only triggers execution.
    cli()