@@ -16,6 +16,7 @@
"gpt-4-32k": "gpt-4-32k-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
"gpt-oss-120b": "gpt-oss-120b" ##### PALAK's change
}

_MODEL_INFO: Dict[str, ModelInfo] = {
@@ -121,6 +122,12 @@
"json_output": True,
"family": ModelFamily.GPT_35,
},
"gpt-oss-120b": {
"vision": False,
"function_calling": True,
"json_output": True,
"family": ModelFamily.UNKNOWN ###### PALAK's change
}
}

_MODEL_TOKEN_LIMITS: Dict[str, int] = {
@@ -141,6 +148,7 @@
"gpt-3.5-turbo-instruct": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-oss-120b": 131072, ##### PALAK's change
}
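Taken together, the three additions above register gpt-oss-120b in the alias map, the capability table, and the token-limit table. A minimal standalone sketch of how such a registration is typically consumed (the dictionaries are copied here so the lookups run in isolation; the helper functions are illustrative, not the autogen-ext API):

```python
# Standalone sketch: mirrors the three dictionaries above so the lookup flow
# can be exercised on its own. Helper names are illustrative only.
from typing import Dict

_MODEL_POINTERS: Dict[str, str] = {"gpt-oss-120b": "gpt-oss-120b"}
_MODEL_TOKEN_LIMITS: Dict[str, int] = {"gpt-oss-120b": 131072}
_MODEL_INFO: Dict[str, Dict[str, object]] = {
    "gpt-oss-120b": {
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": "unknown",  # stand-in for ModelFamily.UNKNOWN
    }
}


def resolve_model(model: str) -> str:
    # Aliases map to a pinned snapshot; gpt-oss-120b has none, so it maps to itself.
    return _MODEL_POINTERS.get(model, model)


def get_token_limit(model: str) -> int:
    return _MODEL_TOKEN_LIMITS[resolve_model(model)]


print(get_token_limit("gpt-oss-120b"))                                # 131072
print(_MODEL_INFO[resolve_model("gpt-oss-120b")]["function_calling"])  # True
```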


1 change: 1 addition & 0 deletions python/packages/autogen-magentic-one/README.md
@@ -84,6 +84,7 @@ You can install the Magentic-One package and then run the example code to see ho
4. The Magentic-One code uses code execution, so you need to have [Docker installed](https://docs.docker.com/engine/install/) to run any of the examples (a quick prerequisite check is sketched after the command below).
5. Magentic-One uses playwright to interact with web pages, so you need to install the playwright dependencies by running the following command:


```bash
playwright install --with-deps chromium
```
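Both prerequisites can be checked up front; a minimal sketch (not part of the repository, assuming standard installs that put the `docker` and `playwright` CLIs on PATH):

```python
# Sketch: verify the Docker and Playwright prerequisites described above.
import shutil
import subprocess

for tool in ("docker", "playwright"):
    if shutil.which(tool) is None:
        raise SystemExit(f"{tool} not found on PATH; see the setup steps above")
    subprocess.run([tool, "--version"], check=True)
print("Docker and Playwright CLIs are available.")
```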
31 changes: 16 additions & 15 deletions python/packages/autogen-magentic-one/examples/example.py
@@ -35,7 +35,7 @@ async def main(logs_dir: str, hil_mode: bool, save_screenshots: bool) -> None:

# Create an appropriate client
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model"
# assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model"  # disabled so non-gpt-4o models (e.g. gpt-oss-120b) can be used

async with DockerCommandLineCodeExecutor(work_dir=logs_dir) as code_executor:
# Register agents.
@@ -49,9 +49,9 @@
)
executor = AgentProxy(AgentId("Executor", "default"), runtime)

# Register agents.
await MultimodalWebSurfer.register(runtime, "WebSurfer", MultimodalWebSurfer)
web_surfer = AgentProxy(AgentId("WebSurfer", "default"), runtime)
# WebSurfer registration disabled in this example:
# await MultimodalWebSurfer.register(runtime, "WebSurfer", MultimodalWebSurfer)
# web_surfer = AgentProxy(AgentId("WebSurfer", "default"), runtime)

await FileSurfer.register(runtime, "file_surfer", lambda: FileSurfer(model_client=client))
file_surfer = AgentProxy(AgentId("file_surfer", "default"), runtime)
@@ -63,7 +63,8 @@
)
user_proxy = AgentProxy(AgentId("UserProxy", "default"), runtime)

agent_list = [web_surfer, coder, executor, file_surfer]
# agent_list = [web_surfer, coder, executor, file_surfer]
agent_list = [coder, executor, file_surfer]
if hil_mode:
agent_list.append(user_proxy)

@@ -82,16 +83,16 @@

runtime.start()

actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, type=MultimodalWebSurfer)
await actual_surfer.init(
model_client=client,
downloads_folder=logs_dir,
start_page="https://www.bing.com",
browser_channel="chromium",
headless=True,
debug_dir=logs_dir,
to_save_screenshots=save_screenshots,
)
# actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, type=MultimodalWebSurfer)
# await actual_surfer.init(
# model_client=client,
# downloads_folder=logs_dir,
# start_page="https://www.bing.com",
# browser_channel="chromium",
# headless=True,
# debug_dir=logs_dir,
# to_save_screenshots=save_screenshots,
# )

await runtime.send_message(RequestReplyMessage(), user_proxy.id)
await runtime.stop_when_idle()
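example.py builds its model client from the CHAT_COMPLETION_CLIENT_CONFIG environment variable, parsed as a JSON component config. A sketch of one possible value for running against gpt-oss-120b (the provider path follows autogen-ext's component config format; the endpoint, API key, and their values are placeholders for an OpenAI-compatible server, not settings confirmed by this diff):

```python
# Sketch: build a CHAT_COMPLETION_CLIENT_CONFIG value for example.py.
# Assumptions: an OpenAI-compatible endpoint serving gpt-oss-120b; the
# base_url and api_key values below are placeholders, not real settings.
import json
import os

config = {
    "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient",
    "config": {
        "model": "gpt-oss-120b",
        "base_url": "http://localhost:8000/v1",  # placeholder endpoint
        "api_key": "not-needed-for-local",       # placeholder key
    },
}
os.environ["CHAT_COMPLETION_CLIENT_CONFIG"] = json.dumps(config)
print(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])
```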