Add gpt-4o-mini, update examples (#253)

This commit is contained in:
Eric Zhu 2024-07-23 18:05:16 -07:00 committed by GitHub
parent 2288aee72b
commit 176932ef9c
12 changed files with 29 additions and 22 deletions

View File

@ -71,7 +71,7 @@ async def main():
runtime = SingleThreadedAgentRuntime()
agent = await runtime.register_and_get(
"simple-agent",
lambda: SimpleAgent(OpenAIChatCompletionClient(model="gpt-3.5-turbo", api_key="YOUR_API_KEY")),
lambda: SimpleAgent(OpenAIChatCompletionClient(model="gpt-4o-mini", api_key="YOUR_API_KEY")),
# Leave out the api_key field if you have the API key in the environment variable OPENAI_API_KEY
)
# Start the runtime processing messages.
@ -257,7 +257,7 @@ async def main() -> None:
agent = await runtime.register_and_get(
"tool-agent",
lambda: ToolAgent(
OpenAIChatCompletionClient(model="gpt-3.5-turbo", api_key="YOUR_API_KEY"),
OpenAIChatCompletionClient(model="gpt-4o-mini", api_key="YOUR_API_KEY"),
tools=[
FunctionTool(get_stock_price, description="Get the stock price."),
],

View File

@ -47,7 +47,7 @@ async def main() -> None:
runtime = SingleThreadedAgentRuntime()
agent = await runtime.register_and_get(
"chat_agent",
lambda: ChatCompletionAgent("Chat agent", get_chat_completion_client_from_envs(model="gpt-3.5-turbo")),
lambda: ChatCompletionAgent("Chat agent", get_chat_completion_client_from_envs(model="gpt-4o-mini")),
)
run_context = runtime.start()

View File

@ -81,7 +81,7 @@ async def main() -> None:
"Jack",
lambda: ChatCompletionAgent(
description="Jack a comedian",
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
system_messages=[
SystemMessage("You are a comedian likes to make jokes. " "When you are done talking, say 'TERMINATE'.")
],
@ -92,7 +92,7 @@ async def main() -> None:
"Cathy",
lambda: ChatCompletionAgent(
description="Cathy a poet",
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
system_messages=[
SystemMessage("You are a poet likes to write poems. " "When you are done talking, say 'TERMINATE'.")
],

View File

@ -255,14 +255,14 @@ async def main() -> None:
"ReviewerAgent",
lambda: ReviewerAgent(
description="Code Reviewer",
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
),
)
await runtime.register(
"CoderAgent",
lambda: CoderAgent(
description="Coder",
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
),
)
run_context = runtime.start()

View File

@ -118,7 +118,7 @@ async def main() -> None:
lambda: GroupChatParticipant(
description="A data scientist",
system_messages=[SystemMessage("You are a data scientist.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
),
)
agent2 = await runtime.register_and_get(
@ -126,7 +126,7 @@ async def main() -> None:
lambda: GroupChatParticipant(
description="An engineer",
system_messages=[SystemMessage("You are an engineer.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
),
)
agent3 = await runtime.register_and_get(
@ -134,7 +134,7 @@ async def main() -> None:
lambda: GroupChatParticipant(
description="An artist",
system_messages=[SystemMessage("You are an artist.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
),
)

View File

@ -117,7 +117,7 @@ async def main() -> None:
lambda: ReferenceAgent(
description="Reference Agent 1",
system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo", temperature=0.1),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini", temperature=0.1),
),
)
await runtime.register(
@ -125,7 +125,7 @@ async def main() -> None:
lambda: ReferenceAgent(
description="Reference Agent 2",
system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo", temperature=0.5),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini", temperature=0.5),
),
)
await runtime.register(
@ -133,7 +133,7 @@ async def main() -> None:
lambda: ReferenceAgent(
description="Reference Agent 3",
system_messages=[SystemMessage("You are a helpful assistant that can answer questions.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo", temperature=1.0),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini", temperature=1.0),
),
)
await runtime.register(
@ -145,7 +145,7 @@ async def main() -> None:
"...synthesize these responses into a single, high-quality response... Responses from models:"
)
],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
num_references=3,
),
)

View File

@ -214,7 +214,7 @@ async def main(question: str) -> None:
await runtime.register(
"MathSolver1",
lambda: MathSolver(
get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
get_chat_completion_client_from_envs(model="gpt-4o-mini"),
neighbor_names=["MathSolver2", "MathSolver4"],
max_round=3,
),
@ -222,7 +222,7 @@ async def main(question: str) -> None:
await runtime.register(
"MathSolver2",
lambda: MathSolver(
get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
get_chat_completion_client_from_envs(model="gpt-4o-mini"),
neighbor_names=["MathSolver1", "MathSolver3"],
max_round=3,
),
@ -230,7 +230,7 @@ async def main(question: str) -> None:
await runtime.register(
"MathSolver3",
lambda: MathSolver(
get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
get_chat_completion_client_from_envs(model="gpt-4o-mini"),
neighbor_names=["MathSolver2", "MathSolver4"],
max_round=3,
),
@ -238,7 +238,7 @@ async def main(question: str) -> None:
await runtime.register(
"MathSolver4",
lambda: MathSolver(
get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
get_chat_completion_client_from_envs(model="gpt-4o-mini"),
neighbor_names=["MathSolver1", "MathSolver3"],
max_round=3,
),

View File

@ -134,7 +134,7 @@ async def main() -> None:
lambda: ToolEnabledAgent(
description="Tool Use Agent",
system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
tools=tools,
),
)

View File

@ -52,7 +52,7 @@ async def main() -> None:
lambda: ToolEnabledAgent(
description="Tool Use Agent",
system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
tools=tools,
),
)

View File

@ -197,7 +197,7 @@ async def main() -> None:
lambda: ToolUseAgent(
description="Tool Use Agent",
system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
tools=tools,
),
)

View File

@ -37,7 +37,7 @@ async def main() -> None:
lambda: ToolEnabledAgent(
description="Tool Use Agent",
system_messages=[SystemMessage("You are a helpful AI Assistant. Use your tools to solve problems.")],
model_client=get_chat_completion_client_from_envs(model="gpt-3.5-turbo"),
model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"),
tools=[
# Define a tool that gets the stock price.
FunctionTool(

View File

@ -6,6 +6,7 @@ from ._model_client import ModelCapabilities
# This is a moving target, so correctness is checked by the model value returned by openai against expected values at runtime
_MODEL_POINTERS = {
"gpt-4o": "gpt-4o-2024-05-13",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview": "gpt-4-0125-preview",
"gpt-4": "gpt-4-0613",
@ -20,6 +21,11 @@ _MODEL_CAPABILITIES: Dict[str, ModelCapabilities] = {
"function_calling": True,
"json_output": True,
},
"gpt-4o-mini-2024-07-18": {
"vision": True,
"function_calling": True,
"json_output": True,
},
"gpt-4-turbo-2024-04-09": {
"vision": True,
"function_calling": True,
@ -79,6 +85,7 @@ _MODEL_CAPABILITIES: Dict[str, ModelCapabilities] = {
_MODEL_TOKEN_LIMITS: Dict[str, int] = {
"gpt-4o-2024-05-13": 128000,
"gpt-4o-mini-2024-07-18": 128000,
"gpt-4-turbo-2024-04-09": 128000,
"gpt-4-0125-preview": 128000,
"gpt-4-1106-preview": 128000,