Mirror of https://github.com/getzep/graphiti.git (synced 2025-06-27 02:00:02 +00:00)

commit c8d5c45269: update to 4.1 models (#352)
parent: ed26852531
@@ -283,7 +283,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = ChatOpenAI(model='gpt-4o-mini', temperature=0).bind_tools(tools)"
+    "llm = ChatOpenAI(model='gpt-4.1-mini', temperature=0).bind_tools(tools)"
    ]
   },
   {
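For context, the notebook cell above configures the LangGraph agent's chat model. A minimal sketch of the updated cell, assuming the standard `langchain_openai` package and a `tools` list defined earlier in the notebook:

```python
# Sketch of the updated notebook cell; `tools` is assumed to be a list of
# LangChain tool objects defined in an earlier cell.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model='gpt-4.1-mini', temperature=0).bind_tools(tools)
```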
@@ -28,7 +28,7 @@ from .client import CrossEncoderClient

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-nano'


 class BooleanClassifier(BaseModel):
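Unlike every other site in this commit, the cross-encoder reranker moves to `gpt-4.1-nano` rather than `gpt-4.1-mini`: the reranker issues one small boolean relevance call per candidate passage, so the cheapest 4.1 tier is a reasonable cost/latency trade-off. A hedged sketch of the schema this hunk touches; only the class line is visible in the diff, so the field name is an assumption:

```python
from pydantic import BaseModel


class BooleanClassifier(BaseModel):
    # Hypothetical field name: the hunk shows only the class declaration.
    # The reranker presumably parses a single yes/no relevance judgment.
    is_true: bool
```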
@@ -43,7 +43,7 @@ class LLMConfig:
         This is required for making authorized requests.

         model (str, optional): The specific LLM model to use for generating responses.
-            Defaults to "gpt-4o-mini", which appears to be a custom model name.
+            Defaults to "gpt-4.1-mini", which appears to be a custom model name.
             Common values might include "gpt-3.5-turbo" or "gpt-4".

         base_url (str, optional): The base URL of the LLM API service.
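Only the docstring changes in this hunk; the default itself is the `DEFAULT_MODEL` constant updated in the neighboring hunks. A sketch of relying on or overriding that default, with parameter names taken from the docstring and the import path assumed:

```python
# Import path assumed; LLMConfig lives in graphiti_core's llm_client package.
from graphiti_core.llm_client.config import LLMConfig

default_config = LLMConfig(api_key='sk-...')  # model falls back to 'gpt-4.1-mini'
pinned_config = LLMConfig(api_key='sk-...', model='gpt-4.1-nano')  # explicit override
```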
@@ -30,7 +30,7 @@ from .errors import RateLimitError, RefusalError

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-mini'


 class OpenAIClient(LLMClient):
@@ -31,7 +31,7 @@ from .errors import RateLimitError, RefusalError

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-mini'


 class OpenAIGenericClient(LLMClient):
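Both OpenAI-backed clients pick up the same new default, so any caller that constructs a client without an explicit config silently moves from gpt-4o-mini to gpt-4.1-mini. A sketch of pinning a model instead of inheriting the default; the `config=` keyword matches the eval change later in this diff, and the import path is assumed:

```python
from graphiti_core.llm_client import LLMConfig, OpenAIClient  # import path assumed

client = OpenAIClient()  # now defaults to 'gpt-4.1-mini'
pinned = OpenAIClient(config=LLMConfig(model='gpt-4o-mini'))  # keep the old model
```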
@@ -9,7 +9,7 @@ NEO4J_PASSWORD=demodemo
 # OpenAI API Configuration
 # Required for LLM operations
 OPENAI_API_KEY=your_openai_api_key_here
-MODEL_NAME=gpt-4o-mini
+MODEL_NAME=gpt-4.1-mini

 # Optional: Only needed for non-standard OpenAI endpoints
 # OPENAI_BASE_URL=https://api.openai.com/v1
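The MCP server picks MODEL_NAME up from the environment (loaded via `load_dotenv()`, as a later hunk shows). A sketch of the usual lookup pattern, with the fallback matching the new default; the exact handling inside graphiti_mcp_server.py is an assumption:

```python
import os

from dotenv import load_dotenv

load_dotenv()  # pulls MODEL_NAME and OPENAI_API_KEY in from .env

# Fall back to the new default when MODEL_NAME is unset.
model_name = os.environ.get('MODEL_NAME', 'gpt-4.1-mini')
```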
@@ -66,7 +66,7 @@ uv run graphiti_mcp_server.py
 With options:

 ```bash
-uv run graphiti_mcp_server.py --model gpt-4o-mini --transport sse
+uv run graphiti_mcp_server.py --model gpt-4.1-mini --transport sse
 ```

 Available arguments:
@@ -96,7 +96,7 @@ Before running the Docker Compose setup, you need to configure the environment variables
 ```
 # Required for LLM operations
 OPENAI_API_KEY=your_openai_api_key_here
-MODEL_NAME=gpt-4o-mini
+MODEL_NAME=gpt-4.1-mini
 # Optional: OPENAI_BASE_URL only needed for non-standard OpenAI endpoints
 # OPENAI_BASE_URL=https://api.openai.com/v1
 ```
@@ -105,7 +105,7 @@ Before running the Docker Compose setup, you need to configure the environment variables
 2. **Using environment variables directly**:
    - You can also set the environment variables when running the Docker Compose command:
      ```bash
-     OPENAI_API_KEY=your_key MODEL_NAME=gpt-4o-mini docker compose up
+     OPENAI_API_KEY=your_key MODEL_NAME=gpt-4.1-mini docker compose up
      ```

 #### Neo4j Configuration
@@ -162,7 +162,7 @@ To use the Graphiti MCP server with an MCP-compatible client, configure it to connect
       "NEO4J_USER": "neo4j",
       "NEO4J_PASSWORD": "demodemo",
       "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-      "MODEL_NAME": "gpt-4o-mini"
+      "MODEL_NAME": "gpt-4.1-mini"
     }
   }
 }
@@ -200,7 +200,7 @@ Or start the server with uv and connect to it:
       "NEO4J_USER": "neo4j",
       "NEO4J_PASSWORD": "demodemo",
       "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-      "MODEL_NAME": "gpt-4o-mini"
+      "MODEL_NAME": "gpt-4.1-mini"
     }
   }
 }
@@ -31,7 +31,7 @@ from graphiti_core.utils.maintenance.graph_data_operations import clear_data

 load_dotenv()

-DEFAULT_LLM_MODEL = 'gpt-4o-mini'
+DEFAULT_LLM_MODEL = 'gpt-4.1-mini'


 class Requirement(BaseModel):
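DEFAULT_LLM_MODEL is the last-resort fallback behind the `--model` flag and the MODEL_NAME variable shown in the earlier hunks. A sketch of that precedence chain; only the constant is visible here, so the argument handling is an assumption:

```python
import argparse
import os

DEFAULT_LLM_MODEL = 'gpt-4.1-mini'

parser = argparse.ArgumentParser()
parser.add_argument('--model', default=None)
args = parser.parse_args()

# Assumed precedence: CLI flag first, then environment, then the module default.
model = args.model or os.environ.get('MODEL_NAME') or DEFAULT_LLM_MODEL
```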
@@ -344,14 +344,12 @@ For optimal performance, ensure the database is properly configured and accessible
 API keys are provided for any language model operations.
 """

-
 # MCP server instance
 mcp = FastMCP(
     'graphiti',
     instructions=GRAPHITI_MCP_INSTRUCTIONS,
 )

-
 # Initialize Graphiti client
 graphiti_client: Graphiti | None = None

@@ -1,21 +1,21 @@
 {
   "mcpServers": {
     "graphiti": {
       "transport": "stdio",
       "command": "uv",
       "args": [
         "run",
         "/ABSOLUTE/PATH/TO/graphiti_mcp_server.py",
         "--transport",
         "stdio"
       ],
       "env": {
         "NEO4J_URI": "bolt://localhost:7687",
         "NEO4J_USER": "neo4j",
         "NEO4J_PASSWORD": "demodemo",
         "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-        "MODEL_NAME": "gpt-4o-mini"
+        "MODEL_NAME": "gpt-4.1-mini"
       }
     }
   }
 }
@@ -99,7 +99,7 @@ async def build_baseline_graph(multi_session: list[int], session_length: int):

 async def eval_graph(multi_session: list[int], session_length: int, llm_client=None) -> float:
     if llm_client is None:
-        llm_client = OpenAIClient()
+        llm_client = OpenAIClient(config=LLMConfig(model='gpt-4.1-mini'))
     graphiti = Graphiti(NEO4J_URI, NEO4j_USER, NEO4j_PASSWORD, llm_client=llm_client)
     with open('baseline_graph_results.json') as file:
         baseline_results_raw = json.load(file)
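Pinning the eval's fallback client to gpt-4.1-mini keeps scores comparable against the stored baseline even if the library-wide default changes again, while callers can still inject their own client. A sketch of invoking it with an explicit client; the argument values are illustrative:

```python
import asyncio

# Mirrors the hunk above: pin the model rather than inherit the library default.
client = OpenAIClient(config=LLMConfig(model='gpt-4.1-mini'))
score = asyncio.run(eval_graph(multi_session=[5], session_length=10, llm_client=client))
```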
@@ -127,7 +127,6 @@ async def eval_graph(multi_session: list[int], session_length: int, llm_client=None) -> float:
     for user_id in add_episode_results:
         user_count += 1
         user_raw_score = 0
-        print('add_episode_context: ', add_episode_context)
         for baseline_result, add_episode_result, episodes in zip(
             baseline_results[user_id],
             add_episode_results[user_id],
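The dropped line was a stray debug print. If that context is still occasionally useful, routing it through a module logger (an assumption; the eval file's logging setup is not shown in this diff) would keep it available at debug level without polluting eval output:

```python
# Hypothetical replacement for the removed print; assumes a module-level logger.
logger.debug('add_episode_context: %s', add_episode_context)
```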