update to 4.1 models (#352)

Author: Preston Rasmussen, 2025-04-14 21:02:36 -04:00, committed by GitHub
parent ed26852531
commit c8d5c45269
10 changed files with 31 additions and 34 deletions

View File

@@ -283,7 +283,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = ChatOpenAI(model='gpt-4o-mini', temperature=0).bind_tools(tools)"
+    "llm = ChatOpenAI(model='gpt-4.1-mini', temperature=0).bind_tools(tools)"
    ]
   },
   {
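
For context, the updated notebook line binds tools to the new model via LangChain. A minimal self-contained sketch of the same pattern, with a hypothetical `get_weather` tool standing in for the notebook's actual `tools` list:

```python
# Sketch of the tool-binding pattern above; `get_weather` is illustrative,
# not the notebook's real tool. Requires OPENAI_API_KEY in the environment.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f'It is sunny in {city}.'


tools = [get_weather]
llm = ChatOpenAI(model='gpt-4.1-mini', temperature=0).bind_tools(tools)

# The bound model can now emit structured tool calls.
response = llm.invoke('What is the weather in Paris?')
print(response.tool_calls)
```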

View File

@@ -28,7 +28,7 @@ from .client import CrossEncoderClient

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-nano'

 class BooleanClassifier(BaseModel):

View File

@@ -43,7 +43,7 @@ class LLMConfig:
             This is required for making authorized requests.
         model (str, optional): The specific LLM model to use for generating responses.
-            Defaults to "gpt-4o-mini", which appears to be a custom model name.
+            Defaults to "gpt-4.1-mini", which appears to be a custom model name.
             Common values might include "gpt-3.5-turbo" or "gpt-4".
         base_url (str, optional): The base URL of the LLM API service.
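
A short usage sketch for this config, assuming `LLMConfig` is importable from `graphiti_core.llm_client` (the exact module path may differ):

```python
# Illustrative construction of LLMConfig with the new default-tier model.
# The import path and placeholder values are assumptions, not repo code.
from graphiti_core.llm_client import LLMConfig

config = LLMConfig(
    api_key='your_openai_api_key_here',    # required for authorized requests
    model='gpt-4.1-mini',                  # new default per this commit
    base_url='https://api.openai.com/v1',  # optional; standard OpenAI endpoint
)
```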

View File

@@ -30,7 +30,7 @@ from .errors import RateLimitError, RefusalError

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-mini'

 class OpenAIClient(LLMClient):

View File

@@ -31,7 +31,7 @@ from .errors import RateLimitError, RefusalError

 logger = logging.getLogger(__name__)

-DEFAULT_MODEL = 'gpt-4o-mini'
+DEFAULT_MODEL = 'gpt-4.1-mini'

 class OpenAIGenericClient(LLMClient):

View File

@@ -9,7 +9,7 @@ NEO4J_PASSWORD=demodemo
 # OpenAI API Configuration
 # Required for LLM operations
 OPENAI_API_KEY=your_openai_api_key_here
-MODEL_NAME=gpt-4o-mini
+MODEL_NAME=gpt-4.1-mini

 # Optional: Only needed for non-standard OpenAI endpoints
 # OPENAI_BASE_URL=https://api.openai.com/v1
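
For reference, a sketch of how these variables are typically read on the Python side with python-dotenv (which this repo already uses via `load_dotenv()`); the fallback mirrors the new default:

```python
# Sketch: loading the .env values above; the MODEL_NAME fallback mirrors
# the new default. Variable handling here is illustrative.
import os

from dotenv import load_dotenv

load_dotenv()  # reads OPENAI_API_KEY, MODEL_NAME, OPENAI_BASE_URL from .env

api_key = os.environ['OPENAI_API_KEY']                # required
model_name = os.getenv('MODEL_NAME', 'gpt-4.1-mini')  # optional, defaulted
base_url = os.getenv('OPENAI_BASE_URL')               # optional, may be None
```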

View File

@@ -66,7 +66,7 @@ uv run graphiti_mcp_server.py
 With options:

 ```bash
-uv run graphiti_mcp_server.py --model gpt-4o-mini --transport sse
+uv run graphiti_mcp_server.py --model gpt-4.1-mini --transport sse
 ```

 Available arguments:
@@ -96,7 +96,7 @@ Before running the Docker Compose setup, you need to configure the environment variables
 ```
 # Required for LLM operations
 OPENAI_API_KEY=your_openai_api_key_here
-MODEL_NAME=gpt-4o-mini
+MODEL_NAME=gpt-4.1-mini
 # Optional: OPENAI_BASE_URL only needed for non-standard OpenAI endpoints
 # OPENAI_BASE_URL=https://api.openai.com/v1
 ```
@@ -105,7 +105,7 @@ Before running the Docker Compose setup, you need to configure the environment variables
 2. **Using environment variables directly**:
    - You can also set the environment variables when running the Docker Compose command:
      ```bash
-     OPENAI_API_KEY=your_key MODEL_NAME=gpt-4o-mini docker compose up
+     OPENAI_API_KEY=your_key MODEL_NAME=gpt-4.1-mini docker compose up
      ```

 #### Neo4j Configuration
@@ -162,7 +162,7 @@ To use the Graphiti MCP server with an MCP-compatible client, configure it to connect
       "NEO4J_USER": "neo4j",
       "NEO4J_PASSWORD": "demodemo",
       "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-      "MODEL_NAME": "gpt-4o-mini"
+      "MODEL_NAME": "gpt-4.1-mini"
     }
   }
 }
@@ -200,7 +200,7 @@ Or start the server with uv and connect to it:
       "NEO4J_USER": "neo4j",
       "NEO4J_PASSWORD": "demodemo",
       "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-      "MODEL_NAME": "gpt-4o-mini"
+      "MODEL_NAME": "gpt-4.1-mini"
     }
   }
 }
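
The README exposes the model both as a `--model` flag and a `MODEL_NAME` environment variable; presumably the flag takes precedence, falling back to the new default. A hedged sketch of that resolution order (names mirror the README; the server's actual code may differ):

```python
# Illustrative resolution: --model flag > MODEL_NAME env var > default.
import argparse
import os

DEFAULT_LLM_MODEL = 'gpt-4.1-mini'

parser = argparse.ArgumentParser()
parser.add_argument('--model', default=None, help='LLM model name to use')
args = parser.parse_args()

model = args.model or os.getenv('MODEL_NAME') or DEFAULT_LLM_MODEL
print(f'Using model: {model}')
```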

View File

@@ -31,7 +31,7 @@ from graphiti_core.utils.maintenance.graph_data_operations import clear_data

 load_dotenv()

-DEFAULT_LLM_MODEL = 'gpt-4o-mini'
+DEFAULT_LLM_MODEL = 'gpt-4.1-mini'

 class Requirement(BaseModel):
@@ -344,14 +344,12 @@ For optimal performance, ensure the database is properly configured and accessible
 API keys are provided for any language model operations.
 """

-# MCP server instance
 mcp = FastMCP(
     'graphiti',
     instructions=GRAPHITI_MCP_INSTRUCTIONS,
 )

-# Initialize Graphiti client
 graphiti_client: Graphiti | None = None
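
For orientation, a minimal standalone FastMCP server following the same construction pattern, using the MCP Python SDK import path (the `ping` tool and instructions string are illustrative, not repo code):

```python
# Minimal FastMCP sketch mirroring the construction above.
# Tool body and instructions text are illustrative only.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP('graphiti', instructions='Store and search graph memory.')


@mcp.tool()
def ping() -> str:
    """Health-check tool."""
    return 'pong'


if __name__ == '__main__':
    mcp.run(transport='stdio')
```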

View File

@ -1,21 +1,21 @@
{ {
"mcpServers": { "mcpServers": {
"graphiti": { "graphiti": {
"transport": "stdio", "transport": "stdio",
"command": "uv", "command": "uv",
"args": [ "args": [
"run", "run",
"/ABSOLUTE/PATH/TO/graphiti_mcp_server.py", "/ABSOLUTE/PATH/TO/graphiti_mcp_server.py",
"--transport", "--transport",
"stdio" "stdio"
], ],
"env": { "env": {
"NEO4J_URI": "bolt://localhost:7687", "NEO4J_URI": "bolt://localhost:7687",
"NEO4J_USER": "neo4j", "NEO4J_USER": "neo4j",
"NEO4J_PASSWORD": "demodemo", "NEO4J_PASSWORD": "demodemo",
"OPENAI_API_KEY": "${OPENAI_API_KEY}", "OPENAI_API_KEY": "${OPENAI_API_KEY}",
"MODEL_NAME": "gpt-4o-mini" "MODEL_NAME": "gpt-4.1-mini"
} }
}
} }
}
} }

View File

@@ -99,7 +99,7 @@ async def build_baseline_graph(multi_session: list[int], session_length: int):
 async def eval_graph(multi_session: list[int], session_length: int, llm_client=None) -> float:
     if llm_client is None:
-        llm_client = OpenAIClient()
+        llm_client = OpenAIClient(config=LLMConfig(model='gpt-4.1-mini'))
     graphiti = Graphiti(NEO4J_URI, NEO4j_USER, NEO4j_PASSWORD, llm_client=llm_client)
     with open('baseline_graph_results.json') as file:
         baseline_results_raw = json.load(file)
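
The new in-function default is equivalent to constructing the client explicitly and passing it in; a minimal sketch, assuming `OpenAIClient` and `LLMConfig` are importable from `graphiti_core.llm_client`:

```python
# Sketch (run inside an async context): explicit construction equivalent
# to the new default above. Import path and argument values are assumptions.
from graphiti_core.llm_client import LLMConfig, OpenAIClient

llm_client = OpenAIClient(config=LLMConfig(model='gpt-4.1-mini'))
score = await eval_graph(multi_session=[1, 2], session_length=10, llm_client=llm_client)
```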
@@ -127,7 +127,6 @@ async def eval_graph(multi_session: list[int], session_length: int, llm_client=None
     for user_id in add_episode_results:
         user_count += 1
         user_raw_score = 0
-        print('add_episode_context: ', add_episode_context)
         for baseline_result, add_episode_result, episodes in zip(
             baseline_results[user_id],
             add_episode_results[user_id],