Mirror of https://github.com/TauricResearch/TradingAgents.git (synced 2025-06-27 00:41:34 +00:00)

Merge branch 'TauricResearch:main' into save_results
Commit ee3d499894
.python-version (new file, 1 line)
@@ -0,0 +1 @@
+3.10
README.md
@@ -80,7 +80,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu
 - Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights.
 
 <p align="center">
-  <img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;">
+  <img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;">
 </p>
 
 ### Risk Management and Portfolio Manager
@@ -88,7 +88,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu
 - The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed.
 
 <p align="center">
-  <img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;">
+  <img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;">
 </p>
 
 ## Installation and CLI
cli/main.py (63 lines changed)
@@ -297,10 +297,27 @@ def update_display(layout, spinner_text=None):
 
         # Add regular messages
         for timestamp, msg_type, content in message_buffer.messages:
+            # Convert content to string if it's not already
+            content_str = content
+            if isinstance(content, list):
+                # Handle list of content blocks (Anthropic format)
+                text_parts = []
+                for item in content:
+                    if isinstance(item, dict):
+                        if item.get('type') == 'text':
+                            text_parts.append(item.get('text', ''))
+                        elif item.get('type') == 'tool_use':
+                            text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+                    else:
+                        text_parts.append(str(item))
+                content_str = ' '.join(text_parts)
+            elif not isinstance(content_str, str):
+                content_str = str(content)
+
             # Truncate message content if too long
-            if isinstance(content, str) and len(content) > 200:
-                content = content[:197] + "..."
-            all_messages.append((timestamp, msg_type, content))
+            if len(content_str) > 200:
+                content_str = content_str[:197] + "..."
+            all_messages.append((timestamp, msg_type, content_str))
 
         # Sort by timestamp
         all_messages.sort(key=lambda x: x[0])
@@ -446,20 +463,30 @@ def get_user_selections():
         )
     selected_research_depth = select_research_depth()
 
-    # Step 5: Thinking agents
+    # Step 5: OpenAI backend
    console.print(
        create_question_box(
-            "Step 5: Thinking Agents", "Select your thinking agents for analysis"
+            "Step 5: OpenAI backend", "Select which service to talk to"
        )
    )
-    selected_shallow_thinker = select_shallow_thinking_agent()
-    selected_deep_thinker = select_deep_thinking_agent()
+    selected_llm_provider, backend_url = select_llm_provider()
+
+    # Step 6: Thinking agents
+    console.print(
+        create_question_box(
+            "Step 6: Thinking Agents", "Select your thinking agents for analysis"
+        )
+    )
+    selected_shallow_thinker = select_shallow_thinking_agent(selected_llm_provider)
+    selected_deep_thinker = select_deep_thinking_agent(selected_llm_provider)
 
     return {
         "ticker": selected_ticker,
         "analysis_date": analysis_date,
         "analysts": selected_analysts,
         "research_depth": selected_research_depth,
+        "llm_provider": selected_llm_provider.lower(),
+        "backend_url": backend_url,
         "shallow_thinker": selected_shallow_thinker,
         "deep_thinker": selected_deep_thinker,
     }
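For reference, the selections dict returned by get_user_selections() now carries the provider name and backend URL alongside the two model choices. A hypothetical Anthropic run might produce something like this (every value below is illustrative, not taken from this commit):

    selections = {
        "ticker": "SPY",
        "analysis_date": "2025-06-01",
        "analysts": ["market", "news"],
        "research_depth": 1,
        "llm_provider": "anthropic",
        "backend_url": "https://api.anthropic.com/",
        "shallow_thinker": "claude-3-5-haiku-latest",
        "deep_thinker": "claude-3-7-sonnet-latest",
    }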
@@ -685,6 +712,24 @@ def update_research_team_status(status):
     for agent in research_team:
         message_buffer.update_agent_status(agent, status)
 
+def extract_content_string(content):
+    """Extract string content from various message formats."""
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        # Handle Anthropic's list format
+        text_parts = []
+        for item in content:
+            if isinstance(item, dict):
+                if item.get('type') == 'text':
+                    text_parts.append(item.get('text', ''))
+                elif item.get('type') == 'tool_use':
+                    text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+            else:
+                text_parts.append(str(item))
+        return ' '.join(text_parts)
+    else:
+        return str(content)
+
 def run_analysis():
     # First get all user selections
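A quick sanity check (not part of the commit) of how extract_content_string flattens the two Anthropic block types it special-cases, and how it falls back on str() for everything else:

    blocks = [
        {"type": "text", "text": "Buying pressure is rising."},
        {"type": "tool_use", "name": "get_YFin_data"},
    ]
    assert extract_content_string(blocks) == "Buying pressure is rising. [Tool: get_YFin_data]"
    assert extract_content_string("plain string") == "plain string"
    assert extract_content_string(42) == "42"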
@@ -696,6 +741,8 @@ def run_analysis():
     config["max_risk_discuss_rounds"] = selections["research_depth"]
     config["quick_think_llm"] = selections["shallow_thinker"]
     config["deep_think_llm"] = selections["deep_thinker"]
+    config["backend_url"] = selections["backend_url"]
+    config["llm_provider"] = selections["llm_provider"].lower()
 
     # Initialize the graph
     graph = TradingAgentsGraph(
@@ -803,7 +850,7 @@ def run_analysis():
 
             # Extract message content and type
             if hasattr(last_message, "content"):
-                content = last_message.content
+                content = extract_content_string(last_message.content)  # Use the helper function
                 msg_type = "Reasoning"
             else:
                 content = str(last_message)
cli/utils.py (91 lines changed)
@@ -122,22 +122,43 @@ def select_research_depth() -> int:
     return choice
 
 
-def select_shallow_thinking_agent() -> str:
+def select_shallow_thinking_agent(provider) -> str:
     """Select shallow thinking llm engine using an interactive selection."""
 
     # Define shallow thinking llm engine options with their corresponding model names
-    SHALLOW_AGENT_OPTIONS = [
+    SHALLOW_AGENT_OPTIONS = {
+        "openai": [
             ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
             ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
             ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
             ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
-    ]
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+        ],
+        "openrouter": [
+            ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
+            ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
+            ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
+        ],
+        "ollama": [
+            ("llama3.2 local", "llama3.2"),
+        ]
+    }
 
     choice = questionary.select(
         "Select Your [Quick-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in SHALLOW_AGENT_OPTIONS
+            for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -158,11 +179,12 @@ def select_shallow_thinking_agent() -> str:
     return choice
 
 
-def select_deep_thinking_agent() -> str:
+def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""
 
     # Define deep thinking llm engine options with their corresponding model names
-    DEEP_AGENT_OPTIONS = [
+    DEEP_AGENT_OPTIONS = {
+        "openai": [
             ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
             ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
             ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
@@ -170,13 +192,34 @@ def select_deep_thinking_agent() -> str:
             ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
             ("o3 - Full advanced reasoning model", "o3"),
             ("o1 - Premier reasoning and problem-solving model", "o1"),
-    ]
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+            ("Claude Opus 4 - Most powerful Anthropic model", "claude-opus-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+        ],
+        "openrouter": [
+            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
+            ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
+        ],
+        "ollama": [
+            ("qwen3", "qwen3"),
+        ]
+    }
 
     choice = questionary.select(
         "Select Your [Deep-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in DEEP_AGENT_OPTIONS
+            for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -193,3 +236,39 @@ def select_deep_thinking_agent() -> str:
         exit(1)
 
     return choice
+
+
+def select_llm_provider() -> tuple[str, str]:
+    """Select the LLM provider and its backend URL using an interactive selection."""
+    # Define the available providers with their corresponding API endpoints
+    BASE_URLS = [
+        ("OpenAI", "https://api.openai.com/v1"),
+        ("Anthropic", "https://api.anthropic.com/"),
+        ("Google", "https://generativelanguage.googleapis.com/v1"),
+        ("Openrouter", "https://openrouter.ai/api/v1"),
+        ("Ollama", "http://localhost:11434/v1"),
+    ]
+
+    choice = questionary.select(
+        "Select your LLM Provider:",
+        choices=[
+            questionary.Choice(display, value=(display, value))
+            for display, value in BASE_URLS
+        ],
+        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
+        style=questionary.Style(
+            [
+                ("selected", "fg:magenta noinherit"),
+                ("highlighted", "fg:magenta noinherit"),
+                ("pointer", "fg:magenta noinherit"),
+            ]
+        ),
+    ).ask()
+
+    if choice is None:
+        console.print("\n[red]No LLM provider selected. Exiting...[/red]")
+        exit(1)
+
+    display_name, url = choice
+    print(f"You selected: {display_name}\tURL: {url}")
+
+    return display_name, url
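Taken together, the three selectors chain like this (names from this diff; the example return value is illustrative):

    provider, backend_url = select_llm_provider()      # e.g. ("Anthropic", "https://api.anthropic.com/")
    shallow = select_shallow_thinking_agent(provider)  # offers SHALLOW_AGENT_OPTIONS[provider.lower()]
    deep = select_deep_thinking_agent(provider)        # offers DEEP_AGENT_OPTIONS[provider.lower()]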
main.py (6 lines changed)
@@ -3,8 +3,10 @@ from tradingagents.default_config import DEFAULT_CONFIG
 
 # Create a custom config
 config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-4.1-nano"  # Use a different model
-config["quick_think_llm"] = "gpt-4.1-nano"  # Use a different model
+config["llm_provider"] = "google"  # Use a different provider
+config["backend_url"] = "https://generativelanguage.googleapis.com/v1"  # Use a different backend
+config["deep_think_llm"] = "gemini-2.0-flash"  # Use a different model
+config["quick_think_llm"] = "gemini-2.0-flash"  # Use a different model
 config["max_debate_rounds"] = 1  # Increase debate rounds
 config["online_tools"] = True  # Use online tools
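The same four keys can point at any provider listed in cli/utils.py. A sketch for a local Ollama backend, reusing the URL and model names that appear elsewhere in this diff (the pairing itself is an assumption, not part of the commit):

    config["llm_provider"] = "ollama"
    config["backend_url"] = "http://localhost:11434/v1"
    config["quick_think_llm"] = "llama3.2"
    config["deep_think_llm"] = "qwen3"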
pyproject.toml (new file, 34 lines)
@@ -0,0 +1,34 @@
+[project]
+name = "tradingagents"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "akshare>=1.16.98",
+    "backtrader>=1.9.78.123",
+    "chainlit>=2.5.5",
+    "chromadb>=1.0.12",
+    "eodhd>=1.0.32",
+    "feedparser>=6.0.11",
+    "finnhub-python>=2.4.23",
+    "langchain-anthropic>=0.3.15",
+    "langchain-experimental>=0.3.4",
+    "langchain-google-genai>=2.1.5",
+    "langchain-openai>=0.3.23",
+    "langgraph>=0.4.8",
+    "pandas>=2.3.0",
+    "parsel>=1.10.0",
+    "praw>=7.8.1",
+    "pytz>=2025.2",
+    "questionary>=2.1.0",
+    "redis>=6.2.0",
+    "requests>=2.32.4",
+    "rich>=14.0.0",
+    "setuptools>=80.9.0",
+    "stockstats>=0.6.5",
+    "tqdm>=4.67.1",
+    "tushare>=1.4.21",
+    "typing-extensions>=4.14.0",
+    "yfinance>=0.2.63",
+]
requirements.txt
@@ -22,3 +22,5 @@ redis
 chainlit
 rich
 questionary
+langchain_anthropic
+langchain-google-genai
@@ -22,7 +22,7 @@ def create_fundamentals_analyst(llm, toolkit):
 
     system_message = (
         "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and fine-grained analysis and insights that may help traders make decisions."
-        + " Make sure to append a Makrdown table at the end of the report to organize key points in the report, organized and easy to read.",
+        + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.",
     )
 
     prompt = ChatPromptTemplate.from_messages(
@@ -51,9 +51,14 @@ def create_fundamentals_analyst(llm, toolkit):
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "fundamentals_report": result.content,
+            "fundamentals_report": report,
         }
 
     return fundamentals_analyst_node
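The same guard appears in all four analyst nodes; the next three hunks repeat it for the market, news, and social media analysts. Distilled into a hypothetical helper (not in the commit), the logic is: only publish the report once the LLM answers without requesting another tool call.

    def finalize_report(result) -> str:
        # Empty until the model stops calling tools; the raw message still
        # travels on via "messages" either way.
        return result.content if len(result.tool_calls) == 0 else ""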
@@ -76,9 +76,14 @@ Volume-Based Indicators:
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "market_report": result.content,
+            "market_report": report,
         }
 
     return market_analyst_node
@@ -47,9 +47,14 @@ def create_news_analyst(llm, toolkit):
         chain = prompt | llm.bind_tools(tools)
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "news_report": result.content,
+            "news_report": report,
         }
 
     return news_analyst_node
@@ -47,9 +47,14 @@ def create_social_media_analyst(llm, toolkit):
 
         result = chain.invoke(state["messages"])
 
+        report = ""
+
+        if len(result.tool_calls) == 0:
+            report = result.content
+
         return {
             "messages": [result],
-            "sentiment_report": result.content,
+            "sentiment_report": report,
         }
 
     return social_media_analyst_node
@@ -12,13 +12,21 @@ from dateutil.relativedelta import relativedelta
 from langchain_openai import ChatOpenAI
 import tradingagents.dataflows.interface as interface
 from tradingagents.default_config import DEFAULT_CONFIG
+from langchain_core.messages import HumanMessage
 
 
 def create_msg_delete():
     def delete_messages(state):
-        """To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
+        """Clear messages and add placeholder for Anthropic compatibility"""
         messages = state["messages"]
-        return {"messages": [RemoveMessage(id=m.id) for m in messages]}
+
+        # Remove all messages
+        removal_operations = [RemoveMessage(id=m.id) for m in messages]
+
+        # Add a minimal placeholder message
+        placeholder = HumanMessage(content="Continue")
+
+        return {"messages": removal_operations + [placeholder]}
 
     return delete_messages
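A sketch (not in the commit) of what the rewritten node returns: one RemoveMessage per existing message, followed by the "Continue" placeholder that keeps Anthropic endpoints from ever seeing an empty history:

    from langchain_core.messages import AIMessage, HumanMessage

    delete_node = create_msg_delete()
    state = {"messages": [AIMessage(content="analysis...", id="a1"),
                          HumanMessage(content="next?", id="h1")]}
    ops = delete_node(state)["messages"]
    # ops == [RemoveMessage(id="a1"), RemoveMessage(id="h1"),
    #         HumanMessage(content="Continue")]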
@@ -1,19 +1,23 @@
 import chromadb
 from chromadb.config import Settings
 from openai import OpenAI
 import numpy as np
 
 
 class FinancialSituationMemory:
-    def __init__(self, name):
+    def __init__(self, name, config):
+        if config["backend_url"] == "http://localhost:11434/v1":
+            self.embedding = "nomic-embed-text"
+        else:
+            self.embedding = "text-embedding-3-small"
         self.client = OpenAI()
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
         self.situation_collection = self.chroma_client.create_collection(name=name)
 
     def get_embedding(self, text):
         """Get OpenAI embedding for a text"""
 
         response = self.client.embeddings.create(
-            model="text-embedding-ada-002", input=text
+            model=self.embedding, input=text
         )
         return response.data[0].embedding
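Usage sketch for the new constructor (assumes an OPENAI_API_KEY in the environment, since the class still builds an OpenAI client unconditionally):

    config = {"backend_url": "http://localhost:11434/v1"}
    memory = FinancialSituationMemory("bull_memory", config)
    assert memory.embedding == "nomic-embed-text"  # any other URL selects "text-embedding-3-small"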
@@ -703,6 +703,7 @@ def get_YFin_data(
 
 
 def get_stock_news_openai(ticker, curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
@@ -713,7 +714,7 @@ def get_stock_news_openai(ticker, curr_date):
             "content": [
                 {
                     "type": "input_text",
-                    "text": f"Can you search Social Media for {ticker} on TSLA from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
+                    "text": f"Can you search Social Media for {ticker} from 7 days before {curr_date} to {curr_date}? Make sure you only get the data posted during that period.",
                 }
             ],
         }
@@ -737,6 +738,7 @@ def get_stock_news_openai(ticker, curr_date):
 
 
 def get_global_news_openai(curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
@@ -771,6 +773,7 @@ def get_global_news_openai(curr_date):
 
 
 def get_fundamentals_openai(ticker, curr_date):
+    config = get_config()
     client = OpenAI()
 
     response = client.responses.create(
@@ -9,8 +9,10 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
+    "llm_provider": "openai",
     "deep_think_llm": "o4-mini",
     "quick_think_llm": "gpt-4o-mini",
+    "backend_url": "https://api.openai.com/v1",
     # Debate and discussion settings
     "max_debate_rounds": 1,
     "max_risk_discuss_rounds": 1,
@@ -7,6 +7,9 @@ from datetime import date
 from typing import Dict, Any, Tuple, List, Optional
 
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
+
 from langgraph.prebuilt import ToolNode
 
 from tradingagents.agents import *
@@ -55,18 +58,26 @@ class TradingAgentsGraph:
         )
 
         # Initialize LLMs
-        self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
-        self.quick_thinking_llm = ChatOpenAI(
-            model=self.config["quick_think_llm"], temperature=0.1
-        )
+        if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
+            self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "anthropic":
+            self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "google":
+            self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
+            self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
+        else:
+            raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")
 
         self.toolkit = Toolkit(config=self.config)
 
         # Initialize memories
-        self.bull_memory = FinancialSituationMemory("bull_memory")
-        self.bear_memory = FinancialSituationMemory("bear_memory")
-        self.trader_memory = FinancialSituationMemory("trader_memory")
-        self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory")
-        self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory")
+        self.bull_memory = FinancialSituationMemory("bull_memory", self.config)
+        self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
+        self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
+        self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
+        self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config)
 
         # Create tool nodes
         self.tool_nodes = self._create_tool_nodes()
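End to end, the new provider plumbing mirrors main.py. A hedged sketch, assuming TradingAgentsGraph is importable from tradingagents.graph.trading_graph and accepts a config keyword (the constructor call is inferred from the `graph = TradingAgentsGraph(` line in cli/main.py, not shown in full in this diff):

    from tradingagents.default_config import DEFAULT_CONFIG
    from tradingagents.graph.trading_graph import TradingAgentsGraph

    config = DEFAULT_CONFIG.copy()
    config["llm_provider"] = "anthropic"
    config["backend_url"] = "https://api.anthropic.com/"
    config["quick_think_llm"] = "claude-3-5-haiku-latest"
    config["deep_think_llm"] = "claude-sonnet-4-0"
    graph = TradingAgentsGraph(config=config)  # dispatches to ChatAnthropic via the branch above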