main works, cli bugs

Edward Sun 2025-06-15 22:20:59 -07:00
parent 90b23e72f5
commit da84ef43aa
11 changed files with 5577 additions and 50 deletions

.python-version (new file, +1)

@@ -0,0 +1 @@
3.10

@@ -295,10 +295,27 @@ def update_display(layout, spinner_text=None):
         # Add regular messages
         for timestamp, msg_type, content in message_buffer.messages:
+            # Convert content to string if it's not already
+            content_str = content
+            if isinstance(content, list):
+                # Handle list of content blocks (Anthropic format)
+                text_parts = []
+                for item in content:
+                    if isinstance(item, dict):
+                        if item.get('type') == 'text':
+                            text_parts.append(item.get('text', ''))
+                        elif item.get('type') == 'tool_use':
+                            text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+                    else:
+                        text_parts.append(str(item))
+                content_str = ' '.join(text_parts)
+            elif not isinstance(content_str, str):
+                content_str = str(content)
             # Truncate message content if too long
-            if isinstance(content, str) and len(content) > 200:
-                content = content[:197] + "..."
-            all_messages.append((timestamp, msg_type, content))
+            if len(content_str) > 200:
+                content_str = content_str[:197] + "..."
+            all_messages.append((timestamp, msg_type, content_str))

         # Sort by timestamp
         all_messages.sort(key=lambda x: x[0])
@@ -450,7 +467,7 @@ def get_user_selections():
             "Step 5: OpenAI backend", "Select which service to talk to"
         )
     )
-    selected_openai_backend = select_openai_backend()
+    selected_llm_provider, backend_url = select_llm_provider()

     # Step 6: Thinking agents
     console.print(
@@ -458,15 +475,16 @@ def get_user_selections():
             "Step 6: Thinking Agents", "Select your thinking agents for analysis"
         )
     )
-    selected_shallow_thinker = select_shallow_thinking_agent(selected_openai_backend)
-    selected_deep_thinker = select_deep_thinking_agent(selected_openai_backend)
+    selected_shallow_thinker = select_shallow_thinking_agent(selected_llm_provider)
+    selected_deep_thinker = select_deep_thinking_agent(selected_llm_provider)

     return {
         "ticker": selected_ticker,
         "analysis_date": analysis_date,
         "analysts": selected_analysts,
         "research_depth": selected_research_depth,
-        "openai_backend": selected_openai_backend,
+        "llm_provider": selected_llm_provider.lower(),
+        "backend_url": backend_url,
         "shallow_thinker": selected_shallow_thinker,
         "deep_thinker": selected_deep_thinker,
     }
@@ -692,6 +710,24 @@ def update_research_team_status(status):
     for agent in research_team:
         message_buffer.update_agent_status(agent, status)

+def extract_content_string(content):
+    """Extract string content from various message formats."""
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        # Handle Anthropic's list format
+        text_parts = []
+        for item in content:
+            if isinstance(item, dict):
+                if item.get('type') == 'text':
+                    text_parts.append(item.get('text', ''))
+                elif item.get('type') == 'tool_use':
+                    text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
+            else:
+                text_parts.append(str(item))
+        return ' '.join(text_parts)
+    else:
+        return str(content)

 def run_analysis():
     # First get all user selections
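The helper normalizes the three content shapes LangChain can hand back: plain strings, Anthropic-style lists of content blocks, and anything else via str(). A quick illustration with invented sample data:

# Illustration only: the sample content list is invented
sample = [
    {"type": "text", "text": "AAPL looks oversold."},
    {"type": "tool_use", "name": "get_YFin_data", "input": {"ticker": "AAPL"}},
]
print(extract_content_string(sample))
# -> "AAPL looks oversold. [Tool: get_YFin_data]"
print(extract_content_string("plain text"))  # -> "plain text"
print(extract_content_string(42))            # -> "42"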
@@ -703,7 +739,8 @@ def run_analysis():
     config["max_risk_discuss_rounds"] = selections["research_depth"]
     config["quick_think_llm"] = selections["shallow_thinker"]
     config["deep_think_llm"] = selections["deep_thinker"]
-    config["openai_backend"] = selections["openai_backend"]
+    config["backend_url"] = selections["backend_url"]
+    config["llm_provider"] = selections["llm_provider"].lower()

     # Initialize the graph
     graph = TradingAgentsGraph(
@@ -764,7 +801,7 @@ def run_analysis():
                 # Extract message content and type
                 if hasattr(last_message, "content"):
-                    content = last_message.content
+                    content = extract_content_string(last_message.content)  # Use the helper function
                     msg_type = "Reasoning"
                 else:
                     content = str(last_message)


@@ -122,23 +122,34 @@ def select_research_depth() -> int:
     return choice

-def select_shallow_thinking_agent(backend) -> str:
+def select_shallow_thinking_agent(provider) -> str:
     """Select shallow thinking llm engine using an interactive selection."""

     # Define shallow thinking llm engine options with their corresponding model names
     SHALLOW_AGENT_OPTIONS = {
-        "https://api.openai.com/v1": [
+        "openai": [
             ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
             ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
             ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
             ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
         ],
-        "https://openrouter.ai/api/v1": [
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+        ],
+        "openrouter": [
             ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
             ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
             ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
         ],
-        "http://localhost:11434/v1": [
+        "ollama": [
             ("llama3.2 local", "llama3.2"),
         ]
     }
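Keying the options table by provider name instead of backend URL decouples the model menu from the endpoint, but the direct SHALLOW_AGENT_OPTIONS[provider.lower()] lookup used below raises a bare KeyError on an unknown provider. A defensive variant, as a sketch (options_for is a hypothetical helper, not in the repository):

def options_for(provider: str, table: dict) -> list:
    """Look up a provider's model menu, failing with a readable error (sketch)."""
    key = provider.lower()
    if key not in table:
        raise ValueError(f"Unsupported LLM provider: {provider}")
    return table[key]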
@@ -147,7 +158,7 @@ def select_shallow_thinking_agent(backend) -> str:
         "Select Your [Quick-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in SHALLOW_AGENT_OPTIONS[backend]
+            for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -168,12 +179,12 @@ def select_shallow_thinking_agent(backend) -> str:
     return choice

-def select_deep_thinking_agent(backend) -> str:
+def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""

     # Define deep thinking llm engine options with their corresponding model names
     DEEP_AGENT_OPTIONS = {
-        "https://api.openai.com/v1": [
+        "openai": [
             ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
             ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
             ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
@@ -182,11 +193,24 @@ def select_deep_thinking_agent(backend) -> str:
             ("o3 - Full advanced reasoning model", "o3"),
             ("o1 - Premier reasoning and problem-solving model", "o1"),
         ],
-        "https://openrouter.ai/api/v1": [
-            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
-            ("deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+            ("Claude Opus 4 - Most powerful Anthropic model", "claude-opus-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+        ],
+        "openrouter": [
+            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
+            ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
+        ],
-        "http://localhost:11434/v1": [
+        "ollama": [
             ("qwen3", "qwen3"),
         ]
     }
@@ -195,7 +219,7 @@ def select_deep_thinking_agent(backend) -> str:
         "Select Your [Deep-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in DEEP_AGENT_OPTIONS[backend]
+            for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -213,21 +237,22 @@ def select_deep_thinking_agent(backend) -> str:
     return choice

-def select_openai_backend() -> str:
+def select_llm_provider() -> tuple[str, str]:
     """Select the OpenAI api url using interactive selection."""

     # Define OpenAI api options with their corresponding endpoints
-    OPENAI_BASE_URLS = [
-        ("OpenAI - Requires an OpenAPI Key", "https://api.openai.com/v1"),
-        ("Openrouter - Requires an OpenRouter API Key", "https://openrouter.ai/api/v1"),
-        ("Ollama - Local", "http://localhost:11434/v1")
+    BASE_URLS = [
+        ("OpenAI", "https://api.openai.com/v1"),
+        ("Anthropic", "https://api.anthropic.com/"),
+        ("Google", "https://generativelanguage.googleapis.com/v1"),
+        ("Openrouter", "https://openrouter.ai/api/v1"),
+        ("Ollama", "http://localhost:11434/v1"),
     ]

     choice = questionary.select(
-        "Select your [OpenAI endpoint]:",
+        "Select your LLM Provider:",
         choices=[
-            questionary.Choice(display, value=value)
-            for display, value in OPENAI_BASE_URLS
+            questionary.Choice(display, value=(display, value))
+            for display, value in BASE_URLS
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -243,4 +268,7 @@ def select_openai_backend() -> str:
         console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
         exit(1)

-    return choice
+    display_name, url = choice
+    print(f"You selected: {display_name}\tURL: {url}")
+    return display_name, url
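Packing the display name and URL into each Choice value lets a single prompt return both fields at once. A self-contained sketch of the pattern (menu trimmed to two entries):

import questionary

providers = [
    ("OpenAI", "https://api.openai.com/v1"),
    ("Ollama", "http://localhost:11434/v1"),
]

# Each Choice carries a (display, url) tuple as its value
choice = questionary.select(
    "Select your LLM Provider:",
    choices=[
        questionary.Choice(display, value=(display, value))
        for display, value in providers
    ],
).ask()

if choice is not None:
    display_name, url = choice
    print(f"You selected: {display_name}\tURL: {url}")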


@@ -3,8 +3,10 @@ from tradingagents.default_config import DEFAULT_CONFIG

 # Create a custom config
 config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-4.1-nano"  # Use a different model
-config["quick_think_llm"] = "gpt-4.1-nano"  # Use a different model
+config["llm_provider"] = "google"  # Use a different provider
+config["backend_url"] = "https://generativelanguage.googleapis.com/v1"  # Use a different backend
+config["deep_think_llm"] = "gemini-2.0-flash"  # Use a different model
+config["quick_think_llm"] = "gemini-2.0-flash"  # Use a different model
 config["max_debate_rounds"] = 1  # Increase debate rounds
 config["online_tools"] = True  # Use online tools

pyproject.toml (new file, +34)

@@ -0,0 +1,34 @@
[project]
name = "tradingagents"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"akshare>=1.16.98",
"backtrader>=1.9.78.123",
"chainlit>=2.5.5",
"chromadb>=1.0.12",
"eodhd>=1.0.32",
"feedparser>=6.0.11",
"finnhub-python>=2.4.23",
"langchain-anthropic>=0.3.15",
"langchain-experimental>=0.3.4",
"langchain-google-genai>=2.1.5",
"langchain-openai>=0.3.23",
"langgraph>=0.4.8",
"pandas>=2.3.0",
"parsel>=1.10.0",
"praw>=7.8.1",
"pytz>=2025.2",
"questionary>=2.1.0",
"redis>=6.2.0",
"requests>=2.32.4",
"rich>=14.0.0",
"setuptools>=80.9.0",
"stockstats>=0.6.5",
"tqdm>=4.67.1",
"tushare>=1.4.21",
"typing-extensions>=4.14.0",
"yfinance>=0.2.63",
]


@@ -12,14 +12,22 @@ from dateutil.relativedelta import relativedelta
 from langchain_openai import ChatOpenAI
 import tradingagents.dataflows.interface as interface
 from tradingagents.default_config import DEFAULT_CONFIG
+from langchain_core.messages import HumanMessage

 def create_msg_delete():
     def delete_messages(state):
-        """To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
+        """Clear messages and add placeholder for Anthropic compatibility"""
         messages = state["messages"]
-        return {"messages": [RemoveMessage(id=m.id) for m in messages]}
+
+        # Remove all messages
+        removal_operations = [RemoveMessage(id=m.id) for m in messages]
+
+        # Add a minimal placeholder message
+        placeholder = HumanMessage(content="Continue analysis")
+
+        return {"messages": removal_operations + [placeholder]}

     return delete_messages
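The placeholder matters because Anthropic's chat API rejects an empty messages list and expects the conversation to resume from a user turn, so clearing history outright would break the next model call. A minimal sketch of what the node returns (the message ids are hypothetical):

from langchain_core.messages import AIMessage, HumanMessage

delete_messages = create_msg_delete()
state = {"messages": [HumanMessage(content="hi", id="1"),
                      AIMessage(content="hello", id="2")]}
result = delete_messages(state)
# result["messages"]: [RemoveMessage(id="1"), RemoveMessage(id="2"),
#                      HumanMessage(content="Continue analysis")]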


@@ -5,11 +5,11 @@ from openai import OpenAI

 class FinancialSituationMemory:
     def __init__(self, name, config):
-        if config["openai_backend"] == "http://localhost:11434/v1":
+        if config["backend_url"] == "http://localhost:11434/v1":
             self.embedding = "nomic-embed-text"
         else:
-            self.embedding = "text-embedding-ada-002"
-        self.client = OpenAI(base_url=config["openai_backend"])
+            self.embedding = "text-embedding-3-small"
+        self.client = OpenAI()
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
         self.situation_collection = self.chroma_client.create_collection(name=name)
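Note that with the bare OpenAI() client the API key now comes from the OPENAI_API_KEY environment variable, and the nomic-embed-text branch would now hit api.openai.com rather than the local Ollama server, since no base_url is passed. A sketch of how the stored model name is typically consumed (get_embedding as a method name is an assumption, not shown in this diff):

def get_embedding(self, text):
    """Embed text with the configured model via the OpenAI embeddings API (sketch)."""
    response = self.client.embeddings.create(model=self.embedding, input=text)
    return response.data[0].embedding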


@@ -704,10 +704,10 @@ def get_YFin_data(

 def get_stock_news_openai(ticker, curr_date):
     config = get_config()
-    client = OpenAI(base_url=config["openai_backend"])
+    client = OpenAI()

     response = client.responses.create(
-        model=config["quick_think_llm"],
+        model="gpt-4.1-mini",
         input=[
             {
                 "role": "system",
@@ -739,10 +739,10 @@ def get_stock_news_openai(ticker, curr_date):

 def get_global_news_openai(curr_date):
     config = get_config()
-    client = OpenAI(base_url=config["openai_backend"])
+    client = OpenAI()

     response = client.responses.create(
-        model=config["quick_think_llm"],
+        model="gpt-4.1-mini",
         input=[
             {
                 "role": "system",
@@ -774,10 +774,10 @@ def get_global_news_openai(curr_date):

 def get_fundamentals_openai(ticker, curr_date):
     config = get_config()
-    client = OpenAI(base_url=config["openai_backend"])
+    client = OpenAI()

     response = client.responses.create(
-        model=config["quick_think_llm"],
+        model="gpt-4.1-mini",
         input=[
             {
                 "role": "system",


@@ -8,9 +8,10 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
+    "llm_provider": "openai",
     "deep_think_llm": "o4-mini",
     "quick_think_llm": "gpt-4o-mini",
-    "openai_backend": "https://api.openai.com/v1",
+    "backend_url": "https://api.openai.com/v1",
     # Debate and discussion settings
     "max_debate_rounds": 1,
     "max_risk_discuss_rounds": 1,


@@ -7,6 +7,9 @@ from datetime import date
 from typing import Dict, Any, Tuple, List, Optional
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI

 from langgraph.prebuilt import ToolNode

 from tradingagents.agents import *
@@ -55,10 +58,18 @@ class TradingAgentsGraph:
         )

         # Initialize LLMs
-        self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["openai_backend"],)
-        self.quick_thinking_llm = ChatOpenAI(
-            model=self.config["quick_think_llm"], temperature=0.1, base_url=self.config["openai_backend"],
-        )
+        if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
+            self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "anthropic":
+            self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
+            self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "google":
+            self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
+            self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
+        else:
+            raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")

         self.toolkit = Toolkit(config=self.config)

         # Initialize memories
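The chain dispatches on provider name, with the three OpenAI-compatible backends (openai, ollama, openrouter) sharing ChatOpenAI. An equivalent table-driven sketch, not the repository's code, shows how further providers could be slotted in:

from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI

def make_llm(provider: str, model: str, backend_url: str):
    """Build a chat model for the given provider (sketch, not repo code)."""
    provider = provider.lower()
    if provider in ("openai", "ollama", "openrouter"):
        # Any OpenAI-compatible endpoint works through ChatOpenAI
        return ChatOpenAI(model=model, base_url=backend_url)
    factories = {
        "anthropic": lambda: ChatAnthropic(model=model, base_url=backend_url),
        "google": lambda: ChatGoogleGenerativeAI(model=model),  # key from GOOGLE_API_KEY
    }
    if provider not in factories:
        raise ValueError(f"Unsupported LLM provider: {provider}")
    return factories[provider]()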

uv.lock (generated new file, +5405; diff suppressed because it is too large)