mirror of
https://github.com/langgenius/dify.git
synced 2025-07-05 08:07:10 +00:00
383 lines
22 KiB
Python
383 lines
22 KiB
Python
![]() |
import os
|
||
|
from typing import Generator
|
||
|
|
||
|
import pytest
|
||
|
|
||
|
from core.model_runtime.entities.message_entities import AssistantPromptMessage, TextPromptMessageContent, UserPromptMessage, \
|
||
|
SystemPromptMessage, ImagePromptMessageContent, PromptMessageTool
|
||
|
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
|
||
|
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
|
||
|
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunkDelta, \
|
||
|
LLMResultChunk
|
||
|
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||
|
from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel
|
||
|
|
||
|
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
|
||
|
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||
|
|
||
|
def test_predefined_models():
    """The OpenAI provider must ship at least one predefined model schema."""
    llm = OpenAILargeLanguageModel()
    schemas = llm.predefined_models()

    assert len(schemas) >= 1
    assert isinstance(schemas[0], AIModelEntity)
|
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_validate_credentials_for_chat_model(setup_openai_mock):
    """Credential validation rejects a bogus key and accepts the real one (chat model)."""
    llm = OpenAILargeLanguageModel()

    # A made-up API key must fail validation.
    with pytest.raises(CredentialsValidateFailedError):
        llm.validate_credentials(
            model='gpt-3.5-turbo',
            credentials={'openai_api_key': 'invalid_key'},
        )

    # The key supplied via the environment must validate cleanly.
    llm.validate_credentials(
        model='gpt-3.5-turbo',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
    )
|
@pytest.mark.parametrize('setup_openai_mock', [['completion']], indirect=True)
def test_validate_credentials_for_completion_model(setup_openai_mock):
    """Credential validation rejects a bogus key and accepts the real one (completion model)."""
    llm = OpenAILargeLanguageModel()

    # A made-up API key must fail validation.
    with pytest.raises(CredentialsValidateFailedError):
        llm.validate_credentials(
            model='text-davinci-003',
            credentials={'openai_api_key': 'invalid_key'},
        )

    # The key supplied via the environment must validate cleanly.
    llm.validate_credentials(
        model='text-davinci-003',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
    )
|
@pytest.mark.parametrize('setup_openai_mock', [['completion']], indirect=True)
def test_invoke_completion_model(setup_openai_mock):
    """Blocking invoke of a completion model returns a non-empty, single-token answer."""
    llm = OpenAILargeLanguageModel()

    creds = {
        'openai_api_key': os.environ.get('OPENAI_API_KEY'),
        'openai_api_base': 'https://api.openai.com',
    }
    response = llm.invoke(
        model='gpt-3.5-turbo-instruct',
        credentials=creds,
        prompt_messages=[UserPromptMessage(content='Hello World!')],
        model_parameters={'temperature': 0.0, 'max_tokens': 1},
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
    # max_tokens=1 caps the completion at exactly one token.
    assert llm._num_tokens_from_string('gpt-3.5-turbo-instruct', response.message.content) == 1
|
@pytest.mark.parametrize('setup_openai_mock', [['completion']], indirect=True)
def test_invoke_stream_completion_model(setup_openai_mock):
    """Streaming invoke of a completion model yields well-formed, non-empty chunks."""
    llm = OpenAILargeLanguageModel()

    creds = {
        'openai_api_key': os.environ.get('OPENAI_API_KEY'),
        'openai_organization': os.environ.get('OPENAI_ORGANIZATION'),
    }
    stream = llm.invoke(
        model='gpt-3.5-turbo-instruct',
        credentials=creds,
        prompt_messages=[UserPromptMessage(content='Hello World!')],
        model_parameters={'temperature': 0.0, 'max_tokens': 100},
        stream=True,
        user="abc-123",
    )

    assert isinstance(stream, Generator)

    for chunk in stream:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # Every non-final chunk must carry some content; the final one may be empty.
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
|
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_invoke_chat_model(setup_openai_mock):
    """Blocking invoke of a chat model returns a result that also converts to a stream."""
    llm = OpenAILargeLanguageModel()

    messages = [
        SystemPromptMessage(content='You are a helpful AI assistant.'),
        UserPromptMessage(content='Hello World!'),
    ]
    params = {
        'temperature': 0.0,
        'top_p': 1.0,
        'presence_penalty': 0.0,
        'frequency_penalty': 0.0,
        'max_tokens': 10,
    }
    response = llm.invoke(
        model='gpt-3.5-turbo',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=messages,
        model_parameters=params,
        stop=['How'],
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0

    # The blocking result must also be convertible into a well-formed chunk stream.
    for chunk in llm._llm_result_to_stream(response):
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
|
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_invoke_chat_model_with_vision(setup_openai_mock):
    """A vision model accepts mixed text + image content and returns a non-empty answer."""
    llm = OpenAILargeLanguageModel()

    # Inline PNG as a base64 data URI; split into adjacent literals purely for line length.
    image_data = (
        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAE4AAABMCAYAAADDYoEWAAAMQGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBoAQSkhN4EkRpASggt9I4gKiEJEEqMgaBiRxcVXLuIgA1dFVGwAmJBETuLYu+LBRVlXSzYlTcpoOu+8r35vrnz33/O/OfMmbllAFA7zhGJclF1APKEBeLYYH/6uOQUOukpIAEdoAy0gA2Hmy9iRkeHA1iG2r+Xd9cBIm2v2Eu1/tn/X4sGj5/PBQCJhjidl8/Ng/gAAHg1VyQuAIAo5c2mFoikGFagJYYBQrxIijPluFqK0+V4j8wmPpYFcTsASiocjjgTANVLkKcXcjOhhmo/xI5CnkAIgBodYp+8vMk8iNMgtoY2Ioil+oz0H3Qy/6aZPqzJ4WQOY/lcZEUpQJAvyuVM/z/T8b9LXq5kyIclrCpZ4pBY6Zxh3m7mTA6TYhWI+4TpkVEQa0L8QcCT2UOMUrIkIQlye9SAm8+COYMrDVBHHicgDGIDiIOEuZHhCj49QxDEhhjuEHSaoIAdD7EuxIv4+YFxCptN4smxCl9oY4aYxVTwZzlimV+pr/uSnASmQv91Fp+t0MdUi7LikyCmQGxeKEiMhFgVYof8nLgwhc3YoixW5JCNWBIrjd8c4li+MNhfro8VZoiDYhX2pXn5Q/PFNmUJ2JEKvK8gKz5Enh+sncuRxQ/ngl3iC5kJQzr8/HHhQ3Ph8QMC5XPHnvGFCXEKnQ+iAv9Y+VicIsqNVtjjpvzcYClvCrFLfmGcYiyeWAA3pFwfzxAVRMfL48SLsjmh0fJ48OUgHLBAAKADCazpYDLIBoLOvqY+eCfvCQIcIAaZgA/sFczQiCRZjxBe40AR+BMiPsgfHucv6+WDQsh/HWblV3uQIestlI3IAU8gzgNhIBfeS2SjhMPeEsFjyAj+4Z0DKxfGmwurtP/f80Psd4YJmXAFIxnySFcbsiQGEgOIIcQgog2uj/vgXng4vPrB6oQzcI+heXy3JzwhdBEeEq4Rugm3JgmKxT9FGQG6oX6QIhfpP+YCt4Sarrg/7g3VoTKug+sDe9wF+mHivtCzK2RZirilWaH/pP23GfywGgo7siMZJY8g+5Gtfx6paqvqOqwizfWP+ZHHmj6cb9Zwz8/+WT9knwfbsJ8tsUXYfuwMdgI7hx3BmgAda8WasQ7sqBQP767Hst015C1WFk8O1BH8w9/Qykozme9Y59jr+EXeV8CfJn1HA9Zk0XSxIDOrgM6EXwQ+nS3kOoyiOzk6OQMg/b7IX19vYmTfDUSn4zs3/w8AvFsHBwcPf+dCWwHY6w4f/0PfOWsG/HQoA3D2EFciLpRzuPRCgG8JNfik6QEjYAas4XycgBvwAn4gEISCKBAPksFEGH0W3OdiMBXMBPNACSgDy8EaUAk2gi1gB9gN9oEmcAScAKfBBXAJXAN34O7pAS9AP3gHPiMIQkKoCA3RQ4wRC8QOcUIYiA8SiIQjsUgykoZkIkJEgsxE5iNlyEqkEtmM1CJ7kUPICeQc0oXcQh4gvchr5BOKoSqoFmqIWqKjUQbKRMPQeHQCmolOQYvQBehStAKtQXehjegJ9AJ6De1GX6ADGMCUMR3MBLPHGBgLi8JSsAxMjM3GSrFyrAarx1rgOl/BurE+7CNOxGk4HbeHOzgET8C5+BR8Nr4Er8R34I14O34Ff4D3498IVIIBwY7gSWATxhEyCVMJJYRywjbCQcIp+Cz1EN4RiUQdohXRHT6LycRs4gziEuJ6YgPxOLGL+Ig4QCKR9Eh2JG9SFIlDKiCVkNaRdpFaSZdJPaQPSspKxkpOSkFKKUpCpWKlcqWdSseULis9VfpMVidbkD3JUWQeeTp5GXkruYV8kdxD/kzRoFhRvCnxlGzKPEoFpZ5yinKX8kZZWdlU2UM5RlmgPFe5QnmP8lnlB8ofVTRVbFVYKqkqEpWlKttVjqvcUnlDpVItqX7UFGoBdSm1lnqSep/6QZWm6qDKVuWpzlGtUm1Uvaz6Uo2sZqHGVJuoVqRWrrZf7aJanzpZ3VKdpc5Rn61epX5I/Yb6gAZNY4xGlEaexhKNnRrnNJ5pkjQtNQM1eZoLNLdontR8RMNoZjQWjUubT9tKO0Xr0SJqWWmxtbK1yrR2a3Vq9WtrartoJ2pP067SPqrdrYPpWOqwdXJ1luns07mu82mE4QjmCP6IxSPqR1we8V53pK6fLl+3VLdB95ruJz26XqBejt4KvSa9e/q4vq1+jP5U/Q36p/T7RmqN9BrJHVk6ct/I2waoga1BrMEMgy0GHQYDhkaGwYYiw3WGJw37jHSM/IyyjVYbHTPqNaYZ+xgLjFcbtxo/p2vTmfRcegW9nd5vYmASYiIx2WzSafLZ1Mo0wbTYtMH0nhnFjGGWYbbarM2s39zYPMJ8pnmd+W0LsgXDIstircUZi/eWVpZJlgstmyyfWelasa2KrOqs7lpTrX2tp1jXWF+1IdowbHJs1ttcskVtXW2zbKtsL9qhdm52Arv1dl2jCKM8RglH1Yy6Ya9iz7QvtK+zf+Cg4xDuUOzQ5PBytPnolNErRp8Z/c3R1THXcavjnTGaY0LHFI9pGfPaydaJ61TldNWZ6hzkPMe52fmVi50L32WDy01XmmuE60LXNtevbu5uYrd6t153c/c092r3GwwtRjRjCeOsB8HD32OOxxGPj55ungWe+zz/8rL3yvHa6fVsrNVY/titYx95m3pzvDd7d/vQfdJ8Nvl0+5r4cnxrfB/6mfnx/Lb5PWXaMLOZu5gv/R39xf4H/d+zPFmzWMcDsIDggNKAzkDNwITAysD7QaZBmUF1Qf3BrsEzgo+HEELCQlaE3GAbsrnsWnZ/qHvorND2MJWwuLDKsIfhtuHi8JYINCI0YlXE3UiLSGFkUxSIYketiroXbRU9JfpwDDEmOqYq5knsmNiZsWfiaHGT4nbGvYv3j18WfyfBOkGS0JaolpiaWJv4PikgaWVS97jR42aNu5CsnyxIbk4hpSSmbEsZGB84fs34nlTX1JLU6xOsJkybcG6i/sTciUcnqU3iTNqfRkhLStuZ9oUTxanhDKSz06vT+7ks7lruC54fbzWvl+/NX8l/muGdsTLjWaZ35qrM3izfrPKsPgFLUCl4lR2SvTH7fU5Uzvacwdyk3IY8pby0vENCTWGOsH2y0eRpk7tEdqISUfcUzylrpvSLw8Tb8pH8CfnNBVrwR75DYi35RfKg0KewqvDD1MSp+6dpTBNO65huO33x9KdFQUW/zcBncGe0zTSZOW/mg1nMWZtnI7PTZ7fNMZuzYE7P3OC5O+ZR5uXM+73YsXhl8dv5SfNbFhgumLvg0S/Bv9SVqJaIS24s9Fq4cRG+SLCoc7Hz4nWLv5XySs+XOZaVl31Zwl1y/tcxv1b8Org0Y2nnMrdlG5YTlwuXX1/hu2LHSo2VRSsfrYpY1biavrp09ds1k9acK3cp37iWslaytrsivKJ5nfm65eu+VGZVXqvyr2qoNqheXP1+PW/95Q1+G+o3Gm4s2/hpk2DTzc3BmxtrLGvKtxC3FG55sjVx65nfGL/VbtPfVrbt63bh9u4dsTvaa91ra3ca7FxWh9ZJ6np3pe66tDtgd3O9ff3mBp2Gsj1gj2TP871pe6/vC9vXtp+xv/6AxYHqg7SDpY1I4/TG/qaspu7m5OauQ6GH2lq8Wg4edji8/YjJkaqj2keXHaMcW3BssLWodeC46HjficwTj9omtd05Oe7k1faY9s5TYafOng46ffIM80zrWe+zR855njt0nnG+6YLbhcYO146Dv7v+frDTrbPxovvF5ksel1q6xnYdu+x7+cSVgCunr7KvXrgWea3resL1mzdSb3Tf5N18div31qvbhbc/35l7l3C39J76vfL7Bvdr/rD5o6Hbrfvog4AHHQ/jHt55xH304nH+4y89C55Qn5Q/NX5a+8zp2ZHeoN5L'
        'z8c/73khevG5r+RPjT+rX1q/PPCX318d/eP6e16JXw2+XvJG7832ty5v2waiB+6/y3v3+X3'
    )

    response = llm.invoke(
        model='gpt-4-vision-preview',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=[
            SystemPromptMessage(content='You are a helpful AI assistant.'),
            UserPromptMessage(content=[
                TextPromptMessageContent(data='Hello World!'),
                ImagePromptMessageContent(data=image_data),
            ]),
        ],
        model_parameters={'temperature': 0.0, 'max_tokens': 100},
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
|
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_invoke_chat_model_with_tools(setup_openai_mock):
    """When tools are supplied, a relevant question makes the model emit tool calls."""
    llm = OpenAILargeLanguageModel()

    # One tool the question matches (weather) and one it does not (stocks).
    weather_tool = PromptMessageTool(
        name='get_weather',
        description='Determine weather in my location',
        parameters={
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state e.g. San Francisco, CA"
                },
                "unit": {
                    "type": "string",
                    "enum": ["c", "f"]
                }
            },
            "required": ["location"]
        },
    )
    stock_tool = PromptMessageTool(
        name='get_stock_price',
        description='Get the current stock price',
        parameters={
            "type": "object",
            "properties": {
                "symbol": {
                    "type": "string",
                    "description": "The stock symbol"
                }
            },
            "required": ["symbol"]
        },
    )

    response = llm.invoke(
        model='gpt-3.5-turbo',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=[
            SystemPromptMessage(content='You are a helpful AI assistant.'),
            UserPromptMessage(content="what's the weather today in London?"),
        ],
        model_parameters={'temperature': 0.0, 'max_tokens': 100},
        tools=[weather_tool, stock_tool],
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert isinstance(response.message, AssistantPromptMessage)
    # The weather question should trigger at least one tool call.
    assert len(response.message.tool_calls) > 0
|
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_invoke_stream_chat_model(setup_openai_mock):
    """Streaming invoke of a chat model yields well-formed chunks with usage on the final one."""
    llm = OpenAILargeLanguageModel()

    stream = llm.invoke(
        model='gpt-3.5-turbo',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=[
            SystemPromptMessage(content='You are a helpful AI assistant.'),
            UserPromptMessage(content='Hello World!'),
        ],
        model_parameters={'temperature': 0.0, 'max_tokens': 100},
        stream=True,
        user="abc-123",
    )

    assert isinstance(stream, Generator)

    for chunk in stream:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # Non-final chunks must carry content; the final chunk must carry usage.
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
        if chunk.delta.finish_reason is not None:
            assert chunk.delta.usage is not None
            assert chunk.delta.usage.completion_tokens > 0
|
def test_get_num_tokens():
    """Token counting matches the known tiktoken counts for fixed prompts."""
    llm = OpenAILargeLanguageModel()

    # Plain completion prompt: 'Hello World!' encodes to three tokens.
    count = llm.get_num_tokens(
        model='gpt-3.5-turbo-instruct',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=[UserPromptMessage(content='Hello World!')],
    )
    assert count == 3

    # Chat prompt adds per-message framing tokens on top of the content.
    count = llm.get_num_tokens(
        model='gpt-3.5-turbo',
        credentials={'openai_api_key': os.environ.get('OPENAI_API_KEY')},
        prompt_messages=[
            SystemPromptMessage(content='You are a helpful AI assistant.'),
            UserPromptMessage(content='Hello World!'),
        ],
    )
    assert count == 21
|
@pytest.mark.parametrize('setup_openai_mock', [['chat', 'remote']], indirect=True)
def test_fine_tuned_models(setup_openai_mock):
    """Remote (fine-tuned) models are listed and an LLM-typed one can be invoked.

    Fix: the original fell through to the selection loop even when
    ``remote_models`` was empty, leaving ``llm_model`` unbound and raising
    ``NameError`` at the subsequent ``isinstance`` assert; it also silently
    picked the *last* model when none had ``ModelType.LLM``. We now return
    early on an empty list and select explicitly with ``next``.
    """
    model = OpenAILargeLanguageModel()

    remote_models = model.remote_models(credentials={
        'openai_api_key': os.environ.get('OPENAI_API_KEY')
    })

    if not remote_models:
        # Nothing remote to test against — the contract is only that a list
        # comes back. Bail out before the invoke path, which needs a model.
        assert isinstance(remote_models, list)
        return

    assert isinstance(remote_models[0], AIModelEntity)

    # Pick the first genuinely LLM-typed model; fail loudly if none exists.
    llm_model = next(
        (m for m in remote_models if m.model_type == ModelType.LLM),
        None,
    )
    assert isinstance(llm_model, AIModelEntity)

    # test invoke
    result = model.invoke(
        model=llm_model.model,
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        },
        prompt_messages=[
            SystemPromptMessage(content='You are a helpful AI assistant.'),
            UserPromptMessage(content='Hello World!'),
        ],
        model_parameters={'temperature': 0.0, 'max_tokens': 100},
        stream=False,
        user="abc-123",
    )

    assert isinstance(result, LLMResult)
|
def test__get_num_tokens_by_gpt2():
    """The GPT-2 fallback tokenizer counts 'Hello World!' as three tokens."""
    llm = OpenAILargeLanguageModel()
    assert llm._get_num_tokens_by_gpt2('Hello World!') == 3