mirror of https://github.com/infiniflow/ragflow.git
The default max_tokens of 215 is too small and answers are often cut off; raise it to 512 to address this issue. (#845)
### What problem does this PR solve?

The default `max_tokens` of 215 is too small, so answers are often truncated mid-sentence. This PR raises the default to 512 in both the backend model and the frontend presets.

### Type of change

- [x] Refactoring
parent a7bd427116
commit 9a01d1b876
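The symptom behind this change is easy to reproduce. Below is a minimal sketch — not RAGFlow's actual call site — assuming an OpenAI-compatible endpoint and the `openai` Python client, showing how an `llm_setting` dict like the one in this diff feeds a chat completion and how a low `max_tokens` ceiling surfaces as a truncated answer:

```python
# Minimal sketch (illustrative, not RAGFlow's code path): the llm_setting
# defaults map directly onto OpenAI-compatible chat completion parameters.
from openai import OpenAI

llm_setting = {"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
               "presence_penalty": 0.4, "max_tokens": 512}  # was 215

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
resp = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Summarize the retrieved chunks."}],
    **llm_setting,
)
# finish_reason == "length" means the reply hit max_tokens and was cut off,
# which is exactly the symptom this PR addresses.
print(resp.choices[0].finish_reason)
print(resp.choices[0].message.content)
```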
```diff
@@ -759,7 +759,7 @@ class Dialog(DataBaseModel):
                          help_text="English|Chinese")
     llm_id = CharField(max_length=128, null=False, help_text="default llm ID")
     llm_setting = JSONField(null=False, default={"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
-                                                 "presence_penalty": 0.4, "max_tokens": 215})
+                                                 "presence_penalty": 0.4, "max_tokens": 512})
     prompt_type = CharField(
         max_length=16,
         null=False,
```
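As an aside on the pattern above: peewee also accepts a callable as a field default, which avoids every model instance sharing one mutable dict. A self-contained sketch of that variant — only the field names and default values come from the diff; the rest (the in-memory database, the `Dialog` subset) is illustrative:

```python
from peewee import CharField, Model
from playhouse.sqlite_ext import SqliteExtDatabase, JSONField

db = SqliteExtDatabase(":memory:")

def default_llm_setting():
    # Same defaults as the diff, with the new 512-token ceiling.
    return {"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
            "presence_penalty": 0.4, "max_tokens": 512}

class Dialog(Model):
    llm_id = CharField(max_length=128, null=False, help_text="default llm ID")
    # Callable default: each row gets its own fresh dict.
    llm_setting = JSONField(null=False, default=default_llm_setting)

    class Meta:
        database = db

db.create_tables([Dialog])
dialog = Dialog.create(llm_id="demo-llm")
print(dialog.llm_setting["max_tokens"])  # -> 512
```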
```diff
@@ -31,14 +31,14 @@ export const settledModelVariableMap = {
     top_p: 0.3,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
   [ModelVariableType.Balance]: {
     temperature: 0.5,
     top_p: 0.5,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
 };
```
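Since the same default now lives in both the backend model and the frontend presets, a small consistency check can keep the two from drifting apart again. A hedged sketch — the file paths here are assumptions for illustration, not part of this PR:

```python
import pathlib
import re

# Assumed paths; adjust to the actual repo layout.
backend = pathlib.Path("api/db/db_models.py").read_text()
frontend = pathlib.Path("web/src/pages/chat/utils.ts").read_text()  # hypothetical path

# The backend default and both updated frontend presets should agree on 512.
assert '"max_tokens": 512' in backend, "backend default not updated"
assert len(re.findall(r"max_tokens: 512", frontend)) >= 2, "frontend presets not updated"
print("max_tokens defaults are in sync")
```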