fix: load the local finetuning model from pipeline YAML (#4729) (#4760)

When using a local model in the pipeline YAML, the PromptModel cannot select
the HFLocalInvocationLayer because get_task does not support offline
(local-only) models.

*Local model usage:
  add a task_name parameter to model_kwargs for the local model — for
  example, text-generation or text2text-generation.

- name: PModel
  type: PromptModel
  params:
    model_name_or_path: /local_model_path
    model_kwargs:
      task_name: text-generation
- name: Prompter
  params:
    model_name_or_path: PModel
    default_prompt_template: question-answering
  type: PromptNode

Signed-off-by: yuanwu <yuan.wu@intel.com>
This commit is contained in:
yuanwu2017 2023-05-02 23:04:42 +08:00 committed by GitHub
parent 479092e3c1
commit c88bc19791
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -1,5 +1,6 @@
from typing import Optional, Union, List, Dict
import logging
import os
import torch
@ -266,6 +267,9 @@ class HFLocalInvocationLayer(PromptModelInvocationLayer):
@classmethod
def supports(cls, model_name_or_path: str, **kwargs) -> bool:
task_name: Optional[str] = None
if os.path.exists(model_name_or_path):
return True
try:
task_name = get_task(model_name_or_path, use_auth_token=kwargs.get("use_auth_token", None))
except RuntimeError: