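"""Service API controllers for completion and chat messages.

Provides blocking and streaming (SSE) message-creation endpoints for
completion-mode and chat-mode apps, plus endpoints to stop in-flight tasks.
"""
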
import json
import logging
from typing import Union, Generator

from flask import stream_with_context, Response
from flask_restful import reqparse
from werkzeug.exceptions import NotFound, InternalServerError

import services
from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import AppUnavailableError, ProviderNotInitializeError, NotChatAppError, \
    ConversationCompletedError, CompletionRequestError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.service_api.wraps import AppApiResource
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \
    LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from services.completion_service import CompletionService


class CompletionApi(AppApiResource):
    def post(self, app_model, end_user):
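        """Create a completion message for a completion-mode app.

        Expects JSON with `inputs` (required) plus optional `query`, `user`,
        and `response_mode` ('blocking' or 'streaming'); returns a JSON body
        or an SSE stream accordingly.
        """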
        if app_model.mode != 'completion':
            raise AppUnavailableError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        parser.add_argument('user', type=str, location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        if end_user is None and args['user'] is not None:
            end_user = create_or_update_end_user_for_user_id(app_model, args['user'])

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=end_user,
                args=args,
                from_source='api',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


class CompletionStopApi(AppApiResource):
    def post(self, app_model, end_user, task_id):
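        """Stop an in-flight completion task identified by `task_id`."""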
        if app_model.mode != 'completion':
            raise AppUnavailableError()

        PubHandler.stop(end_user, task_id)

        return {'result': 'success'}, 200


class ChatApi(AppApiResource):
    def post(self, app_model, end_user):
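        """Create a chat message for a chat-mode app.

        Expects JSON with `inputs` and `query` (required) plus optional
        `conversation_id`, `user`, and `response_mode` ('blocking' or
        'streaming'); returns a JSON body or an SSE stream accordingly.
        """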
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, required=True, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        parser.add_argument('conversation_id', type=uuid_value, location='json')
        parser.add_argument('user', type=str, location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        if end_user is None and args['user'] is not None:
            end_user = create_or_update_end_user_for_user_id(app_model, args['user'])

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=end_user,
                args=args,
                from_source='api',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


class ChatStopApi(AppApiResource):
    def post(self, app_model, end_user, task_id):
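        """Stop an in-flight chat task identified by `task_id`."""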
        if app_model.mode != 'chat':
            raise NotChatAppError()

        PubHandler.stop(end_user, task_id)

        return {'result': 'success'}, 200


def compact_response(response: Union[dict, Generator]) -> Response:
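    """Wrap a CompletionService result in an HTTP response.

    A dict (blocking mode) is serialized as a JSON response; a generator
    (streaming mode) is wrapped as a Server-Sent Events stream, with errors
    raised mid-stream emitted as `data:` events instead of aborting the response.
    """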
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except services.errors.conversation.ConversationNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
            except services.errors.conversation.ConversationCompletedError:
                yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
            except services.errors.app_model_config.AppModelConfigBrokenError:
                logging.exception("App model config broken.")
                yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError as ex:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


api.add_resource(CompletionApi, '/completion-messages')
api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
api.add_resource(ChatApi, '/chat-messages')
api.add_resource(ChatStopApi, '/chat-messages/<string:task_id>/stop')
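
# Example request (hypothetical values; assumes the service API blueprint is
# mounted under /v1 and authenticated with a Bearer app API key):
#
#   curl -X POST 'https://api.dify.ai/v1/chat-messages' \
#     -H 'Authorization: Bearer {api_key}' \
#     -H 'Content-Type: application/json' \
#     -d '{"inputs": {}, "query": "Hello", "response_mode": "streaming", "user": "user-123"}'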