Mirror of https://github.com/HKUDS/LightRAG.git, synced 2025-10-31 09:49:54 +00:00
Able to call RAG correctly, but no content is returned after the RAG call completes
This commit is contained in:
parent 4e5517a602
commit 828af49d6b
@@ -659,38 +659,55 @@ def create_app(args):
             cleaned_query, mode = parse_query_mode(query)
 
             # Run the query through RAG
-            response = await rag.aquery(
-                cleaned_query,
-                param=QueryParam(
-                    mode=mode,
-                stream=request.stream,
-            )
-            )
-
-            if request.stream:
+            if request.stream:
+                response = await rag.aquery(
+                    cleaned_query,
+                    param=QueryParam(
+                        mode=mode,
+                        stream=True,
+                        only_need_context=False
+                    ),
+                )
+
                 async def stream_generator():
-                    async for chunk in response:
-                        yield OllamaChatResponse(
-                            model=LIGHTRAG_MODEL,
-                            created_at=LIGHTRAG_CREATED_AT,
-                            message=OllamaMessage(
-                                role="assistant",
-                                content=chunk
-                            ),
-                            done=False
-                        )
-                    # Send an empty completion message
-                    yield OllamaChatResponse(
-                        model=LIGHTRAG_MODEL,
-                        created_at=LIGHTRAG_CREATED_AT,
-                        message=OllamaMessage(
-                            role="assistant",
-                            content=""
-                        ),
-                        done=True
-                    )
-                return stream_generator()
+                    try:
+                        async for chunk in response:
+                            yield {
+                                "model": LIGHTRAG_MODEL,
+                                "created_at": LIGHTRAG_CREATED_AT,
+                                "message": {
+                                    "role": "assistant",
+                                    "content": chunk
+                                },
+                                "done": False
+                            }
+                        yield {
+                            "model": LIGHTRAG_MODEL,
+                            "created_at": LIGHTRAG_CREATED_AT,
+                            "message": {
+                                "role": "assistant",
+                                "content": ""
+                            },
+                            "done": True
+                        }
+                    except Exception as e:
+                        logging.error(f"Error in stream_generator: {str(e)}")
+                        raise
+                from fastapi.responses import StreamingResponse
+                import json
+                return StreamingResponse(
+                    (f"data: {json.dumps(chunk)}\n\n" async for chunk in stream_generator()),
+                    media_type="text/event-stream"
+                )
             else:
+                response = await rag.aquery(
+                    cleaned_query,
+                    param=QueryParam(
+                        mode=mode,
+                        stream=False,
+                        only_need_context=False
+                    ),
+                )
                 return OllamaChatResponse(
                     model=LIGHTRAG_MODEL,
                     created_at=LIGHTRAG_CREATED_AT,
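For context on the change: the old handler returned the bare stream_generator() object, which FastAPI does not turn into a streamed response body, and that is consistent with the reported symptom (the RAG call succeeds, but no content ever reaches the client). The rewrite wraps the generator in a StreamingResponse and frames each chunk as a Server-Sent Events "data:" line. Below is a minimal, self-contained sketch of that same pattern, independent of LightRAG's internals; the /chat route and the fake_chunks() generator are illustrative stand-ins, not the project's actual code.

import json
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

async def fake_chunks():
    # Stand-in for the chunks produced by rag.aquery(..., stream=True).
    for piece in ["Hello", ", ", "world"]:
        yield piece

@app.post("/chat")
async def chat():
    async def stream_generator():
        # Emit one message per chunk, then a final empty "done" message,
        # mirroring the shape of the dicts yielded in the commit above.
        async for chunk in fake_chunks():
            yield {"message": {"role": "assistant", "content": chunk}, "done": False}
        yield {"message": {"role": "assistant", "content": ""}, "done": True}

    # Returning stream_generator() directly would not stream; wrapping it in
    # StreamingResponse with SSE framing sends each chunk as it is produced.
    return StreamingResponse(
        (f"data: {json.dumps(chunk)}\n\n" async for chunk in stream_generator()),
        media_type="text/event-stream",
    )

With this framing each chunk reaches the client as its own SSE event as soon as it is generated, instead of the response ending with no content.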
Author: yangdx