mirror of https://github.com/FlagOpen/FlagEmbedding.git (synced 2025-06-27 02:39:58 +00:00)

simplify example code for embedder inference

This commit is contained in:
parent: effc2bb352
commit: 97be9b0f48
@@ -0,0 +1,34 @@
import os

from FlagEmbedding import FlagAutoModel


def test_base_multi_devices():
    model = FlagAutoModel.from_finetuned(
        'BAAI/bge-multilingual-gemma2',
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        devices=["cuda:0", "cuda:1"],   # if you don't have GPUs, you can use ["cpu", "cpu"]
        cache_dir=os.getenv('HF_HUB_CACHE', None),
    )

    queries = [
        "how much protein should a female eat",
        "summit define"
    ] * 100
    passages = [
        "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
        "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
    ] * 100

    queries_embeddings = model.encode_queries(queries)
    passages_embeddings = model.encode_corpus(passages)

    cos_scores = queries_embeddings @ passages_embeddings.T
    print(cos_scores[:2, :2])


if __name__ == '__main__':
    test_base_multi_devices()

    print("--------------------------------")
    print("Expected Output:")
    print("[[0.558 0.02113 ]\n [0.01643 0.526 ]]")
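As a hedged follow-up (not part of the commit): encode_queries and encode_corpus appear to return NumPy arrays, judging by the printed score matrix, so the scores can be turned into a per-query ranking with a few lines of NumPy. The helper name top_k_passages below is hypothetical.

    import numpy as np

    # Hypothetical helper, not from the commit: rank passages for each query by score.
    # Assumes queries_embeddings and passages_embeddings are NumPy arrays, as the
    # printed score matrix above suggests.
    def top_k_passages(queries_embeddings, passages_embeddings, passages, k=3):
        scores = queries_embeddings @ passages_embeddings.T    # (num_queries, num_passages)
        top_idx = np.argsort(-scores, axis=1)[:, :k]           # indices of the k highest-scoring passages
        return [[(passages[j], float(scores[i, j])) for j in row]
                for i, row in enumerate(top_idx)]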
@@ -0,0 +1,34 @@
import os

from FlagEmbedding import FlagAutoModel


def test_base_single_device():
    model = FlagAutoModel.from_finetuned(
        'BAAI/bge-multilingual-gemma2',
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        devices="cuda:0",   # if you don't have a GPU, you can use "cpu"
        cache_dir=os.getenv('HF_HUB_CACHE', None),
    )

    queries = [
        "how much protein should a female eat",
        "summit define"
    ] * 100
    passages = [
        "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
        "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
    ] * 100

    queries_embeddings = model.encode_queries(queries)
    passages_embeddings = model.encode_corpus(passages)

    cos_scores = queries_embeddings @ passages_embeddings.T
    print(cos_scores[:2, :2])


if __name__ == '__main__':
    test_base_single_device()

    print("--------------------------------")
    print("Expected Output:")
    print("[[0.558 0.0212 ]\n [0.01651 0.526 ]]")
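A minimal sketch (not part of the commit) of making the single-device example portable: choose the device at runtime with torch.cuda.is_available(), so the same script runs on machines without a GPU. The FlagAutoModel arguments mirror the call above.

    import torch

    from FlagEmbedding import FlagAutoModel

    # Choose the device at runtime; falls back to CPU when no GPU is present.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    model = FlagAutoModel.from_finetuned(
        'BAAI/bge-multilingual-gemma2',
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        devices=device,
    )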
@@ -0,0 +1,48 @@
import os

from FlagEmbedding import FlagAutoModel


def test_icl_multi_devices():
    examples = [
        {
            'instruct': 'Given a web search query, retrieve relevant passages that answer the query.',
            'query': 'what is a virtual interface',
            'response': "A virtual interface is a software-defined abstraction that mimics the behavior and characteristics of a physical network interface. It allows multiple logical network connections to share the same physical network interface, enabling efficient utilization of network resources. Virtual interfaces are commonly used in virtualization technologies such as virtual machines and containers to provide network connectivity without requiring dedicated hardware. They facilitate flexible network configurations and help in isolating network traffic for security and management purposes."
        },
        {
            'instruct': 'Given a web search query, retrieve relevant passages that answer the query.',
            'query': 'causes of back pain in female for a week',
            'response': "Back pain in females lasting a week can stem from various factors. Common causes include muscle strain due to lifting heavy objects or improper posture, spinal issues like herniated discs or osteoporosis, menstrual cramps causing referred pain, urinary tract infections, or pelvic inflammatory disease. Pregnancy-related changes can also contribute. Stress and lack of physical activity may exacerbate symptoms. Proper diagnosis by a healthcare professional is crucial for effective treatment and management."
        }
    ]
    model = FlagAutoModel.from_finetuned(
        'BAAI/bge-en-icl',
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        examples_for_task=examples,
        examples_instruction_format="<instruct>{}\n<query>{}\n<response>{}",
        devices=["cuda:0", "cuda:1"],   # if you don't have GPUs, you can use ["cpu", "cpu"]
        cache_dir=os.getenv('HF_HUB_CACHE', None),
    )

    queries = [
        "how much protein should a female eat",
        "summit define"
    ] * 100
    passages = [
        "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
        "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
    ] * 100

    queries_embeddings = model.encode_queries(queries)
    passages_embeddings = model.encode_corpus(passages)

    cos_scores = queries_embeddings @ passages_embeddings.T
    print(cos_scores[:2, :2])


if __name__ == '__main__':
    test_icl_multi_devices()

    print("--------------------------------")
    print("Expected Output:")
    print("[[0.579 0.2776]\n [0.2249 0.5146]]")
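For illustration only (FlagEmbedding's internal prompt assembly may differ): examples_instruction_format is an ordinary Python format string, so a single in-context example would render roughly as follows. The response text is trimmed here for brevity.

    # Illustration of how one in-context example could be rendered with the template
    # passed above; the library's actual prompt construction may differ.
    template = "<instruct>{}\n<query>{}\n<response>{}"
    example = {
        'instruct': 'Given a web search query, retrieve relevant passages that answer the query.',
        'query': 'what is a virtual interface',
        'response': "A virtual interface is a software-defined abstraction ..."
    }
    print(template.format(example['instruct'], example['query'], example['response']))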
@@ -0,0 +1,48 @@
import os

from FlagEmbedding import FlagAutoModel


def test_icl_single_device():
    examples = [
        {
            'instruct': 'Given a web search query, retrieve relevant passages that answer the query.',
            'query': 'what is a virtual interface',
            'response': "A virtual interface is a software-defined abstraction that mimics the behavior and characteristics of a physical network interface. It allows multiple logical network connections to share the same physical network interface, enabling efficient utilization of network resources. Virtual interfaces are commonly used in virtualization technologies such as virtual machines and containers to provide network connectivity without requiring dedicated hardware. They facilitate flexible network configurations and help in isolating network traffic for security and management purposes."
        },
        {
            'instruct': 'Given a web search query, retrieve relevant passages that answer the query.',
            'query': 'causes of back pain in female for a week',
            'response': "Back pain in females lasting a week can stem from various factors. Common causes include muscle strain due to lifting heavy objects or improper posture, spinal issues like herniated discs or osteoporosis, menstrual cramps causing referred pain, urinary tract infections, or pelvic inflammatory disease. Pregnancy-related changes can also contribute. Stress and lack of physical activity may exacerbate symptoms. Proper diagnosis by a healthcare professional is crucial for effective treatment and management."
        }
    ]
    model = FlagAutoModel.from_finetuned(
        'BAAI/bge-en-icl',
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        examples_for_task=examples,
        examples_instruction_format="<instruct>{}\n<query>{}\n<response>{}",
        devices="cuda:0",   # if you don't have a GPU, you can use "cpu"
        cache_dir=os.getenv('HF_HUB_CACHE', None),
    )

    queries = [
        "how much protein should a female eat",
        "summit define"
    ] * 100
    passages = [
        "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
        "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
    ] * 100

    queries_embeddings = model.encode_queries(queries)
    passages_embeddings = model.encode_corpus(passages)

    cos_scores = queries_embeddings @ passages_embeddings.T
    print(cos_scores[:2, :2])


if __name__ == '__main__':
    test_icl_single_device()

    print("--------------------------------")
    print("Expected Output:")
    print("[[0.579 0.2776]\n [0.2249 0.5146]]")
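One more hedged aside, not from the commit: the matrix product above only equals cosine similarity if the embeddings are unit-normalized, which the printed values suggest but the examples do not state. A quick NumPy check of that assumption:

    import numpy as np

    # Assumption being checked: the returned embeddings are L2-normalized, which is
    # what makes the matrix product above a cosine similarity.
    norms = np.linalg.norm(queries_embeddings, axis=1)
    assert np.allclose(norms, 1.0, atol=1e-3), "embeddings do not appear to be unit-normalized"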
@@ -5,8 +5,6 @@ from FlagEmbedding import FlagLLMModel
def test_base_multi_devices():
    model = FlagLLMModel(
        'BAAI/bge-multilingual-gemma2',
        normalize_embeddings=True,
        use_fp16=True,
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        query_instruction_format="<instruct>{}\n<query>{}",
        devices=["cuda:0", "cuda:1"],   # if you don't have GPUs, you can use ["cpu", "cpu"]
@@ -5,8 +5,6 @@ from FlagEmbedding import FlagLLMModel
def test_base_single_device():
    model = FlagLLMModel(
        'BAAI/bge-multilingual-gemma2',
        normalize_embeddings=True,
        use_fp16=True,
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        query_instruction_format="<instruct>{}\n<query>{}",
        devices="cuda:0",   # if you don't have a GPU, you can use "cpu"
@@ -17,8 +17,6 @@ def test_icl_multi_devices():
    ]
    model = FlagICLModel(
        'BAAI/bge-en-icl',
        normalize_embeddings=True,
        use_fp16=True,
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        query_instruction_format="<instruct>{}\n<query>{}",
        examples_for_task=examples,
@@ -17,8 +17,6 @@ def test_icl_single_device():
    ]
    model = FlagICLModel(
        'BAAI/bge-en-icl',
        normalize_embeddings=True,
        use_fp16=True,
        query_instruction_for_retrieval="Given a question, retrieve passages that answer the question.",
        query_instruction_format="<instruct>{}\n<query>{}",
        examples_for_task=examples,