test: improve some slow tests (#9297)

* test: improve slow tests

* rm leftover and improve test
This commit is contained in:
Stefano Fiorucci 2025-04-24 08:50:36 +02:00 committed by GitHub
parent 9ae7da8df3
commit df662daaef
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 8 additions and 17 deletions

View File

@@ -123,7 +123,7 @@ class TestLinkContentFetcher:
def test_run_bad_status_code(self):
"""Test behavior when a request results in an error status code"""
empty_byte_stream = b""
fetcher = LinkContentFetcher(raise_on_failure=False)
fetcher = LinkContentFetcher(raise_on_failure=False, retry_attempts=0)
mock_response = Mock(status_code=403)
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
"403 Client Error", request=Mock(), response=mock_response
@@ -304,12 +304,12 @@ class TestLinkContentFetcherAsync:
mock_get.return_value = mock_response
# With raise_on_failure=False
fetcher = LinkContentFetcher(raise_on_failure=False)
fetcher = LinkContentFetcher(raise_on_failure=False, retry_attempts=0)
streams = (await fetcher.run_async(urls=["https://www.example.com"]))["streams"]
assert len(streams) == 1 # Returns an empty stream
# With raise_on_failure=True
fetcher = LinkContentFetcher(raise_on_failure=True)
fetcher = LinkContentFetcher(raise_on_failure=True, retry_attempts=0)
with pytest.raises(httpx.HTTPStatusError):
await fetcher.run_async(urls=["https://www.example.com"])

View File

@@ -308,18 +308,9 @@ class TestOpenAIGenerator:
)
@pytest.mark.integration
def test_run_with_system_prompt(self):
generator = OpenAIGenerator(
model="gpt-4o-mini",
system_prompt="You answer in Portuguese, regardless of the language on which a question is asked",
)
result = generator.run("Can you explain the Pitagoras therom?")
assert "teorema" in result["replies"][0].lower()
result = generator.run(
"Can you explain the Pitagoras therom? Repeat the name of the theorem in German.",
system_prompt="You answer in German, regardless of the language on which a question is asked.",
)
assert "pythag" in result["replies"][0].lower()
generator = OpenAIGenerator(model="gpt-4o-mini", system_prompt="Answer in Italian using only one word.")
result = generator.run("What's the capital of Italy?")
assert "roma" in result["replies"][0].lower()
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),

View File

@@ -7,7 +7,7 @@ def test_async_pipeline_reentrance(waiting_component, spying_tracer):
pp = AsyncPipeline()
pp.add_component("wait", waiting_component())
run_data = [{"wait_for": 1}, {"wait_for": 2}]
run_data = [{"wait_for": 0.001}, {"wait_for": 0.002}]
async def run_all():
# Create concurrent tasks for each pipeline run

View File

@@ -23,7 +23,7 @@ class TestPipeline:
pp = Pipeline()
pp.add_component("wait", waiting_component())
run_data = [{"wait_for": 1}, {"wait_for": 2}]
run_data = [{"wait_for": 0.001}, {"wait_for": 0.002}]
# Use ThreadPoolExecutor to run pipeline calls in parallel
with ThreadPoolExecutor(max_workers=len(run_data)) as executor: