crawl4ai/tests/async/test_parameters_and_options.py

import os
import sys
import pytest
# Add the parent directory to the Python path
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from crawl4ai.async_webcrawler import AsyncWebCrawler


@pytest.mark.asyncio
async def test_word_count_threshold():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        result_no_threshold = await crawler.arun(
            url=url, word_count_threshold=0, bypass_cache=True
        )
        result_with_threshold = await crawler.arun(
            url=url, word_count_threshold=50, bypass_cache=True
        )

        assert len(result_no_threshold.markdown) > len(result_with_threshold.markdown)


@pytest.mark.asyncio
async def test_css_selector():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        css_selector = "h1, h2, h3"

        result = await crawler.arun(
            url=url, css_selector=css_selector, bypass_cache=True
        )

        assert result.success
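        # With css_selector="h1, h2, h3", at least one heading tag should
        # survive in the cleaned HTML.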
        assert (
            "<h1" in result.cleaned_html
            or "<h2" in result.cleaned_html
            or "<h3" in result.cleaned_html
        )


@pytest.mark.asyncio
async def test_javascript_execution():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"

        # Crawl without JS
        result_without_more = await crawler.arun(url=url, bypass_cache=True)
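
        # Click the "Load More" button (if present) to render additional
        # articles before the page content is captured.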
        js_code = [
            "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
        ]

        result_with_more = await crawler.arun(
            url=url, js_code=js_code, bypass_cache=True
        )

        assert result_with_more.success
        assert len(result_with_more.markdown) > len(result_without_more.markdown)


@pytest.mark.asyncio
async def test_screenshot():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        result = await crawler.arun(url=url, screenshot=True, bypass_cache=True)

        assert result.success
        assert result.screenshot
        assert isinstance(result.screenshot, str)  # Should be a base64 encoded string


@pytest.mark.asyncio
async def test_custom_user_agent():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        custom_user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Crawl4AI/1.0"
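
        # user_agent overrides the browser's default User-Agent header for
        # this crawl.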
        result = await crawler.arun(
            url=url, user_agent=custom_user_agent, bypass_cache=True
        )

        assert result.success
        # Note: We can't directly verify the user agent in the result,
        # but we can check if the crawl was successful


@pytest.mark.asyncio
async def test_extract_media_and_links():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        result = await crawler.arun(url=url, bypass_cache=True)

        assert result.success
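        # Media and links are returned as dicts grouped by type, e.g.
        # media["images"] and links["internal"] / links["external"].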
        assert result.media
        assert isinstance(result.media, dict)
        assert "images" in result.media

        assert result.links
        assert isinstance(result.links, dict)
        assert "internal" in result.links and "external" in result.links


@pytest.mark.asyncio
async def test_metadata_extraction():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        result = await crawler.arun(url=url, bypass_cache=True)

        assert result.success
        assert result.metadata
        assert isinstance(result.metadata, dict)
        # Check for common metadata fields
        assert any(
            key in result.metadata for key in ["title", "description", "keywords"]
        )


# Entry point for debugging
if __name__ == "__main__":
    pytest.main([__file__, "-v"])