Mirror of https://github.com/mealie-recipes/mealie.git
* add httpx dependency for async http requests
* rework scraper strategies to download recipe html asynchronously
* rework recipe_data_service to download recipe images asynchronously
* fix recipe_parser test so it can use async results
* fix bulk import so that it also works with async scraper
* fix broken recipe_parser tests
* Fix issues found by scanners
* Add additional checks for ingredient and instruction count in test_create_by_url
* Revert changes in test recipe_data

  Since we are checking ingredients and instructions in test_create_url now, these would fail with the stored html of recipe data

* Add explicit type annotation in recipe_data_service.largest_content_len
* Fix typo in annotation
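The central change in this commit is downloading recipe HTML through httpx's async client instead of a blocking request, so many scrapes can share the event loop. A minimal sketch of that pattern; the fetch_html name and the timeout value are illustrative assumptions, not Mealie's actual scraper code:

# Minimal sketch of an async HTML download with httpx. The function name and
# timeout are assumptions for illustration, not Mealie's real implementation.
import httpx


async def fetch_html(url: str) -> str:
    # AsyncClient provides a connection pool and awaitable requests,
    # so downloads do not block the event loop.
    async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
        response = await client.get(url)
        response.raise_for_status()
        return response.text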
27 lines
1.0 KiB
Python
import pytest

from mealie.services.scraper import scraper
from tests.utils.recipe_data import RecipeSiteTestCase, get_recipe_test_cases

test_cases = get_recipe_test_cases()

"""
These tests are skipped by default and only really used when troubleshooting the parser
directly. If you are working on improving the parser, you can add test cases to the `get_recipe_test_cases` function
and then use this test case by removing the `@pytest.mark.skip` and then testing your results.
"""


@pytest.mark.skipif(True, reason="Long Running API Test - manually run when updating the parser")
@pytest.mark.parametrize("recipe_test_data", test_cases)
@pytest.mark.asyncio
async def test_recipe_parser(recipe_test_data: RecipeSiteTestCase):
    recipe, _ = await scraper.create_from_url(recipe_test_data.url)

    assert recipe.slug == recipe_test_data.expected_slug
    assert len(recipe.recipe_instructions) == recipe_test_data.num_steps
    assert len(recipe.recipe_ingredient) == recipe_test_data.num_ingredients
    assert recipe.org_url == recipe_test_data.url
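As the docstring above suggests, new sites are covered by adding cases to get_recipe_test_cases. A hypothetical entry; the field names match what the test asserts, but the constructor signature and values are assumptions for illustration:

# Hypothetical test case for get_recipe_test_cases. The fields mirror the
# assertions in test_recipe_parser; the exact constructor is an assumption.
RecipeSiteTestCase(
    url="https://example.com/recipes/tomato-soup",
    expected_slug="tomato-soup",
    num_ingredients=8,
    num_steps=5,
)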
|