
fix(deps): update dependency recipe-scrapers to v14.57.0 (#3804)

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Michael Genson <71845777+michael-genson@users.noreply.github.com>
Co-authored-by: Kuchenpirat <24235032+Kuchenpirat@users.noreply.github.com>
renovate[bot], 2024-07-01 19:39:22 +00:00, committed by GitHub
parent aabab73310
commit dc64484b8e
3 changed files with 52 additions and 64 deletions


@@ -1,3 +1,4 @@
+import inspect
 import json
 import os
 import random
@@ -8,14 +9,17 @@ from typing import Generator
 from uuid import uuid4
 from zipfile import ZipFile
 
+from httpx import Response
 import pytest
 from bs4 import BeautifulSoup
 from fastapi.testclient import TestClient
 from pytest import MonkeyPatch
 from recipe_scrapers._abstract import AbstractScraper
 from recipe_scrapers._schemaorg import SchemaOrg
+from recipe_scrapers.plugins import SchemaOrgFillPlugin
 from slugify import slugify
 
+from mealie.pkgs.safehttp.transport import AsyncSafeTransport
 from mealie.repos.repository_factory import AllRepositories
 from mealie.schema.recipe.recipe import Recipe, RecipeCategory, RecipeSummary, RecipeTag
 from mealie.schema.recipe.recipe_category import CategorySave, TagSave
@@ -72,6 +76,14 @@ def get_init(html_path: Path):
         self.url = url
         self.schema = SchemaOrg(page_data)
 
+        # attach the SchemaOrgFill plugin
+        if not hasattr(self.__class__, "plugins_initialized"):
+            for name, _ in inspect.getmembers(self, inspect.ismethod):  # type: ignore
+                current_method = getattr(self.__class__, name)
+                current_method = SchemaOrgFillPlugin.run(current_method)
+                setattr(self.__class__, name, current_method)
+
+            setattr(self.__class__, "plugins_initialized", True)
+
     return init_override
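
For readers unfamiliar with the block added to get_init above: recipe_scrapers plugins such as SchemaOrgFillPlugin expose a run() hook that wraps a scraper method so that missing values are filled in from the parsed schema.org data. Because the tests replace AbstractScraper.__init__ entirely, the plugin has to be attached to the scraper class by hand, once per class. A minimal, self-contained sketch of that wrap-every-method pattern, using a hypothetical FillPlugin and FakeScraper rather than the real recipe_scrapers classes:

import inspect


class FillPlugin:
    """Hypothetical stand-in for a recipe_scrapers plugin with a run() hook."""

    @classmethod
    def run(cls, decorated):
        def wrapper(self, *args, **kwargs):
            value = decorated(self, *args, **kwargs)
            # Fall back to the parsed schema.org data when the method returns nothing.
            return value if value is not None else self.schema.get(decorated.__name__)

        return wrapper


class FakeScraper:
    """Hypothetical scraper; its public methods are wrapped exactly once per class."""

    def __init__(self, schema: dict):
        self.schema = schema
        if not hasattr(self.__class__, "plugins_initialized"):
            for name, _ in inspect.getmembers(self, inspect.ismethod):
                if name.startswith("_"):
                    continue  # leave dunder methods such as __init__ alone
                setattr(self.__class__, name, FillPlugin.run(getattr(self.__class__, name)))
            self.__class__.plugins_initialized = True

    def title(self):
        return None  # no value of its own; the plugin fills it from the schema


print(FakeScraper({"title": "Nutty Umami Noodles"}).title())  # -> Nutty Umami Noodles

The hasattr guard mirrors the change above: without it, every monkeypatched instantiation would wrap the already-wrapped methods again.
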
@@ -102,6 +114,16 @@ def test_create_by_url(
             "get_html",
             open_graph_override(recipe_data.html_file.read_text()),
         )
 
+    # Skip AsyncSafeTransport requests
+    async def return_empty_response(*args, **kwargs):
+        return Response(200, content=b"")
+
+    monkeypatch.setattr(
+        AsyncSafeTransport,
+        "handle_async_request",
+        return_empty_response,
+    )
+
     # Skip image downloader
     monkeypatch.setattr(
         RecipeDataService,
@@ -112,7 +134,9 @@
     api_client.delete(api_routes.recipes_slug(recipe_data.expected_slug), headers=unique_user.token)
 
     response = api_client.post(
-        api_routes.recipes_create_url, json={"url": recipe_data.url, "include_tags": False}, headers=unique_user.token
+        api_routes.recipes_create_url,
+        json={"url": recipe_data.url, "include_tags": recipe_data.include_tags},
+        headers=unique_user.token,
     )
 
     assert response.status_code == 201
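
The return_empty_response override added above works because httpx routes every request through its transport's handle_async_request() method; stubbing that method on mealie's AsyncSafeTransport keeps the test fully offline. A rough illustration of the same idea using httpx's built-in MockTransport (standard httpx, not what the test actually patches):

import asyncio

import httpx


def empty_response(request: httpx.Request) -> httpx.Response:
    # Every request is answered locally with an empty 200 body,
    # mirroring the patched handle_async_request stub in the test.
    return httpx.Response(200, content=b"")


async def main() -> None:
    async with httpx.AsyncClient(transport=httpx.MockTransport(empty_response)) as client:
        response = await client.get("https://example.com/recipe")
        assert response.status_code == 200
        assert response.content == b""


asyncio.run(main())
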
@@ -128,67 +152,13 @@ def test_create_by_url(
     assert len(recipe_dict["recipeInstructions"]) == recipe_data.num_steps
     assert len(recipe_dict["recipeIngredient"]) == recipe_data.num_ingredients
 
+    if not recipe_data.include_tags:
+        return
 
-def test_create_by_url_with_tags(
-    api_client: TestClient,
-    unique_user: TestUser,
-    monkeypatch: MonkeyPatch,
-):
-    html_file = data.html_nutty_umami_noodles_with_scallion_brown_butter_and_snow_peas_recipe
+    expected_tags = recipe_data.expected_tags or set()
+    assert len(recipe_dict["tags"]) == len(expected_tags)
 
-    # Override init function for AbstractScraper to use the test html instead of calling the url
-    monkeypatch.setattr(
-        AbstractScraper,
-        "__init__",
-        get_init(html_file),
-    )
-    # Override the get_html method of all scraper strategies to return the test html
-    for scraper_cls in DEFAULT_SCRAPER_STRATEGIES:
-        monkeypatch.setattr(
-            scraper_cls,
-            "get_html",
-            open_graph_override(html_file.read_text()),
-        )
-
-    # Skip image downloader
-    monkeypatch.setattr(
-        RecipeDataService,
-        "scrape_image",
-        lambda *_: "TEST_IMAGE",
-    )
-
-    response = api_client.post(
-        api_routes.recipes_create_url,
-        json={"url": "https://google.com", "include_tags": True},  # URL Doesn't matter
-        headers=unique_user.token,
-    )
-    assert response.status_code == 201
-    slug = "nutty-umami-noodles-with-scallion-brown-butter-and-snow-peas"
-
-    # Get the recipe
-    response = api_client.get(api_routes.recipes_slug(slug), headers=unique_user.token)
-    assert response.status_code == 200
-
-    # Verifiy the tags are present and title cased
-    expected_tags = {
-        "Sauté",
-        "Pea",
-        "Noodle",
-        "Udon Noodle",
-        "Ramen Noodle",
-        "Dinner",
-        "Main",
-        "Vegetarian",
-        "Easy",
-        "Quick",
-        "Weeknight Meals",
-        "Web",
-    }
-
-    recipe = json.loads(response.text)
-
-    assert len(recipe["tags"]) == len(expected_tags)
-
-    for tag in recipe["tags"]:
+    for tag in recipe_dict["tags"]:
         assert tag["name"] in expected_tags