
Feature/import keywords as tags (#1170)

* feat: import original keywords as tags

* remove cached env

* Update frontend api types

* fix: Issues with previous tag scraping implementation

* Update category handling in backend

* Update backend tests to include group_id

* Correct type check

* Update create-url interface

* Improve tag cleaner list support

* remove builtin name shadowing

* update type annotations

* test include tags scraper

* implement scraper context for optional data

* re-add cache venv

* use continue instead of break

* remove test callback

Co-authored-by: Miroito <alban.vachette@gmail.com>
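
The "Improve tag cleaner list support" change above refers to handling recipe keywords whether the source page exposes them as a single comma-separated string or as a list. A minimal sketch of that kind of normalization is shown below; the function name and exact rules are assumptions for illustration and are not taken from this commit.

def clean_keywords(keywords) -> list[str]:
    # Hypothetical normalizer: accept either "a, b, c" or ["a", "b", "c"]
    # and return a de-duplicated list of stripped, lower-cased tag names.
    if keywords is None:
        return []
    if isinstance(keywords, str):
        keywords = keywords.split(",")
    seen: set[str] = set()
    cleaned: list[str] = []
    for keyword in keywords:
        name = str(keyword).strip().lower()
        if name and name not in seen:
            seen.add(name)
            cleaned.append(name)
    return cleaned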
This commit is contained in:
Hayden 2022-04-23 12:23:12 -08:00 committed by GitHub
parent 75c535fb2e
commit c696dee320
27 changed files with 281 additions and 61 deletions
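
The tests in the diff below post to the create-url endpoint with an extra "include_tags" field in the JSON body ("Update create-url interface" above). As a rough sketch, the request schema on the backend could look like the following; only "url" and "include_tags" appear in the diff, while the class name and default value are assumptions.

from pydantic import BaseModel


class CreateRecipeByUrl(BaseModel):  # hypothetical model name
    url: str
    include_tags: bool = False  # when True, the page's original keywords are imported as tags

A client opting in to keyword import would then send a body such as {"url": "https://example.com/recipe", "include_tags": true}, matching the payloads used in the tests below.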


@@ -13,7 +13,7 @@ from slugify import slugify
 from mealie.schema.recipe.recipe import RecipeCategory
 from mealie.services.recipe.recipe_data_service import RecipeDataService
 from mealie.services.scraper.scraper_strategies import RecipeScraperOpenGraph
-from tests import utils
+from tests import data, utils
 from tests.utils.app_routes import AppRoutes
 from tests.utils.factories import random_string
 from tests.utils.fixture_schemas import TestUser
@@ -83,12 +83,77 @@
     api_client.delete(api_routes.recipes_recipe_slug(recipe_data.expected_slug), headers=unique_user.token)
 
-    response = api_client.post(api_routes.recipes_create_url, json={"url": recipe_data.url}, headers=unique_user.token)
+    response = api_client.post(
+        api_routes.recipes_create_url, json={"url": recipe_data.url, "include_tags": False}, headers=unique_user.token
+    )
 
     assert response.status_code == 201
     assert json.loads(response.text) == recipe_data.expected_slug
 
 
+def test_create_by_url_with_tags(
+    api_client: TestClient,
+    api_routes: AppRoutes,
+    unique_user: TestUser,
+    monkeypatch: MonkeyPatch,
+):
+    html_file = data.html_nutty_umami_noodles_with_scallion_brown_butter_and_snow_peas_recipe
+
+    # Override init function for AbstractScraper to use the test html instead of calling the url
+    monkeypatch.setattr(
+        AbstractScraper,
+        "__init__",
+        get_init(html_file),
+    )
+    # Override the get_html method of the RecipeScraperOpenGraph to return the test html
+    monkeypatch.setattr(
+        RecipeScraperOpenGraph,
+        "get_html",
+        open_graph_override(html_file.read_text()),
+    )
+
+    # Skip image downloader
+    monkeypatch.setattr(
+        RecipeDataService,
+        "scrape_image",
+        lambda *_: "TEST_IMAGE",
+    )
+
+    response = api_client.post(
+        api_routes.recipes_create_url,
+        json={"url": "https://google.com", "include_tags": True},  # URL Doesn't matter
+        headers=unique_user.token,
+    )
+
+    assert response.status_code == 201
+
+    slug = "nutty-umami-noodles-with-scallion-brown-butter-and-snow-peas"
+
+    # Get the recipe
+    response = api_client.get(api_routes.recipes_recipe_slug(slug), headers=unique_user.token)
+    assert response.status_code == 200
+
+    # Verify the tags are present
+    expected_tags = {
+        "sauté",
+        "pea",
+        "noodle",
+        "udon noodle",
+        "ramen noodle",
+        "dinner",
+        "main",
+        "vegetarian",
+        "easy",
+        "quick",
+        "weeknight meals",
+        "web",
+    }
+
+    recipe = json.loads(response.text)
+
+    assert len(recipe["tags"]) == len(expected_tags)
+
+    for tag in recipe["tags"]:
+        assert tag["name"] in expected_tags
+
+
 @pytest.mark.parametrize("recipe_data", recipe_test_data)
 def test_read_update(
     api_client: TestClient,
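
The new test relies on two helpers, get_init and open_graph_override, whose definitions are not part of this hunk. The following is a self-contained sketch of what they plausibly do, given only how they are called above; the actual definitions in the test suite may differ.

from pathlib import Path


def open_graph_override(html: str):
    # Returns a replacement for RecipeScraperOpenGraph.get_html that ignores
    # the real URL and hands back the fixture HTML read from disk.
    def get_html(self, *args, **kwargs) -> str:
        return html

    return get_html


def get_init(html_path: Path):
    # Returns a replacement for AbstractScraper.__init__ that loads the
    # fixture HTML instead of fetching the page over the network.
    def init_override(self, url: str, *args, **kwargs) -> None:
        self.url = url
        self.page_data = html_path.read_text()

    return init_override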