import os
from datetime import date
from zipfile import ZipFile

import pytest

import hircine.enums as enums
from hircine.plugins.scrapers.anchira import AnchiraYamlScraper
from hircine.scraper.types import (
    URL,
    Artist,
    Censorship,
    Circle,
    Date,
    Direction,
    Language,
    Rating,
    Tag,
    Title,
    World,
)


@pytest.fixture
def archive_file(tmpdir):
    file = os.path.join(tmpdir, "archive.zip")

    data = """
---
Source: https://anchira.to/g/1/1
URL: https://example.com
Title: Example Title
Artist:
- Example
Circle:
- Example
Parody:
- Original Work
- Example
Magazine: []
Tags:
- Unlimited
- Book
Released: 1574394240
Pages: 102
...
"""

    with ZipFile(file, "x") as ziph:
        ziph.writestr("info.yaml", data)

    yield file


def test_does_scrape(monkeypatch, archive_file, gen_comic):
    comic = next(gen_comic)
    comic.archive.path = archive_file

    scraper = AnchiraYamlScraper(comic)

    assert scraper.is_available
    assert scraper.source == AnchiraYamlScraper.source
    assert scraper.name == "anchira.to info.yaml"

    assert set(scraper.collect()) == set(
        [
            Artist(name="Example"),
            Circle(name="Example"),
            Date(value=date(2019, 11, 22)),
            Direction(value=enums.Direction.RIGHT_TO_LEFT),
            Language(value=enums.Language.EN),
            Tag(namespace="none", tag="Book"),
            Title(value="Example Title"),
            URL(value="https://example.com"),
            World(name="Example"),
        ]
    )


def test_does_not_scrape_on_error(tmpdir, gen_comic):
    comic = next(gen_comic)
    comic.archive.path = os.path.join(tmpdir, "nonexistent.zip")

    scraper = AnchiraYamlScraper(comic)

    assert scraper.data == {}
    assert not scraper.is_available


@pytest.mark.parametrize(
    "tag, parsed",
    [
        ("Hentai", Rating(value=enums.Rating.EXPLICIT)),
        ("Non-H", Rating(value=enums.Rating.QUESTIONABLE)),
        ("Ecchi", Rating(value=enums.Rating.QUESTIONABLE)),
        ("Uncensored", Censorship(value=enums.Censorship.NONE)),
    ],
    ids=[
        "hentai",
        "non-h",
        "ecchi",
        "uncensored",
    ],
)
def test_parses_tags(tag, parsed):
    scraper = AnchiraYamlScraper(None)

    assert scraper.parse_tag(tag) == parsed