path: root/tests/plugins/scrapers/test_anchira.py
import os
from datetime import date
from zipfile import ZipFile

import pytest

import hircine.enums as enums
from hircine.plugins.scrapers.anchira import AnchiraYamlScraper
from hircine.scraper.types import (
    URL,
    Artist,
    Censorship,
    Circle,
    Date,
    Direction,
    Language,
    Rating,
    Tag,
    Title,
    World,
)


@pytest.fixture
def archive_file(tmpdir):
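    """Build a zip archive containing an anchira.to-style info.yaml for the scraper to read."""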
    file = os.path.join(tmpdir, "archive.zip")

    data = """
---
Source: https://anchira.to/g/1/1
URL: https://example.com
Title: Example Title
Artist:
- Example
Circle:
- Example
Parody:
- Original Work
- Example
Magazine: []
Tags:
- Unlimited
- Book
Released: 1574394240
Pages: 102
...
    """

    with ZipFile(file, "x") as ziph:
        ziph.writestr("info.yaml", data)

    yield file


def test_does_scrape(monkeypatch, archive_file, gen_comic):
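    """The scraper should report itself as available and collect metadata from info.yaml."""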
    comic = next(gen_comic)
    comic.archive.path = archive_file

    scraper = AnchiraYamlScraper(comic)

    assert scraper.is_available
    assert scraper.source == AnchiraYamlScraper.source
    assert scraper.name == "anchira.to info.yaml"

    assert set(scraper.collect()) == set(
        [
            Artist(name="Example"),
            Circle(name="Example"),
            Date(value=date(2019, 11, 22)),
            Direction(value=enums.Direction.RIGHT_TO_LEFT),
            Language(value=enums.Language.EN),
            Tag(namespace="none", tag="Book"),
            Title(value="Example Title"),
            URL(value="https://example.com"),
            World(name="Example"),
        ]
    )


def test_does_not_scrape_on_error(tmpdir, gen_comic):
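    """A missing archive should leave the scraper with no data and mark it unavailable."""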
    comic = next(gen_comic)
    comic.archive.path = os.path.join(tmpdir, "nonexistent.zip")

    scraper = AnchiraYamlScraper(comic)

    assert scraper.data == {}
    assert not scraper.is_available


@pytest.mark.parametrize(
    "tag, parsed",
    [
        ("Hentai", Rating(value=enums.Rating.EXPLICIT)),
        ("Non-H", Rating(value=enums.Rating.QUESTIONABLE)),
        ("Ecchi", Rating(value=enums.Rating.QUESTIONABLE)),
        ("Uncensored", Censorship(value=enums.Censorship.NONE)),
    ],
    ids=[
        "hentai",
        "non-h",
        "ecchi",
        "uncensored",
    ],
)
def test_parses_tags(tag, parsed):
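    """Rating and censorship tags should map to their corresponding enum values."""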
    scraper = AnchiraYamlScraper(None)
    assert scraper.parse_tag(tag) == parsed