-rw-r--r--  pyproject.toml                           4
-rw-r--r--  src/hircine/api/inputs.py                8
-rw-r--r--  src/hircine/api/mutation/resolvers.py   34
-rw-r--r--  src/hircine/api/query/__init__.py       18
-rw-r--r--  src/hircine/api/query/resolvers.py       2
-rw-r--r--  src/hircine/db/ops.py                   10
-rw-r--r--  src/hircine/scanner.py                   4
-rw-r--r--  src/hircine/scraper/utils.py             2
-rw-r--r--  tests/api/test_comic.py                  8
-rw-r--r--  tests/api/test_db.py                     8
-rw-r--r--  tests/scrapers/test_scraper.py           2
-rw-r--r--  tests/scrapers/test_scraper_utils.py    16
12 files changed, 60 insertions, 56 deletions
diff --git a/pyproject.toml b/pyproject.toml
index f4ff147..2a3dec1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,6 +49,7 @@ select = [
"W", # pycodestyle
"I", # isort
"UP", # pyupgrade
+ "A", # flake8-builtin
"B", # flake8-bugbear
"SIM", # flake8-simplify
"FURB" # refurb
@@ -56,6 +57,9 @@ select = [
ignore = ["B007", "SIM102", "SIM108"]
+[tool.ruff.lint.flake8-builtins]
+builtins-ignorelist = ["copyright", "filter", "hash", "id", "input"]
+
[tool.pytest.ini_options]
pythonpath = ["src"]
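
A minimal sketch (not part of the commit) of what the newly enabled "A"
(flake8-builtins) rules report, and how the builtins-ignorelist above exempts
names such as "id" and "input"; the file and function names below are
hypothetical:

    # example.py (hypothetical snippet, not from this repository)
    def resolve(self, input):          # no warning: "input" is on the ignorelist above
        dict = {}                      # A001: variable `dict` shadows a Python builtin
        for object in dict.values():   # A001: `object` shadows a Python builtin
            ...

The remaining hunks apply the corresponding renames (dict -> data,
object -> obj, all -> every, zip -> ziph, and so on) rather than adding
per-line suppressions.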
diff --git a/src/hircine/api/inputs.py b/src/hircine/api/inputs.py
index 1f0e08e..38e17e4 100644
--- a/src/hircine/api/inputs.py
+++ b/src/hircine/api/inputs.py
@@ -408,14 +408,14 @@ class ComicTagsUpsertInput(UpsertInputList):
have, missing = await ops.get_all_names(
ctx.session, model, names, options=model.load_full()
)
- dict = {}
+ data = {}
for item in have:
- dict[item.name] = (item, True)
+ data[item.name] = (item, True)
for item in missing:
- dict[item] = (model(name=item), False)
+ data[item] = (model(name=item), False)
- return dict
+ return data
remaining_ns, remaining_tags = zip(*missing)
diff --git a/src/hircine/api/mutation/resolvers.py b/src/hircine/api/mutation/resolvers.py
index 069669e..1a0cd45 100644
--- a/src/hircine/api/mutation/resolvers.py
+++ b/src/hircine/api/mutation/resolvers.py
@@ -84,26 +84,26 @@ def add(modelcls, post_add=None):
async with db.session() as s:
try:
- object = modelcls()
- ctx = MutationContext(input, object, s)
+ obj = modelcls()
+ ctx = MutationContext(input, obj, s)
async for field, value, _ in fetch_fields(input, ctx):
- setattr(object, field, value)
+ setattr(obj, field, value)
except APIException as e:
return e.graphql_error
- s.add(object)
+ s.add(obj)
await s.flush()
if post_add:
- returnval = await post_add(s, input, object)
+ returnval = await post_add(s, input, obj)
await s.commit()
if returnval:
return returnval
else:
- return AddSuccess(modelcls, object.id)
+ return AddSuccess(modelcls, obj.id)
return inner
@@ -118,16 +118,16 @@ async def post_add_comic(session, input, comic):
return AddComicSuccess(Comic, comic.id, has_remaining)
-def update_attr(object, field, value, mode):
+def update_attr(obj, field, value, mode):
if mode != UpdateMode.REPLACE and isinstance(value, list):
- attr = getattr(object, field)
+ attr = getattr(obj, field)
match mode:
case UpdateMode.ADD:
value.extend(attr)
case UpdateMode.REMOVE:
value = list(set(attr) - set(value))
- setattr(object, field, value)
+ setattr(obj, field, value)
async def _update(ids: List[int], modelcls, input, successcls):
@@ -143,19 +143,19 @@ async def _update(ids: List[int], modelcls, input, successcls):
if missing:
return IDNotFoundError(modelcls, missing.pop())
- for object in objects:
- s.add(object)
+ for obj in objects:
+ s.add(obj)
try:
- ctx = MutationContext(input, object, s, multiple=multiple)
+ ctx = MutationContext(input, obj, s, multiple=multiple)
async for field, value, mode in fetch_fields(input, ctx):
- update_attr(object, field, value, mode)
+ update_attr(obj, field, value, mode)
except APIException as e:
return e.graphql_error
- if isinstance(object, MixinModifyDates) and s.is_modified(object):
- object.updated_at = datetime.now(tz=timezone.utc)
+ if isinstance(obj, MixinModifyDates) and s.is_modified(obj):
+ obj.updated_at = datetime.now(tz=timezone.utc)
await s.commit()
@@ -183,8 +183,8 @@ def delete(modelcls, post_delete=None):
if missing:
return IDNotFoundError(modelcls, missing.pop())
- for object in objects:
- await s.delete(object)
+ for obj in objects:
+ await s.delete(obj)
await s.flush()
diff --git a/src/hircine/api/query/__init__.py b/src/hircine/api/query/__init__.py
index 9d81989..2e07c71 100644
--- a/src/hircine/api/query/__init__.py
+++ b/src/hircine/api/query/__init__.py
@@ -19,9 +19,9 @@ from hircine.api.types import (
)
from .resolvers import (
- all,
comic_scrapers,
comic_tags,
+ every,
scrape_comic,
single,
)
@@ -34,21 +34,21 @@ def query(resolver):
@strawberry.type
class Query:
archive: rp.ArchiveResponse = query(single(models.Archive, full=True))
- archives: FilterResult[Archive] = query(all(models.Archive))
+ archives: FilterResult[Archive] = query(every(models.Archive))
artist: rp.ArtistResponse = query(single(models.Artist))
- artists: FilterResult[Artist] = query(all(models.Artist))
+ artists: FilterResult[Artist] = query(every(models.Artist))
character: rp.CharacterResponse = query(single(models.Character))
- characters: FilterResult[Character] = query(all(models.Character))
+ characters: FilterResult[Character] = query(every(models.Character))
circle: rp.CircleResponse = query(single(models.Circle))
- circles: FilterResult[Circle] = query(all(models.Circle))
+ circles: FilterResult[Circle] = query(every(models.Circle))
comic: rp.ComicResponse = query(single(models.Comic, full=True))
comic_scrapers: List[ComicScraper] = query(comic_scrapers)
comic_tags: FilterResult[ComicTag] = query(comic_tags)
- comics: FilterResult[Comic] = query(all(models.Comic))
+ comics: FilterResult[Comic] = query(every(models.Comic))
namespace: rp.NamespaceResponse = query(single(models.Namespace))
- namespaces: FilterResult[Namespace] = query(all(models.Namespace))
+ namespaces: FilterResult[Namespace] = query(every(models.Namespace))
tag: rp.TagResponse = query(single(models.Tag, full=True))
- tags: FilterResult[Tag] = query(all(models.Tag))
+ tags: FilterResult[Tag] = query(every(models.Tag))
world: rp.WorldResponse = query(single(models.World))
- worlds: FilterResult[World] = query(all(models.World))
+ worlds: FilterResult[World] = query(every(models.World))
scrape_comic: rp.ScrapeComicResponse = query(scrape_comic)
diff --git a/src/hircine/api/query/resolvers.py b/src/hircine/api/query/resolvers.py
index a18e63e..6609cc1 100644
--- a/src/hircine/api/query/resolvers.py
+++ b/src/hircine/api/query/resolvers.py
@@ -50,7 +50,7 @@ def single(model, full=False):
return inner
-def all(model):
+def every(model):
typecls = getattr(types, model.__name__)
filtercls = getattr(filters, f"{model.__name__}Filter")
sortcls = getattr(sort, f"{model.__name__}Sort")
diff --git a/src/hircine/db/ops.py b/src/hircine/db/ops.py
index e61c5ad..91c830d 100644
--- a/src/hircine/db/ops.py
+++ b/src/hircine/db/ops.py
@@ -116,9 +116,9 @@ def lookup_identity(session, model, ids):
satisfied = set()
for id in ids:
- object = session.identity_map.get(identity_key(model, id), None)
- if object is not None:
- objects.append(object)
+ obj = session.identity_map.get(identity_key(model, id), None)
+ if obj is not None:
+ objects.append(obj)
satisfied.add(id)
return objects, satisfied
@@ -143,7 +143,7 @@ async def get_all(session, model, ids, options=None, use_identity_map=False):
objects += (await session.scalars(sql)).unique().all()
- fetched_ids = [object.id for object in objects]
+ fetched_ids = [obj.id for obj in objects]
missing = set(ids) - set(fetched_ids)
return objects, missing
@@ -159,7 +159,7 @@ async def get_all_names(session, model, names, options=None):
objects = (await session.scalars(sql)).unique().all()
- fetched_names = [object.name for object in objects]
+ fetched_names = [obj.name for obj in objects]
missing = set(names) - set(fetched_names)
return objects, missing
diff --git a/src/hircine/scanner.py b/src/hircine/scanner.py
index d1077ed..d2b5cd3 100644
--- a/src/hircine/scanner.py
+++ b/src/hircine/scanner.py
@@ -56,7 +56,7 @@ class Registry:
@property
def duplicates(self):
- for hash, value in self.marked.items():
+ for _, value in self.marked.items():
if len(value) > 1:
yield value
@@ -302,7 +302,7 @@ class Scanner:
def process_member(self, input):
path, name = input
- with ZipFile(path, mode="r") as zip, zip.open(name, mode="r") as member:
+ with ZipFile(path, mode="r") as ziph, ziph.open(name, mode="r") as member:
_, ext = os.path.splitext(name)
digest = file_digest(member, blake3).digest()
diff --git a/src/hircine/scraper/utils.py b/src/hircine/scraper/utils.py
index 5a383a2..4e6e9f1 100644
--- a/src/hircine/scraper/utils.py
+++ b/src/hircine/scraper/utils.py
@@ -57,5 +57,5 @@ def open_archive_file(archive, member, check_sidecar=True):
return
- with ZipFile(archive.path, "r") as zip, zip.open(member, "r") as file:
+ with ZipFile(archive.path, "r") as ziph, ziph.open(member, "r") as file:
yield file
diff --git a/tests/api/test_comic.py b/tests/api/test_comic.py
index 8555a5a..dcc5822 100644
--- a/tests/api/test_comic.py
+++ b/tests/api/test_comic.py
@@ -1097,7 +1097,7 @@ async def test_upsert_comic_tags_uses_existing(upsert_comics, empty_comic):
@pytest.mark.parametrize(
- "key,list",
+ "key,items",
[
("artists", ["arty", "farty"]),
("tags", ["alien:medium", "human:tiny"]),
@@ -1116,11 +1116,11 @@ async def test_upsert_comic_tags_uses_existing(upsert_comics, empty_comic):
],
)
@pytest.mark.anyio
-async def test_upsert_comic_creates(upsert_comics, empty_comic, key, list):
+async def test_upsert_comic_creates(upsert_comics, empty_comic, key, items):
original_comic = await DB.add(empty_comic)
input = {
- key: {"names": list, "options": {"onMissing": "CREATE"}},
+ key: {"names": items, "options": {"onMissing": "CREATE"}},
}
response = Response(await upsert_comics(original_comic.id, input))
response.assert_is("UpsertSuccess")
@@ -1128,7 +1128,7 @@ async def test_upsert_comic_creates(upsert_comics, empty_comic, key, list):
comic = await DB.get(Comic, original_comic.id, full=True)
assert comic is not None
- assert set(list) == set([o.name for o in getattr(comic, key)])
+ assert set(items) == set([o.name for o in getattr(comic, key)])
@pytest.mark.anyio
diff --git a/tests/api/test_db.py b/tests/api/test_db.py
index 1405c23..b030035 100644
--- a/tests/api/test_db.py
+++ b/tests/api/test_db.py
@@ -67,8 +67,8 @@ async def test_models_retained_when_clearing_association(
comic = await DB.add(comic)
async with database.session() as s:
- object = await s.get(Comic, comic.id)
- setattr(object, key, [])
+ obj = await s.get(Comic, comic.id)
+ setattr(obj, key, [])
await s.commit()
assert await DB.get(assoccls, (comic.id, model.id)) is None
@@ -87,8 +87,8 @@ async def test_models_retained_when_clearing_comictag(empty_comic):
await DB.add(ct)
async with database.session() as s:
- object = await s.get(Comic, comic.id)
- object.tags = []
+ obj = await s.get(Comic, comic.id)
+ obj.tags = []
await s.commit()
assert await DB.get(ComicTag, (comic.id, ct.namespace_id, ct.tag_id)) is None
diff --git a/tests/scrapers/test_scraper.py b/tests/scrapers/test_scraper.py
index 8492425..d0cef7b 100644
--- a/tests/scrapers/test_scraper.py
+++ b/tests/scrapers/test_scraper.py
@@ -20,7 +20,7 @@ class NoneScraper(Scraper):
class WarningScraper(Scraper):
is_available = True
- def warn(self, str):
+ def warn(self, msg):
raise ScrapeWarning("Invalid input")
def scrape(self):
diff --git a/tests/scrapers/test_scraper_utils.py b/tests/scrapers/test_scraper_utils.py
index 4b02aad..30b9796 100644
--- a/tests/scrapers/test_scraper_utils.py
+++ b/tests/scrapers/test_scraper_utils.py
@@ -8,23 +8,23 @@ from hircine.scraper.utils import open_archive_file, parse_dict
def test_parse_dict():
- dict = {
+ data = {
"scalar": "foo",
"list": ["bar", "baz"],
"dict": {"nested_scalar": "qux", "nested_list": ["plugh", "xyzzy"]},
}
- def id(type):
- return lambda item: f"{type}_{item}"
+ def annotate(tag):
+ return lambda item: f"{tag}_{item}"
parsers = {
- "scalar": id("scalar"),
- "list": id("list"),
- "dict": {"nested_scalar": id("scalar"), "nested_list": id("list")},
- "missing": id("missing"),
+ "scalar": annotate("scalar"),
+ "list": annotate("list"),
+ "dict": {"nested_scalar": annotate("scalar"), "nested_list": annotate("list")},
+ "missing": annotate("missing"),
}
- assert [f() for f in parse_dict(parsers, dict)] == [
+ assert [f() for f in parse_dict(parsers, data)] == [
"scalar_foo",
"list_bar",
"list_baz",