From c504860b69dca344aae1d032d2db105eab9006ea Mon Sep 17 00:00:00 2001
From: Chlupaty
Date: Wed, 11 Mar 2026 21:28:29 +0100
Subject: [PATCH] Add mypy and fix xlsx parsing

---
 pyproject.toml               | 12 ++++--------
 src/beaky/scanner/scanner.py | 23 +++++++----------------
 2 files changed, 11 insertions(+), 24 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 3b4654e..98490e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,20 +9,16 @@
 description = "Scan tickets and decide"
 requires-python = ">=3.12"
 dependencies = [
     "pillow==12.1.1",
-<<<<<<< HEAD
     "pydantic==2.12.5",
-    "pandas==3.0.1"
-=======
-    "openpyxl>=3.1.0",
-    "pydantic==2.12.5"
->>>>>>> ec872d0 (Implement xlsx parsing)
+    # "pandas==3.0.1",
+    "openpyxl>=3.1.0",
 ]
 
 [project.optional-dependencies]
 dev = [
     "pytest>=9.0.2",
     "ruff==0.15.5",
-    "playwright==1.58.0" # only dev because it cant be installed in a pipeline, just locally
+    # "playwright==1.58.0" # only dev because it cant be installed in a pipeline, just locally
 ]
 [project.scripts]
@@ -30,7 +26,7 @@
 beaky = "beaky.cli:main"
 
 [tool.ruff]
-line-length = 130
+line-length = 120
 lint.select = ["E", "F", "I"]
 
 [tool.mypy]
diff --git a/src/beaky/scanner/scanner.py b/src/beaky/scanner/scanner.py
index 1e82cf7..770f51a 100644
--- a/src/beaky/scanner/scanner.py
+++ b/src/beaky/scanner/scanner.py
@@ -1,9 +1,8 @@
 from datetime import datetime
-from typing import List, Optional, Iterator, Union
-
-from pydantic.dataclasses import dataclass
+from typing import Iterator, List, Optional
 
 from openpyxl import load_workbook
+from pydantic.dataclasses import dataclass
 
 from beaky.config import Config
 from beaky.datamodels.scan import Scan
@@ -41,16 +40,7 @@ class Link:
 
 
 class Links:
-    """Loads Link objects from an Excel file (.xlsx).
-
-    Usage:
-        l = Links(path_to_xlsx)
-        links = l.ret_links()  # returns list[Link]
-        for link in l: ...
-    """
-
-    def __init__(self, path: Union[str, Config]):
-        # Accept either a raw path string or a Config with .path attribute
+    def __init__(self, path: str | Config):
         if isinstance(path, Config):
             self._path = path.path
         else:
@@ -83,9 +73,9 @@
         header_map = {
             (str(h).strip().lower() if h is not None else ""): i for i, h in enumerate(header)
         }
         # Helper to parse date-like values
-        def parse_date(v) -> Optional[datetime]:
+        def parse_date(v: object) -> Optional[datetime]:
             if v is None:
                 return None
             if isinstance(v, datetime):
                 return v
@@ -107,7 +97,7 @@
 
         # Find the column indices we care about
         id_idx = header_map.get("id")
-        url_idx = header_map.get("link") or header_map.get("url")
+        url_idx = header_map.get("link", header_map.get("url"))
         date_idx = header_map.get("date")
 
         if id_idx is None or url_idx is None:
@@ -124,7 +114,8 @@
                 # skip empty rows
                 continue
 
-            link = Link(id=str(raw_id).strip() if raw_id is not None else "", url=str(raw_url).strip() if raw_url is not None else "", date=parse_date(raw_date))
+            link = Link(id=str(raw_id).strip() if raw_id is not None else "",
+                        url=str(raw_url).strip() if raw_url is not None else "", date=parse_date(raw_date))
             self.links.append(link)
         except Exception:
             # Skip problematic rows silently