mirror of https://github.com/guezoloic/millesima-ai-engine.git
synced 2026-03-28 18:03:47 +00:00

.github/workflows/python-app.yml — 52 changes (vendored)
@@ -5,35 +5,41 @@ name: Python application
 
 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]
   pull_request:
-    branches: [ "main" ]
+    branches: ["main"]
 
 permissions:
-  contents: read
+  contents: write
 
 jobs:
   build:
 
     runs-on: ubuntu-latest
 
     steps:
     - uses: actions/checkout@v4
     - name: Set up Python 3.10
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
         python-version: "3.10"
-    - name: Install dependencies
+
+    - name: install dependencies
       run: |
         python -m pip install --upgrade pip
-        pip install flake8 pytest
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+        pip install ".[test,doc]"
+
     - name: Lint with flake8
       run: |
-        # stop the build if there are Python syntax errors or undefined names
         flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
     - name: Test with pytest
-      run: |
-        pytest
+      run: pytest
+
+    - name: Deploy Doc
+      if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+      run: |
+        git config user.name github-actions
+        git config user.email github-actions@github.com
+        mkdocs gh-deploy --force
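Two of these changes work together: "contents" moves from "read" to "write" because the new Deploy Doc step runs "mkdocs gh-deploy", which commits the built site to the gh-pages branch and therefore needs write access to the repository; the "if" condition restricts that deployment to pushes on main, so pull requests still only lint and test.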
docs/index.md — 1 change (new file)

@@ -0,0 +1 @@
+# Millesima
docs/scraper.md — 3 changes (new file)

@@ -0,0 +1,3 @@
+# Scraper
+
+::: scraper.Scraper
docs/scraperdata.md — 4 changes (new file)

@@ -0,0 +1,4 @@
+
+# _ScraperData
+
+::: scraper._ScraperData
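The "::: scraper.Scraper" and "::: scraper._ScraperData" lines are mkdocstrings directives: at build time the plugin (enabled in mkdocs.yml below) imports the named object and renders its docstring as API documentation. A minimal sketch of the Google-style docstring shape it picks up — this function is illustrative, not part of the repo:

    def example(data: dict) -> str:
        """One-line summary, rendered as the object's description.

        Args:
            data (dict): Rendered as a row in the parameter table.

        Returns:
            str: Rendered as the returns section.
        """
        return str(data)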
main.py — 20 changes (deleted)

@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-
-from os import getcwd
-from os.path import normpath, join
-from sys import argv
-from pandas import read_csv, DataFrame
-
-def main() -> None:
-    if len(argv) != 2:
-        raise ValueError(f"{argv[0]} <filename.csv>")
-
-    path: str = normpath(join(getcwd(), argv[1]))
-    db: DataFrame = read_csv(path)
-    print(db.all())
-
-if __name__ == "__main__":
-    try:
-        main()
-    except Exception as e:
-        print(f"ERREUR: {e}")
mkdocs.yml — 14 changes (new file)

@@ -0,0 +1,14 @@
+site_name: "Projet Millesima S6"
+
+theme:
+  name: "material"
+
+plugins:
+  - search
+  - mkdocstrings
+
+markdown_extensions:
+  - admonition
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed
pyproject.toml — 12 changes (new file)

@@ -0,0 +1,12 @@
+[project]
+name = "projet-millesima-s6"
+version = "0.1.0"
+dependencies = ["requests==2.32.5", "beautifulsoup4==4.14.3", "pandas==2.3.3", "tqdm==4.67.3"]
+
+[project.optional-dependencies]
+test = ["pytest==8.4.2", "requests-mock==1.12.1", "flake8==7.3.0"]
+doc = ["mkdocs<2.0.0", "mkdocs-material==9.6.23", "mkdocstrings[python]"]
+
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
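These pins replace the deleted requirements.txt below, and the two extras are exactly what the CI step 'pip install ".[test,doc]"' resolves: one command installs the runtime dependencies plus the test and documentation toolchains.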
requirements.txt — deleted (superseded by pyproject.toml)

@@ -1,6 +0,0 @@
-requests==2.32.5
-requests-mock==1.12.1
-beautifulsoup4==4.14.3
-pytest==8.4.2
-requests-mock==1.12.1
-pandas==2.3.3
scraper.py

@@ -1,29 +1,78 @@
 #!/usr/bin/env python3
 
-from sys import argv
-from typing import cast
-from requests import HTTPError, Response, Session
-from bs4 import BeautifulSoup, Tag
 from collections import OrderedDict
+from io import SEEK_END, SEEK_SET, BufferedWriter
 from json import JSONDecodeError, loads
+from os import makedirs
+from os.path import dirname, exists, join, normpath, realpath
+from pickle import UnpicklingError, dump, load
+from sys import argv
+from tqdm.std import tqdm
+from typing import Any, Callable, Literal, TypeVar, cast
+
+from bs4 import BeautifulSoup, Tag
+from requests import HTTPError, Response, Session
+
+_dir: str = dirname(realpath(__name__))
+
+T = TypeVar("T")
+
+
+def _getcache(mode: Literal["rb", "wb"], fn: Callable[[Any], T]) -> T | None:
+    """
+    Opens the cache file (.cache/save) in the given mode and applies fn to it.
+
+    Returns:
+        T | None: fn's result, or None if the cache is missing or unreadable.
+    """
+    cache_dirname = normpath(join(_dir, ".cache"))
+    save_path = normpath(join(cache_dirname, "save"))
+
+    if not exists(cache_dirname):
+        makedirs(cache_dirname)
+
+    try:
+        with open(save_path, mode) as f:
+            return fn(f)
+    except (FileNotFoundError, EOFError, UnpicklingError):
+        return None
+
+
+def savestate(data: tuple[int, set[str]]) -> None:
+    def save(f: BufferedWriter) -> None:
+        _ = f.seek(0)
+        _ = f.truncate()
+        dump(data, f)
+        f.flush()
+
+    _getcache("wb", save)
+
+
+def loadstate() -> tuple[int, set[str]] | None:
+    return _getcache("rb", lambda f: load(f))
 
 
 class _ScraperData:
-    """_summary_"""
+    """
+    A specialized data container for extracting information from JSON dictionaries.
+
+    This class acts as a simplified interface on top of the raw dictionary
+    returned by the Millesima site's __NEXT_DATA__ tag.
+    """
 
     def __init__(self, data: dict[str, object]) -> None:
-        """_summary_
+        """
+        Initializes the container with a data dictionary.
 
         Args:
-            data (dict[str, object]): _description_
+            data (dict[str, object]): The raw JSON dictionary extracted from the page.
         """
         self._data: dict[str, object] = data
 
     def _getcontent(self) -> dict[str, object] | None:
-        """_summary_
+        """
+        Walks the Redux tree down to the product content.
 
         Returns:
-            dict[str, object]: _description_
+            dict[str, object] | None: The product dictionary, or None if the structure differs.
         """
         current_data: dict[str, object] = self._data
         for key in ["initialReduxState", "product", "content"]:
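A quick round-trip of the new checkpoint helpers, as a sketch (the pickle lands in .cache/save next to the module; the link value here is made up):

    from scraper import loadstate, savestate

    savestate((3, {"chateau-example-2015"}))  # page 3, one already-scraped link
    state = loadstate()
    if state is not None:                     # None when the cache is absent or corrupt
        page, cache = state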
@@ -35,10 +84,11 @@ class _ScraperData:
         return current_data
 
     def _getattributes(self) -> dict[str, object] | None:
-        """_summary_
+        """
+        Extracts the product's technical attributes (ratings, appellations, etc.).
 
         Returns:
-            dict[str, object]: _description_
+            dict[str, object] | None: The wine's attributes, or None.
         """
         current_data: object = self._getcontent()
         if current_data is None:
@@ -47,9 +97,13 @@ class _ScraperData:
 
     def prix(self) -> float | None:
         """
-        Returns the unit price of one bottle (75cl).
+        Computes the unit price of one bottle (standardized to 75cl).
 
-        Returns None if no price is available.
+        The site often sells by the case (6 or 12 bottles) or in larger
+        formats (Magnum). This method normalizes the price down to a single unit.
+
+        Returns:
+            float | None: The computed price rounded to 2 decimals, or None.
         """
 
         content = self._getcontent()
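The normalization that docstring describes, as standalone arithmetic (the JSON field names and the exact formula inside prix() are not shown in this hunk, so this is an assumption about the intent):

    def unit_price(lot_price: float, bottles: int, centilitres: int) -> float:
        """Hypothetical price of one 75cl-equivalent bottle in a lot."""
        per_bottle = lot_price / bottles                # 720.0 for 6 bottles -> 120.0
        return round(per_bottle * 75 / centilitres, 2)  # a Magnum (150cl) counts double

    assert unit_price(720.0, 6, 75) == 120.0
    assert unit_price(300.0, 1, 150) == 150.0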
@@ -91,13 +145,13 @@ class _ScraperData:
         return prix_calcule
 
     def appellation(self) -> str | None:
-        """_summary_
+        """
+        Extracts the wine's appellation name.
 
         Returns:
-            str: _description_
+            str | None: The name (e.g. 'Pauillac'), or None.
         """
         attrs: dict[str, object] | None = self._getattributes()
 
         if attrs is not None:
             app_dict: object | None = attrs.get("appellation")
             if isinstance(app_dict, dict):
@@ -105,13 +159,16 @@ class _ScraperData:
         return None
 
     def _getcritiques(self, name: str) -> str | None:
-        """_summary_
+        """
+        Generic method for parsing critics' ratings (Parker, Suckling, etc.).
+
+        Handles single ratings ("95") and rating ranges ("95-97") by averaging.
 
         Args:
-            name (str): _description_
+            name (str): The attribute key in the JSON (e.g. 'note_rp').
 
         Returns:
-            str | None: _description_
+            str | None: The rating formatted as a string, or None.
         """
 
         current_value: dict[str, object] | None = self._getattributes()
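The range handling the docstring mentions, sketched independently of the method's unshown body:

    def parse_note(raw: str) -> str:
        if "-" in raw:                                # "95-97" -> midpoint of the bounds
            low, high = (float(x) for x in raw.split("-"))
            return str((low + high) / 2)
        return raw                                    # "95" passes through unchanged

    assert parse_note("95-97") == "96.0"
    assert parse_note("95") == "95"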
@@ -130,52 +187,72 @@ class _ScraperData:
         return None
 
     def parker(self) -> str | None:
+        """Robert Parker rating."""
         return self._getcritiques("note_rp")
 
     def robinson(self) -> str | None:
+        """Jancis Robinson rating."""
         return self._getcritiques("note_jr")
 
     def suckling(self) -> str | None:
+        """James Suckling rating."""
         return self._getcritiques("note_js")
 
     def getdata(self) -> dict[str, object]:
+        """Returns the complete data dictionary."""
         return self._data
 
     def informations(self) -> str:
         """
-        Returns all the information in the form:
-        "Appelation,Parker,J.Robinson,J.Suckling,Prix"
+        Aggregates the key data for the CSV export.
+
+        Returns:
+            str: Formatted line: "Appellation,Parker,Robinson,Suckling,Prix".
         """
 
         appellation = self.appellation()
         parker = self.parker()
         robinson = self.robinson()
         suckling = self.suckling()
-        try:
-            prix = self.prix()
-        except ValueError:
-            prix = None
+        prix = self.prix()
 
         return f"{appellation},{parker},{robinson},{suckling},{prix}"
 
 
 class Scraper:
     """
-    Scraper is a class that dynamically manages
-    requests aimed only at the Millesima
-    HTTPS server.
+    HTTP client optimized for scraping millesima.fr.
+
+    Manages the persistent session, the browsing headers and a two-level
+    cache to optimize performance and discretion.
     """
 
     def __init__(self) -> None:
         """
-        Initializes the scraping session.
+        Sets up the browsing infrastructure:
+
+        - create a session to avoid a new handshake on every request
+        - add headers so the site does not block access
+        - add a caching layer
         """
         self._url: str = "https://www.millesima.fr/"
         # Very useful to avoid redoing the same TCP handshakes
         # and to keep a constant connection with the server
         self._session: Session = Session()
+        # Builds a fake "identity card" so the site does not
+        # block us for being bots
+        self._session.headers.update(
+            {
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
+                    AppleWebKit/537.36 (KHTML, like Gecko) \
+                    Chrome/122.0.0.0 Safari/537.36",
+                "Accept-Language": "fr-FR,fr;q=0.9,en;q=0.8",
+            }
+        )
         # Caching layer to avoid hitting the server unnecessarily
+        # used by _request
         self._latest_request: tuple[(str, Response)] | None = None
+        # used by getsoup
         self._latest_soups: OrderedDict[str, BeautifulSoup] = OrderedDict[
             str, BeautifulSoup
         ]()
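The persistent-session idea in miniature: requests keeps the TCP connection alive across calls on a single Session, so only the first request pays for the handshake (network calls are commented out to keep the sketch offline):

    from requests import Session

    session = Session()
    session.headers.update({"Accept-Language": "fr-FR,fr;q=0.9,en;q=0.8"})
    # r1 = session.get("https://www.millesima.fr/", timeout=30)
    # r2 = session.get("https://www.millesima.fr/vins-rouges", timeout=30)  # reuses the pool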
@@ -190,11 +267,12 @@ class Scraper:
         Returns:
             Response: The response object for the request.
 
-        Raise:
+        Raises:
             HTTPError: If the server returns an error code (4xx, 5xx).
         """
         target_url: str = self._url + subdir.lstrip("/")
-        response: Response = self._session.get(url=target_url, timeout=10)
+        # send a GET request to the page; on error this raises
+        response: Response = self._session.get(url=target_url, timeout=30)
         response.raise_for_status()
         return response
 
@@ -210,7 +288,7 @@ class Scraper:
         Returns:
             Response: The response object (cached or a new request).
 
-        Raise:
+        Raises:
             HTTPError: If the server returns an error code (4xx, 5xx).
         """
 
@@ -240,7 +318,7 @@ class Scraper:
         Returns:
             BeautifulSoup: The parsed object for data extraction.
 
-        Raise:
+        Raises:
             HTTPError: If the server returns an error code (4xx, 5xx).
         """
 
@@ -261,23 +339,20 @@ class Scraper:
     def getjsondata(self, subdir: str, id: str = "__NEXT_DATA__") -> _ScraperData:
         """
         Extracts the JSON data contained in the site's __NEXT_DATA__ tag.
-        Many modern sites (Next.js) store their initial state in
-        a <script> tag for client-side hydration.
 
         Args:
             subdir (str): The page path.
-            id (str, optional): The script tag identifier (defaults to __NEXT_DATA__).
+            id (str, optional): The script tag identifier.
 
         Raises:
-            HTTPError: Raised by `getresponse` if the server returns an error code (4xx, 5xx).
-            JSONDecodeError: Raised by `loads` if the tag content is not valid JSON.
-            ValueError: Raised manually if one of the expected keys (props, pageProps, etc.)
-                is missing from the JSON structure.
+            HTTPError: Error returned by the server (4xx, 5xx).
+            JSONDecodeError: If the tag content is not valid JSON.
+            ValueError: If the 'props' or 'pageProps' keys are missing.
 
         Returns:
-            dict[str, object]: A dictionary containing the useful data,
-            or an empty dictionary on error.
+            _ScraperData: An instance containing the extracted data.
         """
 
         soup: BeautifulSoup = self.getsoup(subdir)
         script: Tag | None = soup.find("script", id=id)
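The __NEXT_DATA__ technique reduced to its core, with a static snippet standing in for a real product page:

    from json import loads
    from bs4 import BeautifulSoup

    html = '<script id="__NEXT_DATA__">{"props": {"pageProps": {}}}</script>'
    soup = BeautifulSoup(html, "html.parser")
    script = soup.find("script", id="__NEXT_DATA__")
    data = loads(script.get_text()) if script else {}
    # getjsondata() then checks data["props"]["pageProps"] and wraps the result
    # in _ScraperData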
@@ -294,74 +369,116 @@
 
         return _ScraperData(cast(dict[str, object], current_data))
 
-    def _geturlproductslist(self, subdir: str):
-        """_summary_
-
-        Args:
-            subdir (str): _description_
-
-        Returns:
-            _type_: _description_
-        """
+    def _geturlproductslist(self, subdir: str) -> list[dict[str, Any]] | None:
+        """
+        Retrieves the product list of a category page.
+        """
         try:
             data: dict[str, object] = self.getjsondata(subdir).getdata()
 
             for element in ["initialReduxState", "categ", "content"]:
-                data: dict[str, object] = cast(dict[str, object], data.get(element))
-                if not isinstance(data, dict):
-                    return None
+                data = cast(dict[str, object], data.get(element))
 
-            products: list[str] = cast(list[str], data.get("products"))
+            products: list[dict[str, Any]] = cast(
+                list[dict[str, Any]], data.get("products")
+            )
             if isinstance(products, list):
                 return products
 
         except (JSONDecodeError, HTTPError):
             return None
 
-    def getvins(self, subdir: str, filename: str):
-        """_summary_
-
-        Args:
-            subdir (str): _description_
-            filename (str): _description_
-        """
-        with open(filename, "a") as f:
-            cache: set[str] = set[str]()
-            page = 0
-            _ = f.write("Appellation,Robert,Robinson,Suckling,Prix\n")
-
-            while True:
-                page += 1
-                products_list = self._geturlproductslist(f"{subdir}?page={page}")
-
-                if not products_list:
-                    break
-
-                products_list_length = len(products_list)
-                for i, product in enumerate(products_list):
-                    if not isinstance(product, dict):
-                        continue
-
-                    link = product.get("seoKeyword")
-
-                    if link and link not in cache:
-                        try:
-                            infos = self.getjsondata(link).informations()
-                            _ = f.write(infos + "\n")
-                            print(
-                                f"page: {page} | {i + 1}/{products_list_length} {link}"
-                            )
-                            cache.add(link)
-                        except (JSONDecodeError, HTTPError) as e:
-                            print(f"Erreur sur le produit {link}: {e}")
-                f.flush()
+    def _writevins(self, cache: set[str], product: dict[str, Any], f: Any) -> None:
+        """
+        Writes one product's CSV line to f.
+
+        Args:
+            cache (set[str]): Links already written, to avoid duplicates.
+            product (dict): One product entry from the category listing.
+            f (Any): The open output file.
+        """
+        if isinstance(product, dict):
+            link: Any | None = product.get("seoKeyword")
+            if link and link not in cache:
+                try:
+                    infos = self.getjsondata(link).informations()
+                    _ = f.write(infos + "\n")
+                    cache.add(link)
+                except (JSONDecodeError, HTTPError) as e:
+                    print(f"Erreur sur le produit {link}: {e}")
+
+    def getvins(self, subdir: str, filename: str, reset: bool = False) -> None:
+        """
+        Scrapes every page of a category and saves the result as CSV.
+
+        Args:
+            subdir (str): The category (e.g. '/vins-rouges').
+            filename (str): The output file name (e.g. 'vins.csv').
+            reset (bool): (Optional) restarts the process from scratch.
+        """
+        # file write mode
+        mode: Literal["w", "a+"] = "w" if reset else "a+"
+        # header row
+        title: str = "Appellation,Robert,Robinson,Suckling,Prix\n"
+        # starting page
+        page: int = 1
+        # the set used as cache
+        cache: set[str] = set[str]()
+
+        custom_format = "{l_bar} {bar:20} {r_bar}"
+
+        if not reset:
+            # load the cached state if there is one: keep the defaults when
+            # it is missing, otherwise override the variables so the run
+            # resumes instead of restarting the whole process.
+            serializable: tuple[int, set[str]] | None = loadstate()
+            if isinstance(serializable, tuple):
+                # override the page and the cache
+                page, cache = serializable
+        try:
+            with open(filename, mode) as f:
+                # check that the header is present at the start of the buffer
+                # and write it otherwise. Small potential bug: "a+" always
+                # writes at the end of the buffer, so if anything was written
+                # before, the header lands after that data; we assume nobody
+                # touches the file.
+                _ = f.seek(0, SEEK_SET)
+                if not (f.read(len(title)) == title):
+                    _ = f.write(title)
+                else:
+                    _ = f.seek(0, SEEK_END)
+
+                while True:
+                    products_list: list[dict[str, Any]] | None = (
+                        self._geturlproductslist(f"{subdir}?page={page}")
+                    )
+                    if not products_list:
+                        break
+
+                    pbar: tqdm[dict[str, Any]] = tqdm(
+                        products_list, bar_format=custom_format
+                    )
+                    for product in pbar:
+                        keyword = product.get("seoKeyword", "Inconnu")[:40]
+                        pbar.set_description(
+                            f"Page: {page:<3} | Product: {keyword:<40}"
+                        )
+                        self._writevins(cache, product, f)
+                    page += 1
+        except (Exception, HTTPError, KeyboardInterrupt, JSONDecodeError):
+            if not reset:
+                savestate((page, cache))
 
 
 def main() -> None:
-    if len(argv) != 2:
-        raise ValueError(f"{argv[0]} <sous-url>")
+    if len(argv) != 3:
+        raise ValueError(f"{argv[0]} <filename> <sous-url>")
+    filename = argv[1]
+    suburl = argv[2]
 
     scraper: Scraper = Scraper()
-    scraper.getvins(argv[1], "donnee.csv")
+    scraper.getvins(suburl, filename)
 
 
 if __name__ == "__main__":
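Put together, a run now looks like this (the category path and file name are examples; with the default reset=False an interrupted run resumes from the pickled page and cache):

    scraper = Scraper()
    scraper.getvins("vins-rouges", "donnee.csv")        # resumes if .cache/save exists
    scraper.getvins("vins-rouges", "donnee.csv", True)  # reset=True: overwrite and restart

or, via main(): python scraper.py donnee.csv vins-rouges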
@@ -153,7 +153,7 @@ def mock_site():
 
     html_product = f"""
     <html>
     <body>
         <h1>MILLESIMA</h1>
         <script id="__NEXT_DATA__" type="application/json">
             {dumps(json_data)}
@@ -168,7 +168,7 @@ def mock_site():
 
     html_product = f"""
     <html>
     <body>
         <h1>MILLESIMA</h1>
         <script id="__NEXT_DATA__" type="application/json">
             {dumps(json_data)}
@@ -179,7 +179,7 @@ def mock_site():
 
     list_pleine = f"""
     <html>
     <body>
         <h1>LE WINE</h1>
         <script id="__NEXT_DATA__" type="application/json">
             {dumps({
@@ -207,7 +207,7 @@ def mock_site():
 
     list_vide = f"""
     <html>
     <body>
         <h1>LE WINE</h1>
         <script id="__NEXT_DATA__" type="application/json">
             {dumps({
@@ -319,7 +319,7 @@ def test_informations(scraper: Scraper):
 def test_search(scraper: Scraper):
     m = mock_open()
     with patch("builtins.open", m):
-        scraper.getvins("wine.html", "fake_file.csv")
+        scraper.getvins("wine.html", "fake_file.csv", True)
 
     assert m().write.called
     all_writes = "".join(call.args[0] for call in m().write.call_args_list)