Mirror of https://github.com/guezoloic/millesima_projetS6.git
Synced 2026-03-29 03:23:47 +00:00

Compare commits: optimisati...123c43aa05 (25 commits)
| SHA1 |
|---|
| 123c43aa05 |
| a163e7687f |
| 0f6eb856c6 |
| d62145e250 |
| 829c303e78 |
| b584f9a301 |
| 547c7ec4c1 |
| 0aa765d6a0 |
| 8a357abe86 |
| 2f5af5aabf |
| a33b484dea |
| dd430b9861 |
| 011bb6a689 |
| 96dbaaaaf6 |
| ed86e588f7 |
| 0182bbbf20 |
| cd1e266f25 |
| 2aa99453a0 |
| 9f1ff1ef7b |
| bfc39db652 |
| 717fce6ca4 |
| 9914e8af41 |
| 2bc5d57a31 |
| 8f21e48b28 |
| 8cae082344 |
.github/workflows/python-app.yml (vendored, 52 changed lines)

@@ -5,35 +5,41 @@ name: Python application
 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]
   pull_request:
-    branches: [ "main" ]
+    branches: ["main"]
 
 permissions:
-  contents: read
+  contents: write
 
 jobs:
   build:
 
     runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v4
-    - name: Set up Python 3.10
-      uses: actions/setup-python@v3
-      with:
-        python-version: "3.10"
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install flake8 pytest
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-    - name: Lint with flake8
-      run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Test with pytest
-      run: |
-        pytest
+      - uses: actions/checkout@v4
+
+      - name: Set up Python 3.10
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+
+      - name: install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install ".[test,doc]"
+
+      - name: Lint with flake8
+        run: |
+          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
+      - name: Test with pytest
+        run: pytest
+
+      - name: Deploy Doc
+        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+        run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+          mkdocs gh-deploy --force
.gitignore (vendored, 2 changed lines)

@@ -205,3 +205,5 @@ cython_debug/
 marimo/_static/
 marimo/_lsp/
 __marimo__/
+
+*.csv
docs/index.md (new file, 1 line)

@@ -0,0 +1 @@
+# Millesima
docs/scraper.md (new file, 3 lines)

@@ -0,0 +1,3 @@
+# Scraper
+
+::: scraper.Scraper
docs/scraperdata.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+
+# _ScraperData
+
+::: scraper._ScraperData
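The `:::` lines above are mkdocstrings directives: at build time the plugin imports the named object and renders its docstring as an API page. A minimal sketch (hypothetical function, not part of the repo) of the Google-style docstring layout it expects, matching the style used in src/scraper.py:

```python
def example(subdir: str) -> str:
    """One-line summary, rendered as the lead paragraph.

    Args:
        subdir (str): Rendered as a row in the parameters table.

    Returns:
        str: Rendered under a "Returns" heading.
    """
    return subdir
```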
main.py (deleted, 224 lines)

@@ -1,224 +0,0 @@
-from typing import cast
-from requests import Response, Session
-from bs4 import BeautifulSoup, Tag
-from collections import OrderedDict
-from json import loads
-
-
-class _ScraperData:
-    def __init__(self, data: dict[str, object], scraper: Scraper | None = None) -> None:
-        self._data: dict[str, object] = data
-        self._scraper: Scraper | None = scraper
-
-    def _getcontent(self) -> dict[str, object] | None:
-        """_summary_
-
-        Returns:
-            dict[str, object]: _description_
-        """
-        current_data: dict[str, object] = self._data
-        for key in ["initialReduxState", "product", "content"]:
-            new_data: object | None = current_data.get(key)
-            if new_data is None:
-                return None
-            current_data: dict[str, object] = cast(dict[str, object], new_data)
-
-        return current_data
-
-    def _getattributes(self) -> dict[str, object] | None:
-        """_summary_
-
-        Returns:
-            dict[str, object]: _description_
-        """
-        current_data: object = self._getcontent()
-        if current_data is None:
-            return None
-        return cast(dict[str, object], current_data.get("attributes"))
-
-    def appellation(self) -> str | None:
-        """_summary_
-
-        Returns:
-            str: _description_
-        """
-        attrs: dict[str, object] | None = self._getattributes()
-
-        if attrs is not None:
-            app_dict: object | None = attrs.get("appellation")
-            if isinstance(app_dict, dict):
-                return cast(str, app_dict.get("value"))
-        return None
-
-    def _getcritiques(self, name: str) -> str | None:
-        """_summary_
-
-        Args:
-            name (str): _description_
-
-        Returns:
-            str | None: _description_
-        """
-
-        current_value: dict[str, object] | None = self._getattributes()
-        if current_value is not None:
-            app_dict: dict[str, object] = cast(
-                dict[str, object], current_value.get(name)
-            )
-            if not app_dict:
-                return None
-
-            val = cast(str, app_dict.get("value")).rstrip("+").split("-")
-            if len(val) > 1:
-                val[0] = str((int(val[0]) + int(val[1])) / 2)
-
-            return val[0]
-        return None
-
-    def parker(self) -> str | None:
-        return self._getcritiques("note_rp")
-
-    def robinson(self) -> str | None:
-        return self._getcritiques("note_jr")
-
-    def suckling(self) -> str | None:
-        return self._getcritiques("note_js")
-
-    def getdata(self) -> dict[str, object]:
-        return self._data
-
-
-class Scraper:
-    """
-    Scraper is a class that dynamically manages
-    requests aimed exclusively at the Millesima
-    HTTPS server.
-    """
-
-    def __init__(self) -> None:
-        """
-        Initializes the scraping session.
-        """
-        self._url: str = "https://www.millesima.fr/"
-        # Very useful to avoid redoing the same TCP handshakes
-        # and to keep a constant connection with the server
-        self._session: Session = Session()
-        # Cache system to avoid hitting the server needlessly
-        self._latest_request: tuple[(str, Response)] | None = None
-        self._latest_soups: OrderedDict[str, BeautifulSoup] = OrderedDict[
-            str, BeautifulSoup
-        ]()
-
-    def _request(self, subdir: str) -> Response:
-        """
-        Performs a GET request against the Millesima server.
-
-        Args:
-            subdir (str): The subdirectory or URL path (e.g. "/vins").
-
-        Returns:
-            Response: The response object of the request.
-
-        Raises:
-            HTTPError: If the server returns an error code (4xx, 5xx).
-        """
-        target_url: str = self._url + subdir.lstrip("/")
-        response: Response = self._session.get(url=target_url, timeout=10)
-        response.raise_for_status()
-        return response
-
-    def getresponse(self, subdir: str = "", use_cache: bool = True) -> Response:
-        """
-        Fetches the response for a page, using the cache when possible.
-
-        Args:
-            subdir (str, optional): The page path.
-            use_cache (bool, optional): Reuse the saved data, or
-                overwrite the stored data with the new response.
-
-        Returns:
-            Response: The response object (cached or fresh).
-
-        Raises:
-            HTTPError: If the server returns an error code (4xx, 5xx).
-        """
-
-        # if it is in the cache, latest_request exists
-        if use_cache and self._latest_request is not None:
-            rq_subdir, rq_response = self._latest_request
-
-            # if it is the same request and use_cache is true,
-            # return the stored one
-            if subdir == rq_subdir:
-                return rq_response
-
-        request: Response = self._request(subdir)
-        # rebuild the structure for the cache system if enabled
-        if use_cache:
-            self._latest_request = (subdir, request)
-
-        return request
-
-    def getsoup(self, subdir: str, use_cache: bool = True) -> BeautifulSoup:
-        """
-        Fetches the HTML content of a page and turns it into a BeautifulSoup object.
-
-        Args:
-            subdir (str, optional): The page path.
-
-        Returns:
-            BeautifulSoup: The parsed object, ready for data extraction.
-
-        Raises:
-            HTTPError: If the server returns an error code (4xx, 5xx).
-        """
-
-        if use_cache and subdir in self._latest_soups:
-            return self._latest_soups[subdir]
-
-        markup: str = self.getresponse(subdir).text
-        soup: BeautifulSoup = BeautifulSoup(markup, features="html.parser")
-
-        if use_cache:
-            self._latest_soups[subdir] = soup
-
-            if len(self._latest_soups) > 10:
-                _ = self._latest_soups.popitem(last=False)
-
-        return soup
-
-    def getjsondata(self, subdir: str, id: str = "__NEXT_DATA__") -> _ScraperData:
-        """
-        Extracts the JSON data held in the site's __NEXT_DATA__ tag.
-        Many modern (Next.js) sites store their initial state in a
-        <script> tag for client-side hydration.
-
-        Args:
-            subdir (str): The page path.
-            id (str, optional): The id of the script tag (default __NEXT_DATA__).
-
-        Raises:
-            HTTPError: Raised by `getresponse` if the server returns an error code (4xx, 5xx).
-            JSONDecodeError: Raised by `loads` if the tag content is not valid JSON.
-            ValueError: Raised manually if one of the expected keys (props, pageProps, etc.)
-                is missing from the JSON structure.
-
-        Returns:
-            dict[str, object]: A dictionary holding the useful data,
-                or an empty dictionary on error.
-        """
-        soup: BeautifulSoup = self.getsoup(subdir)
-        script: Tag | None = soup.find("script", id=id)
-
-        if script is None or not script.string:
-            raise ValueError(f"le script id={id} est introuvable")
-
-        current_data: object = cast(object, loads(script.string))
-
-        for key in ["props", "pageProps"]:
-            if isinstance(current_data, dict) and key in current_data:
-                current_data = cast(object, current_data[key])
-                continue
-            raise ValueError(f"Clé manquante dans le JSON : {key}")
-
-        return _ScraperData(cast(dict[str, object], current_data))
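Both this deleted module and its replacement in src/scraper.py bound the soup cache by evicting the oldest entry with `popitem(last=False)` once it holds more than 10 pages. A minimal standalone sketch (hypothetical keys) of that FIFO eviction:

```python
from collections import OrderedDict

cache: OrderedDict[str, str] = OrderedDict()
for n in range(12):
    cache[f"/page-{n}"] = f"<html>{n}</html>"  # insertion order is kept
    if len(cache) > 10:
        cache.popitem(last=False)  # drop the oldest insertion

print(list(cache))  # ['/page-2', '/page-3', ..., '/page-11']
```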
mkdocs.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
+site_name: "Projet Millesima S6"
+
+theme:
+  name: "material"
+
+plugins:
+  - search
+  - mkdocstrings
+
+markdown_extensions:
+  - admonition
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed
pyproject.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
+[project]
+name = "projet-millesima-s6"
+version = "0.1.0"
+dependencies = ["requests==2.32.5", "beautifulsoup4==4.14.3", "pandas==2.3.3"]
+
+[project.optional-dependencies]
+test = ["pytest==8.4.2", "requests-mock==1.12.1", "flake8==7.3.0"]
+doc = ["mkdocs<2.0.0", "mkdocs-material==9.6.23", "mkdocstrings[python]"]
+
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
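The CI step `pip install ".[test,doc]"` resolves exactly these two extras on top of the runtime pins. A minimal sketch (assumes Python 3.11+ for `tomllib` and this file in the working directory) of how the groups can be read back:

```python
import tomllib  # standard library since Python 3.11

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)["project"]

print(project["dependencies"])  # runtime pins
for extra, deps in project["optional-dependencies"].items():
    print(f"[{extra}] -> {deps}")  # the test and doc groups
```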
@@ -1,4 +0,0 @@
-requests>=2.32.5
-requests-mock>=1.12.1
-beautifulsoup4>=4.14.3
-
Binary file not shown.
src/main.py (new executable file, 20 lines)

@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+
+from os import getcwd
+from os.path import normpath, join
+from sys import argv
+from pandas import read_csv, DataFrame
+
+def main() -> None:
+    if len(argv) != 2:
+        raise ValueError(f"{argv[0]} <filename.csv>")
+
+    path: str = normpath(join(getcwd(), argv[1]))
+    db: DataFrame = read_csv(path)
+    print(db.all())
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as e:
+        print(f"ERREUR: {e}")
src/scraper.py (new executable file, 404 lines)

@@ -0,0 +1,404 @@
+#!/usr/bin/env python3
+
+from sys import argv
+from typing import cast
+from requests import HTTPError, Response, Session
+from bs4 import BeautifulSoup, Tag
+from collections import OrderedDict
+from json import JSONDecodeError, loads
+
+
+class _ScraperData:
+    """
+    Specialized data container for pulling information out of the JSON dictionaries.
+
+    This class acts as a simplified interface over the raw dictionary
+    returned by the Millesima site's __NEXT_DATA__ tag.
+    """
+
+    def __init__(self, data: dict[str, object]) -> None:
+        """
+        Initializes the container with a data dictionary.
+
+        Args:
+            data (dict[str, object]): The raw JSON dictionary extracted from the page.
+        """
+        self._data: dict[str, object] = data
+
+    def _getcontent(self) -> dict[str, object] | None:
+        """
+        Walks the Redux tree down to the product content.
+
+        Returns:
+            dict[str, object] | None: The product dictionary, or None if the structure differs.
+        """
+        current_data: dict[str, object] = self._data
+        for key in ["initialReduxState", "product", "content"]:
+            new_data: object | None = current_data.get(key)
+            if new_data is None:
+                return None
+            current_data: dict[str, object] = cast(dict[str, object], new_data)
+
+        return current_data
+
+    def _getattributes(self) -> dict[str, object] | None:
+        """
+        Extracts the product's technical attributes (scores, appellation, etc.).
+
+        Returns:
+            dict[str, object] | None: The wine's attributes, or None.
+        """
+        current_data: object = self._getcontent()
+        if current_data is None:
+            return None
+        return cast(dict[str, object], current_data.get("attributes"))
+
+    def prix(self) -> float | None:
+        """
+        Computes the unit price of one bottle (standardized to 75cl).
+
+        The site often sells by the case (6 or 12 bottles) or in larger formats (Magnum).
+        This method normalizes the price down to a single unit.
+
+        Returns:
+            float | None: The computed price rounded to 2 decimals, or None.
+        """
+
+        content = self._getcontent()
+        if content is None:
+            return None
+
+        items = content.get("items")
+
+        # Check that items exists and is not empty
+        if not isinstance(items, list) or len(items) == 0:
+            return None
+
+        prix_calcule: float | None = None
+
+        for item in items:
+            if not isinstance(item, dict):
+                continue
+
+            p = item.get("offerPrice")
+            attrs = item.get("attributes", {})
+
+            nbunit = attrs.get("nbunit", {}).get("value")
+            equivbtl = attrs.get("equivbtl", {}).get("value")
+
+            if not isinstance(p, (int, float)) or not nbunit or not equivbtl:
+                continue
+
+            nb = float(nbunit)
+            eq = float(equivbtl)
+
+            if nb <= 0 or eq <= 0:
+                continue
+
+            if nb == 1 and eq == 1:
+                return float(p)
+
+            prix_calcule = round(float(p) / (nb * eq), 2)
+
+        return prix_calcule
+
+    def appellation(self) -> str | None:
+        """
+        Extracts the wine's appellation name.
+
+        Returns:
+            str | None: The name (e.g. 'Pauillac'), or None.
+        """
+        attrs: dict[str, object] | None = self._getattributes()
+
+        if attrs is not None:
+            app_dict: object | None = attrs.get("appellation")
+            if isinstance(app_dict, dict):
+                return cast(str, app_dict.get("value"))
+        return None
+
+    def _getcritiques(self, name: str) -> str | None:
+        """
+        Generic parser for critics' scores (Parker, Suckling, etc.).
+
+        Handles single scores ("95") and score ranges ("95-97") by averaging.
+
+        Args:
+            name (str): The attribute key in the JSON (e.g. 'note_rp').
+
+        Returns:
+            str | None: The score formatted as a string, or None.
+        """
+
+        current_value: dict[str, object] | None = self._getattributes()
+        if current_value is not None:
+            app_dict: dict[str, object] = cast(
+                dict[str, object], current_value.get(name)
+            )
+            if not app_dict:
+                return None
+
+            val = cast(str, app_dict.get("value")).rstrip("+").split("-")
+            if len(val) > 1 and val[1] != "":
+                val[0] = str(round((float(val[0]) + float(val[1])) / 2, 1))
+
+            return val[0]
+        return None
+
+    def parker(self) -> str | None:
+        """Robert Parker score."""
+        return self._getcritiques("note_rp")
+
+    def robinson(self) -> str | None:
+        """Jancis Robinson score."""
+        return self._getcritiques("note_jr")
+
+    def suckling(self) -> str | None:
+        """James Suckling score."""
+        return self._getcritiques("note_js")
+
+    def getdata(self) -> dict[str, object]:
+        """Returns the complete data dictionary."""
+        return self._data
+
+    def informations(self) -> str:
+        """
+        Aggregates the key fields for CSV export.
+
+        Returns:
+            str: Formatted line: "Appellation,Parker,Robinson,Suckling,Prix".
+        """
+
+        appellation = self.appellation()
+        parker = self.parker()
+        robinson = self.robinson()
+        suckling = self.suckling()
+        prix = self.prix()
+
+        return f"{appellation},{parker},{robinson},{suckling},{prix}"
+
+
+class Scraper:
+    """
+    HTTP client tuned for scraping millesima.fr.
+
+    Manages a persistent session, browser-like headers and a two-level cache
+    to improve performance and keep a low profile.
+    """
+
+    def __init__(self) -> None:
+        """
+        Sets up the browsing infrastructure:
+
+        - create a session so each request does not need a new handshake
+        - add headers so the site does not block access
+        - add a caching system
+        """
+        self._url: str = "https://www.millesima.fr/"
+        # Very useful to avoid redoing the same TCP handshakes
+        # and to keep a constant connection with the server
+        self._session: Session = Session()
+        # Forge a "fake ID card" so the site does not block us
+        # for being robots
+        self._session.headers.update(
+            {
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
+AppleWebKit/537.36 (KHTML, like Gecko) \
+Chrome/122.0.0.0 Safari/537.36",
+                "Accept-Language": "fr-FR,fr;q=0.9,en;q=0.8",
+            }
+        )
+        # Cache system to avoid hitting the server needlessly
+        # used by _request
+        self._latest_request: tuple[(str, Response)] | None = None
+        # used by getsoup
+        self._latest_soups: OrderedDict[str, BeautifulSoup] = OrderedDict[
+            str, BeautifulSoup
+        ]()
+
+    def _request(self, subdir: str) -> Response:
+        """
+        Performs a GET request against the Millesima server.
+
+        Args:
+            subdir (str): The subdirectory or URL path (e.g. "/vins").
+
+        Returns:
+            Response: The response object of the request.
+
+        Raises:
+            HTTPError: If the server returns an error code (4xx, 5xx).
+        """
+        target_url: str = self._url + subdir.lstrip("/")
+        # send a GET request for the page; on error, an exception is raised
+        response: Response = self._session.get(url=target_url, timeout=30)
+        response.raise_for_status()
+        return response
+
+    def getresponse(self, subdir: str = "", use_cache: bool = True) -> Response:
+        """
+        Fetches the response for a page, using the cache when possible.
+
+        Args:
+            subdir (str, optional): The page path.
+            use_cache (bool, optional): Reuse the saved data, or
+                overwrite the stored data with the new response.
+
+        Returns:
+            Response: The response object (cached or fresh).
+
+        Raises:
+            HTTPError: If the server returns an error code (4xx, 5xx).
+        """
+
+        # if it is in the cache, latest_request exists
+        if use_cache and self._latest_request is not None:
+            rq_subdir, rq_response = self._latest_request
+
+            # if it is the same request and use_cache is true,
+            # return the stored one
+            if subdir == rq_subdir:
+                return rq_response
+
+        request: Response = self._request(subdir)
+        # rebuild the structure for the cache system if enabled
+        if use_cache:
+            self._latest_request = (subdir, request)
+
+        return request
+
+    def getsoup(self, subdir: str, use_cache: bool = True) -> BeautifulSoup:
+        """
+        Fetches the HTML content of a page and turns it into a BeautifulSoup object.
+
+        Args:
+            subdir (str, optional): The page path.
+
+        Returns:
+            BeautifulSoup: The parsed object, ready for data extraction.
+
+        Raises:
+            HTTPError: If the server returns an error code (4xx, 5xx).
+        """
+
+        if use_cache and subdir in self._latest_soups:
+            return self._latest_soups[subdir]
+
+        markup: str = self.getresponse(subdir).text
+        soup: BeautifulSoup = BeautifulSoup(markup, features="html.parser")
+
+        if use_cache:
+            self._latest_soups[subdir] = soup
+
+            if len(self._latest_soups) > 10:
+                _ = self._latest_soups.popitem(last=False)
+
+        return soup
+
+    def getjsondata(self, subdir: str, id: str = "__NEXT_DATA__") -> _ScraperData:
+        """
+        Extracts the JSON data held in the site's __NEXT_DATA__ tag.
+
+        Args:
+            subdir (str): The page path.
+            id (str, optional): The id of the script tag.
+
+        Raises:
+            HTTPError: Error returned by the server (4xx, 5xx).
+            JSONDecodeError: If the tag content is not valid JSON.
+            ValueError: If the 'props' or 'pageProps' keys are missing.
+
+        Returns:
+            _ScraperData: Instance holding the extracted data.
+        """
+
+        soup: BeautifulSoup = self.getsoup(subdir)
+        script: Tag | None = soup.find("script", id=id)
+
+        if script is None or not script.string:
+            raise ValueError(f"le script id={id} est introuvable")
+
+        current_data: object = cast(object, loads(script.string))
+
+        for key in ["props", "pageProps"]:
+            if isinstance(current_data, dict) and key in current_data:
+                current_data = cast(object, current_data[key])
+                continue
+            raise ValueError(f"Clé manquante dans le JSON : {key}")
+
+        return _ScraperData(cast(dict[str, object], current_data))
+
+    def _geturlproductslist(self, subdir: str) -> list[str] | None:
+        """
+        Fetches the list of products on a category page.
+        """
+        try:
+            data: dict[str, object] = self.getjsondata(subdir).getdata()
+
+            for element in ["initialReduxState", "categ", "content"]:
+                data: dict[str, object] = cast(dict[str, object], data.get(element))
+                if not isinstance(data, dict):
+                    return None
+
+            products: list[str] = cast(list[str], data.get("products"))
+            if isinstance(products, list):
+                return products
+
+        except (JSONDecodeError, HTTPError):
+            return None
+
+    def getvins(self, subdir: str, filename: str, reset: bool) -> None:
+        """
+        Walks every page of a category and saves the results to CSV.
+
+        Args:
+            subdir (str): The category (e.g. '/vins-rouges').
+            filename (str): Name of the output file (e.g. 'vins.csv').
+            reset (bool): (Optional) resets the process.
+        """
+        with open(filename, "w") as f:
+            cache: set[str] = set[str]()
+            page = 0
+            _ = f.write("Appellation,Robert,Robinson,Suckling,Prix\n")
+
+            while True:
+                page += 1
+                products_list: list[str] | None = self._geturlproductslist(
+                    f"{subdir}?page={page}"
+                )
+
+                if not products_list:
+                    break
+
+                products_list_length = len(products_list)
+                for i, product in enumerate(products_list):
+                    if not isinstance(product, dict):
+                        continue
+
+                    link = product.get("seoKeyword")
+
+                    if link and link not in cache:
+                        try:
+                            infos = self.getjsondata(link).informations()
+                            _ = f.write(infos + "\n")
+                            print(
+                                f"page: {page} | {i + 1}/{products_list_length} {link}"
+                            )
+                            cache.add(link)
+                        except (JSONDecodeError, HTTPError) as e:
+                            print(f"Erreur sur le produit {link}: {e}")
+                    f.flush()
+
+
+def main() -> None:
+    if len(argv) != 2:
+        raise ValueError(f"{argv[0]} <sous-url>")
+    scraper: Scraper = Scraper()
+    scraper.getvins(argv[1], "donnee.csv", False)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as e:
+        print(f"ERREUR: {e}")
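A hedged usage sketch of the module above (live access to millesima.fr is assumed; the product path is the one the test fixture mocks, so it is illustrative only):

```python
from scraper import Scraper

scraper = Scraper()
# One GET, parsed once; repeated calls for the same path hit the caches.
data = scraper.getjsondata("nino-negri-5-stelle-sfursat-2022.html")
print(data.appellation(), data.parker(), data.prix())
# prix() normalizes case prices: a 6-bottle case (nbunit=6, equivbtl=1)
# priced at 842 would come back as round(842 / (6 * 1), 2) == 140.33.
```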
tests/test_main.py

@@ -1,8 +1,8 @@
 from json import dumps
 from bs4 import Tag
 from unittest.mock import patch, mock_open
 import pytest
 from requests_mock import Mocker
-from main import Scraper
+from scraper import Scraper
 
 
@@ -71,10 +71,10 @@ def mock_site():
                 "_id": "J4131/22/C/CC/6-11652",
                 "partnumber": "J4131/22/C/CC/6",
                 "taxRate": "H",
-                "listPrice": 390,
-                "offerPrice": 390,
-                "seoKeyword": "nino-negri-5-stelle-sfursat-2022-c-cc-6.html",
-                "shortdesc": "Un carton de 6 Bouteilles (75cl)",
+                "listPrice": 842,
+                "offerPrice": 842,
+                "seoKeyword": "vin-de-charazade1867.html",
+                "shortdesc": "Une bouteille du meilleur vin du monde?",
                 "attributes": {
                     "promotion_o_n": {
                         "valueId": "0",
@@ -94,6 +94,18 @@ def mock_site():
                         "type": "CHECKBOX",
                         "isSpirit": False,
                     },
+                    "equivbtl": {
+                        "valueId": "1",
+                        "name": "equivbtl",
+                        "value": "1",
+                        "isSpirit": False,
+                    },
+                    "nbunit": {
+                        "valueId": "1",
+                        "name": "nbunit",
+                        "value": "1",
+                        "isSpirit": False,
+                    },
                 },
                 "stock": 12,
                 "availability": "2026-02-05",
@@ -105,29 +117,17 @@ def mock_site():
             }
         ],
         "attributes": {
-            "equivbtl": {
-                "valueId": "1",
-                "name": "equivbtl",
-                "value": "1",
-                "isSpirit": False,
-            },
-            "nbunit": {
-                "valueId": "6",
-                "name": "nbunit",
-                "value": "6",
-                "isSpirit": False,
-            },
             "appellation": {
                 "valueId": "433",
                 "name": "Appellation",
-                "value": "Sforzato di Valtellina",
-                "url": "sforzato-di-valtellina.html",
+                "value": "Madame-Loïk",
+                "url": "Madame-loik.html",
                 "isSpirit": False,
                 "groupIdentifier": "appellation_433",
             },
             "note_rp": {
                 "valueId": "91",
-                "name": "Parker",
+                "name": "Peter Parker",
                 "value": "91",
                 "isSpirit": False,
             },
@@ -138,8 +138,8 @@ def mock_site():
                 "isSpirit": False,
             },
             "note_js": {
-                "valueId": "93-94",
-                "name": "J. Suckling",
+                "valueId": "93-94.5",
+                "name": "J. cherazade",
                 "value": "93-94",
                 "isSpirit": False,
             },
@@ -166,6 +166,79 @@ def mock_site():
         text=html_product,
     )
 
+    html_product = f"""
+    <html>
+        <body>
+            <h1>MILLESIMA</h1>
+            <script id="__NEXT_DATA__" type="application/json">
+                {dumps(json_data)}
+            </script>
+        </body>
+    </html>
+    """
+
+    list_pleine = f"""
+    <html>
+        <body>
+            <h1>LE WINE</h1>
+            <script id="__NEXT_DATA__" type="application/json">
+                {dumps({
+                    "props": {
+                        "pageProps": {
+                            "initialReduxState": {
+                                "categ": {
+                                    "content": {
+                                        "products": [
+                                            {"seoKeyword": "/nino-negri-5-stelle-sfursat-2022.html",},
+                                            {"seoKeyword": "/poubelle",},
+                                            {"seoKeyword": "/",}
+                                        ]
+                                    }
+                                }
+                            }
+                        }
+                    }
+                })}
+            </script>
+        </body>
+    </html>
+    """
+
+    list_vide = f"""
+    <html>
+        <body>
+            <h1>LE WINE</h1>
+            <script id="__NEXT_DATA__" type="application/json">
+                {dumps({
+                    "props": {
+                        "pageProps": {
+                            "initialReduxState": {
+                                "categ": {
+                                    "content": {
+                                        "products": []
+                                    }
+                                }
+                            }
+                        }
+                    }
+                })}
+            </script>
+        </body>
+    </html>
+    """
+
+    m.get(
+        "https://www.millesima.fr/wine.html",
+        complete_qs=False,
+        response_list=[
+            {"text": list_pleine},
+            {"text": list_vide},
+        ],
+    )
 
     # return m without shutting down the server that simulates the page
     yield m
 
@@ -190,7 +263,7 @@ def test_appellation(scraper: Scraper):
     contenu = scraper.getjsondata("nino-negri-5-stelle-sfursat-2022.html")
     assert vide.appellation() is None
     assert poubelle.appellation() is None
-    assert contenu.appellation() == "Sforzato di Valtellina"
+    assert contenu.appellation() == "Madame-Loïk"
 
 
 def test_fonctionprivee(scraper: Scraper):
@@ -223,3 +296,31 @@ def test_critiques(scraper: Scraper):
     assert contenu.robinson() == "17"
     assert contenu.suckling() == "93.5"
     assert contenu._getcritiques("test_ts") is None
+
+
+def test_prix(scraper: Scraper):
+    vide = scraper.getjsondata("")
+    poubelle = scraper.getjsondata("poubelle")
+    contenu = scraper.getjsondata("nino-negri-5-stelle-sfursat-2022.html")
+    assert vide.prix() is None
+    assert poubelle.prix() is None
+    assert contenu.prix() == 842.0
+
+
+def test_informations(scraper: Scraper):
+    contenu = scraper.getjsondata("nino-negri-5-stelle-sfursat-2022.html")
+    assert contenu.informations() == "Madame-Loïk,91,17,93.5,842.0"
+    vide = scraper.getjsondata("")
+    poubelle = scraper.getjsondata("poubelle")
+    assert vide.informations() == "None,None,None,None,None"
+    assert poubelle.informations() == "None,None,None,None,None"
+
+
+def test_search(scraper: Scraper):
+    m = mock_open()
+    with patch("builtins.open", m):
+        scraper.getvins("wine.html", "fake_file.csv", False)
+
+    assert m().write.called
+    all_writes = "".join(call.args[0] for call in m().write.call_args_list)
+    assert "Madame-Loïk,91,17,93.5,842.0" in all_writes
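The fixture's `response_list` registration is what terminates `getvins`: requests_mock serves the registered responses in order, so the first page returns products and the second returns an empty list, breaking the pagination loop. A minimal standalone sketch (hypothetical URL) of that pattern:

```python
import requests
import requests_mock

with requests_mock.Mocker() as m:
    m.get(
        "https://example.test/wine.html",
        response_list=[{"text": "page 1"}, {"text": ""}],
    )
    s = requests.Session()
    print(s.get("https://example.test/wine.html?page=1").text)  # "page 1"
    print(s.get("https://example.test/wine.html?page=2").text)  # "" -> loop stops
```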