From e6b04fc740e081af3fe70dc82a8a634c8a1df675 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20GUEZO?=
Date: Sun, 1 Feb 2026 22:04:21 +0100
Subject: [PATCH 1/3] add(main): Scraper class

---
 main.py          | 116 ++++++++++++++++++++++++++++++++++++++++++----
 requirements.txt |   1 +
 2 files changed, 109 insertions(+), 8 deletions(-)

diff --git a/main.py b/main.py
index d6c33b3..a2cd2e2 100644
--- a/main.py
+++ b/main.py
@@ -1,12 +1,112 @@
+import requests
+from typing import Any
 from bs4 import BeautifulSoup
-import requests as rq
+import json
 
-def getsoup(s: str) -> BeautifulSoup:
-    return BeautifulSoup(rq.get(s).text, 'html.parser')
+url = "louis-latour-aloxe-corton-1er-cru-les-chaillots-2018.html"
+# response = requests.get(url)
+# soup = BeautifulSoup(response.text, 'html.parser')
 
-soup = getsoup("https://www.millesima.fr/")
 
-def nimportequoi() :
-    links = soup.find_all('a')
-    for link in links:
-        print(link.get('href'))
\ No newline at end of file
+class MillesimaSoup(BeautifulSoup):
+    def __init__(self, markup="", features="html.parser", *args, **kwargs):
+        super().__init__(markup, features, *args, **kwargs)
+
+        self._json_data = self._extract_json_data()
+
+    def _extract_json_data(self) -> dict[str, Any]:
+        script = self.find("script", id="__NEXT_DATA__")
+
+        if script and script.string:
+            try:
+                data: dict[str, Any] = json.loads(script.string)
+                # Sanity check: walk the expected Next.js key path
+                # before returning the payload.
+                node: Any = data
+                for key in ['props', 'pageProps', 'initialReduxState', 'product', 'content']:
+                    if not isinstance(node, dict) or key not in node:
+                        return {}
+                    node = node[key]
+                return data
+            except json.JSONDecodeError:
+                return {}
+        return {}
+
+
+class Scraper:
+    """
+    Scraper dynamically manages HTTP requests aimed
+    exclusively at the Millesima HTTPS server.
+    """
+
+    def __init__(self):
+        """
+        Docstring for __init__
+
+        :param self: Description
+        :param subdir: Description
+        :type subdir: str
+        """
+        # Reusing a single Session avoids repeating the same TCP handshake
+        # on every call and keeps a persistent connection to the server.
+        self._session: requests.Session = requests.Session()
+        self._url: str = "https://www.millesima.fr/"
+        self._soup = None
+
+    def _request(self, subdir: str, use_cache: bool = True) -> requests.Response:
+        """
+        Docstring for _request
+
+        :param self: Description
+        :param subdir: Description
+        :type subdir: str
+        :param use_cache: Description
+        :type use_cache: bool
+        :return: Description
+        :rtype: requests.Response
+        """
+
+        target_url: str = f"{self._url}{subdir.lstrip('/')}"
+
+        # Avoid needless round trips to the server: when `use_cache` is
+        # enabled, compare the requested URL with the previous one and
+        # reuse the cached response if they match.
+        if use_cache and hasattr(self, "_response") and self._response is not None:
+            if self._response.url == target_url:
+                return self._response
+
+        self._response: requests.Response = self._session.get(target_url, timeout=10)
+        self._response.raise_for_status()
+
+        return self._response
+
+
+    def getsoup(self, subdir: str = "/") -> BeautifulSoup:
+        """
+        Docstring for getsoup
+
+        :param self: Description
+        :return: Description
+        :rtype: BeautifulSoup
+        """
+        self._request(subdir)
+        self._soup = BeautifulSoup(self._response.text, "html.parser")
+        return self._soup
+
+
+
+
+print(Scraper().getsoup(url))
+
+# # Target the magic tag
+# script_tag = soup.find('script', id='__NEXT_DATA__')
+# print(script_tag)
+
+# if script_tag:
+#     # Turn the text into a real Python dictionary
+#     data = json.loads(script_tag.string)
+#     # Navigate the object (Next.js always structures it this way)
+#     product_info = data['props']['pageProps']['initialReduxState']['product']['content']
+
+#     print(f"Wine : {product_info['productName']}")
+#     print(f"Price (excl. tax) : {product_info['items'][0]['htPrice']} €")
+#     print(f"Stock : {product_info['items'][0]['stock']}")
diff --git a/requirements.txt b/requirements.txt
index f7b4ad1..05f3180 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
 requests>=2.32.5
+selenium>=4.40.0
 beautifulsoup4>=4.14.3
\ No newline at end of file
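A quick sketch of how the response cache introduced in this patch behaves. It is not part of the patch itself; it assumes the Scraper above is importable from main, that "vins" is a valid path (the path is purely illustrative), and that the URL is not redirected — requests records the final URL after redirects, which would defeat the equality check in _request:

    from main import Scraper

    scraper = Scraper()
    first = scraper._request("vins")                   # performs the GET
    second = scraper._request("vins")                  # same URL: cached response is reused
    assert first is second                             # no second round trip
    fresh = scraper._request("vins", use_cache=False)  # bypasses the cache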
From 469a351f2a504bf6fa10ac69747a1c3b61c34d29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20GUEZO?=
Date: Wed, 4 Feb 2026 21:01:48 +0100
Subject: [PATCH 2/3] add: add JSON extraction function

---
 main.py      | 77 +++++++++++++++-----------------------------------
 test_main.py |  4 +++-
 2 files changed, 27 insertions(+), 54 deletions(-)

diff --git a/main.py b/main.py
index a2cd2e2..416489d 100644
--- a/main.py
+++ b/main.py
@@ -3,35 +3,6 @@ from typing import Any
 from bs4 import BeautifulSoup
 import json
 
-url = "louis-latour-aloxe-corton-1er-cru-les-chaillots-2018.html"
-# response = requests.get(url)
-# soup = BeautifulSoup(response.text, 'html.parser')
-
-
-class MillesimaSoup(BeautifulSoup):
-    def __init__(self, markup="", features="html.parser", *args, **kwargs):
-        super().__init__(markup, features, *args, **kwargs)
-
-        self._json_data = self._extract_json_data()
-
-    def _extract_json_data(self) -> dict[str, Any]:
-        script = self.find("script", id="__NEXT_DATA__")
-
-        if script and script.string:
-            try:
-                data: dict[str, Any] = json.loads(script.string)
-                # Sanity check: walk the expected Next.js key path
-                # before returning the payload.
-                node: Any = data
-                for key in ['props', 'pageProps', 'initialReduxState', 'product', 'content']:
-                    if not isinstance(node, dict) or key not in node:
-                        return {}
-                    node = node[key]
-                return data
-            except json.JSONDecodeError:
-                return {}
-        return {}
-
 
 class Scraper:
     """
@@ -51,7 +23,7 @@ class Scraper:
         # on every call and keeps a persistent connection to the server.
         self._session: requests.Session = requests.Session()
         self._url: str = "https://www.millesima.fr/"
-        self._soup = None
+        self._soup = self.getsoup()
 
     def _request(self, subdir: str, use_cache: bool = True) -> requests.Response:
         """
@@ -75,11 +47,11 @@ class Scraper:
             if self._response.url == target_url:
                 return self._response
 
-        self._response: requests.Response = self._session.get(target_url, timeout=10)
+        self._response: requests.Response = self._session.get(
+            target_url, timeout=10)
         self._response.raise_for_status()
 
         return self._response
-
 
-    def getsoup(self, subdir: str = "/") -> BeautifulSoup:
+    def getsoup(self, subdir: str | None = "/") -> BeautifulSoup:
         """
@@ -89,24 +61,24 @@ class Scraper:
         :return: Description
         :rtype: BeautifulSoup
         """
-        self._request(subdir)
-        self._soup = BeautifulSoup(self._response.text, "html.parser")
+        if subdir is not None:
+            self._request(subdir)
+            self._soup = BeautifulSoup(self._response.text, "html.parser")
         return self._soup
-
-
-
-print(Scraper().getsoup(url))
-
-# # Target the magic tag
-# script_tag = soup.find('script', id='__NEXT_DATA__')
-# print(script_tag)
-
-# if script_tag:
-#     # Turn the text into a real Python dictionary
-#     data = json.loads(script_tag.string)
-#     # Navigate the object (Next.js always structures it this way)
-#     product_info = data['props']['pageProps']['initialReduxState']['product']['content']
-
-#     print(f"Wine : {product_info['productName']}")
-#     print(f"Price (excl. tax) : {product_info['items'][0]['htPrice']} €")
-#     print(f"Stock : {product_info['items'][0]['stock']}")
+
+    def get_json_data(self):
+        script = self._soup.find("script", id="__NEXT_DATA__")
+        if script and script.string:
+            try:
+                data: dict[str, Any] = json.loads(script.string)
+                # Sanity check: walk the expected Next.js key path
+                # before returning the payload.
+                node: Any = data
+                for key in ['props', 'pageProps', 'initialReduxState', 'product', 'content']:
+                    if not isinstance(node, dict) or key not in node:
+                        return {}
+                    node = node[key]
+                return data
+            except json.JSONDecodeError:
+                pass
+        return {}
diff --git a/test_main.py b/test_main.py
index 4af0b52..645bf07 100644
--- a/test_main.py
+++ b/test_main.py
@@ -1,4 +1,6 @@
 from main import *
 
+scraper = Scraper()
+
 def test_soup():
-    assert getsoup("https://example.com").find('h1').text == "Example Domain"
+    assert scraper.getsoup().find('h1').text[3:12] == "MILLESIMA"
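For reference, a sketch of how get_json_data() can be consumed, reusing the key path from the commented-out example removed above. Field names such as productName and htPrice come from that example and may not match the live site:

    from main import Scraper

    scraper = Scraper()
    scraper.getsoup("louis-latour-aloxe-corton-1er-cru-les-chaillots-2018.html")
    data = scraper.get_json_data()

    # Walk the Next.js state defensively; every .get() falls back to {}.
    product = (data.get('props', {})
                   .get('pageProps', {})
                   .get('initialReduxState', {})
                   .get('product', {})
                   .get('content', {}))
    if product:
        print(f"Wine  : {product['productName']}")
        print(f"Price : {product['items'][0]['htPrice']} €")
        print(f"Stock : {product['items'][0]['stock']}")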
From 1c59297241459546099443ed76d5fb2fa95f03e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20GUEZO?=
Date: Wed, 4 Feb 2026 22:19:29 +0100
Subject: [PATCH 3/3] add(main.py): add comments

---
 main.py | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/main.py b/main.py
index 416489d..bc64468 100644
--- a/main.py
+++ b/main.py
@@ -1,5 +1,5 @@
 import requests
-from typing import Any
+from typing import Any, Dict
 from bs4 import BeautifulSoup
 import json
 
@@ -13,11 +13,7 @@ class Scraper:
 
     def __init__(self):
         """
-        Docstring for __init__
-
-        :param self: Description
-        :param subdir: Description
-        :type subdir: str
+        Initialize the scraping session and fetch the home page.
         """
         # Reusing a single Session avoids repeating the same TCP handshake
         # on every call and keeps a persistent connection to the server.
@@ -27,15 +23,11 @@ class Scraper:
 
     def _request(self, subdir: str, use_cache: bool = True) -> requests.Response:
         """
-        Docstring for _request
-
-        :param self: Description
-        :param subdir: Description
-        :type subdir: str
-        :param use_cache: Description
-        :type use_cache: bool
-        :return: Description
-        :rtype: requests.Response
+        Perform a GET request against the Millesima server.
+        :param subdir: URL path below the site root (e.g. "/vins").
+        :param use_cache: If True, reuse the previous response when the URL matches.
+        :return: The response object for the request.
+        :raises requests.HTTPError: If the server returns an error status (4xx, 5xx).
         """
 
         target_url: str = f"{self._url}{subdir.lstrip('/')}"
@@ -55,10 +47,10 @@ class Scraper:
 
     def getsoup(self, subdir: str | None = "/") -> BeautifulSoup:
         """
-        Docstring for getsoup
+        Fetch a page's HTML and parse it into a BeautifulSoup object.
 
-        :param self: Description
-        :return: Description
+        :param subdir: Path of the page. If None, return the current soup.
+        :return: The parsed document, ready for data extraction.
         :rtype: BeautifulSoup
         """
         if subdir is not None:
@@ -66,7 +58,16 @@ class Scraper:
             self._request(subdir)
             self._soup = BeautifulSoup(self._response.text, "html.parser")
         return self._soup
 
-    def get_json_data(self):
+    def get_json_data(self) -> Dict[str, Any]:
+        """
+        Extract the JSON data embedded in the site's __NEXT_DATA__ tag.
+
+        Many modern (Next.js) sites store their initial state as serialized
+        JSON in a <script id="__NEXT_DATA__"> tag rather than in the
+        rendered HTML itself.
+
+        :return: The parsed JSON payload, or an empty dict on failure.
+        """
         script = self._soup.find("script", id="__NEXT_DATA__")
         if script and script.string:
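The extraction logic documented above can also be exercised without any network access; a minimal offline sketch, using a handcrafted page rather than a real Millesima response:

    import json
    from bs4 import BeautifulSoup

    html = '<html><script id="__NEXT_DATA__">{"props": {"pageProps": {}}}</script></html>'
    soup = BeautifulSoup(html, "html.parser")

    # Same tag lookup as Scraper.get_json_data()
    script = soup.find("script", id="__NEXT_DATA__")
    data = json.loads(script.string) if script and script.string else {}
    assert data == {"props": {"pageProps": {}}}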