diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml
index 9db79e1..afc9269 100644
--- a/deploy/docker-compose.yml
+++ b/deploy/docker-compose.yml
@@ -8,7 +8,6 @@ services:
     environment:
       SECRET_API_KEY: 'superkey_that_can_be_changed'
       TMDB_API_KEY: 'REDATED'
-      FLARESOLVERR_HOST: flaresolverr
       JACKETT_HOST: jackett
       JACKETT_API_KEY: 'REDACTED'
       ZILEAN_URL: 'http://zilean:8181'
@@ -16,6 +15,7 @@ services:
       REDIS_PORT: 6379
       LOG_LEVEL: DEBUG
       LOG_REDACTED: False
+      USE_HTTPS: True
       TZ: Europe/London
     ports:
       - 8080:8080
@@ -25,7 +25,7 @@ services:
       - redis
       - zilean
       - jackett
-      - flaresolverr
+      - postgres
     restart: unless-stopped

  redis:
@@ -47,50 +47,30 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1g
+          memory: 3g
     volumes:
       - ./zilean_data:/app/data
     environment:
-      - Zilean__ElasticSearch__Url=http://elasticsearch:9200
+      Zilean__Database__ConnectionString: "Host=postgres;Port=5432;Database=zilean;Username=zilean;Password=zilean"
+      Zilean__Dmm__ImportBatched: "true"
+      Zilean__Dmm__MaxFilteredResults: 200
+      Zilean__Dmm__MinimumScoreMatch: 0.85
     depends_on:
-      elasticsearch:
-        condition: service_healthy
+      - postgres
-
-  elasticsearch:
-    image: elasticsearch:8.14.3@sha256:1ddbb1ae0754278f3ab53edc24fcc5c790ebc2422cc47abea760b24abee2d88a
-    container_name: elasticsearch
+  postgres:
+    image: postgres:16.3-alpine3.20
+    container_name: postgres
     restart: unless-stopped
     environment:
-      ES_SETTING_DISCOVERY_TYPE: single-node
-      ES_SETTING_XPACK_SECURITY_ENABLED: false
-      ES_SETTING_BOOTSTRAP_MEMORY__LOCK: true
-      ES_JAVA_OPTS: "-Xms512m -Xmx512m"
+      PGDATA: /var/lib/postgresql/data/pgdata
+      POSTGRES_USER: zilean
+      POSTGRES_PASSWORD: zilean
+      POSTGRES_DB: zilean
     expose:
-      - 9200
-    deploy:
-      resources:
-        limits:
-          memory: 2g
-    healthcheck:
-      test: ["CMD-SHELL", "curl -s http://localhost:9200 | grep -q 'You Know, for Search'"]
-      interval: 10s
-      timeout: 10s
-      retries: 5
+      - 5432
     volumes:
-      - elastic_data:/usr/share/elasticsearch/data:rw
-
-  flaresolverr:
-    image: ghcr.io/flaresolverr/flaresolverr:latest
-    container_name: flaresolverr
-    environment:
-      - LOG_LEVEL=${LOG_LEVEL:-info}
-      - LOG_HTML=${LOG_HTML:-false}
-      - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
-      - TZ=Europe/London
-    expose:
-      - 8191
-    restart: unless-stopped
+      - ./zilean_postgresql:/var/lib/postgresql/data/pgdata

  jackett:
    image: lscr.io/linuxserver/jackett:latest
@@ -104,7 +84,4 @@ services:
       - ./blackhole:/downloads
     ports:
       - 9117:9117
-    restart: unless-stopped
-
-volumes:
-  elastic_data:
\ No newline at end of file
+    restart: unless-stopped
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index f044ab2..79b9ea1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "stream-fusion"
-version = "1.0.0"
+version = "1.1.0"
 description = "StreamFusion is an advanced plugin for Stremio that significantly enhances its streaming capabilities with debrid service."
 authors = ["LimeDrive "]
 readme = "README.md"
diff --git a/stream_fusion/utils/zilean/zilean_api.py b/stream_fusion/utils/zilean/zilean_api.py
index a83436a..7e398a7 100644
--- a/stream_fusion/utils/zilean/zilean_api.py
+++ b/stream_fusion/utils/zilean/zilean_api.py
@@ -1,6 +1,6 @@
 import requests
 from typing import List, Optional, Tuple
-from pydantic import BaseModel, Field, ValidationError
+from pydantic import BaseModel, Field
 from requests.adapters import HTTPAdapter
 from urllib3.util.retry import Retry

@@ -8,62 +8,48 @@ from stream_fusion.logging_config import logger


-class DmmQueryRequest(BaseModel):
+class DMMQueryRequest(BaseModel):
     queryText: Optional[str] = None


-class TorrentInfo(BaseModel):
-    resolution: Optional[str] = None
-    year: Optional[int] = None
-    remastered: bool = False
-    source: Optional[str] = None
-    codec: Optional[str] = None
-    group: Optional[str] = None
-    episodes: Tuple[int, ...] = Field(default_factory=tuple)
-    seasons: Tuple[int, ...] = Field(default_factory=tuple)
-    languages: Tuple[str, ...] = Field(default_factory=tuple)
+class DMMImdbFile(BaseModel):
+    imdbId: Optional[str] = None
+    category: Optional[str] = None
     title: Optional[str] = None
-    rawTitle: Optional[str] = None
-    size: int = 0
-    infoHash: Optional[str] = None
-    isPossibleMovie: bool = False
+    adult: bool = False
+    year: int = 0

-    class Config:
-        frozen = True


+class DMMImdbSearchResult(BaseModel):
+    title: Optional[str] = None
+    imdbId: Optional[str] = None
+    year: int = 0
+    score: float = 0.0
+    category: Optional[str] = None

-class ExtractedDmmEntry(BaseModel):
-    filename: str
-    infoHash: str
-    filesize: int
-    parseResponse: Optional[TorrentInfo] = None
-    resolution: Optional[str] = None
+
+class DMMTorrentInfo(BaseModel):
+    info_hash: Optional[str] = None
+    resolution: Tuple[str, ...] = Field(default_factory=tuple)
     year: Optional[int] = None
-    remastered: bool = False
-    source: Optional[str] = None
-    codec: Optional[str] = None
-    group: Optional[str] = None
-    episodes: Optional[Tuple[int, ...]] = None
-    seasons: Optional[Tuple[int, ...]] = None
-    languages: Optional[Tuple[str, ...]] = None
-    title: Optional[str] = None
-    rawTitle: Optional[str] = None
-    size: Optional[int] = None
-    isPossibleMovie: bool = False
+    remastered: Optional[bool] = None
+    codec: Tuple[str, ...] = Field(default_factory=tuple)
+    audio: Tuple[str, ...] = Field(default_factory=tuple)
+    quality: Tuple[str, ...] = Field(default_factory=tuple)
+    episode: Tuple[int, ...] = Field(default_factory=tuple)
+    season: Tuple[int, ...] = Field(default_factory=tuple)
+    language: Tuple[str, ...] = Field(default_factory=tuple)
+    parsed_title: Optional[str] = None
+    raw_title: Optional[str] = None
+    size: int = 0
+    category: Optional[str] = None
+    imdb_id: Optional[str] = None
+    imdb: Optional[DMMImdbFile] = None

     class Config:
         frozen = True


-class ImdbFile(BaseModel):
-    imdbId: Optional[str] = None
-    category: Optional[str] = None
-    title: Optional[str] = None
-    adult: bool = False
-    year: int = 0
-
-
 class ZileanAPI:
     def __init__(
         self,
@@ -105,9 +91,19 @@ def _request(self, method: str, endpoint: str, **kwargs):
             logger.error(f"Erreur lors de la requête API : {e}")
             raise

-    def dmm_search(self, query: DmmQueryRequest) -> List[ExtractedDmmEntry]:
+    def _convert_to_dmm_torrent_info(self, entry: dict) -> DMMTorrentInfo:
+        for key in ['resolution', 'codec', 'audio', 'quality', 'episode', 'season', 'language']:
+            if key in entry and isinstance(entry[key], list):
+                entry[key] = tuple(entry[key])
+
+        if 'imdb' in entry and entry['imdb']:
+            entry['imdb'] = DMMImdbFile(**entry['imdb'])
+
+        return DMMTorrentInfo(**entry)
+
+    def dmm_search(self, query: DMMQueryRequest) -> List[DMMTorrentInfo]:
         response = self._request("POST", "/dmm/search", json=query.dict())
-        return [ExtractedDmmEntry(**entry) for entry in response.json()]
+        return [self._convert_to_dmm_torrent_info(entry) for entry in response.json()]

     def dmm_filtered(
         self,
@@ -117,7 +113,8 @@ def dmm_filtered(
         year: Optional[int] = None,
         language: Optional[str] = None,
         resolution: Optional[str] = None,
-    ) -> List[ExtractedDmmEntry]:
+        imdb_id: Optional[str] = None,
+    ) -> List[DMMTorrentInfo]:
         params = {
             "Query": query,
             "Season": season,
@@ -125,29 +122,11 @@ def dmm_filtered(
             "Year": year,
             "Language": language,
             "Resolution": resolution,
+            "ImdbId": imdb_id,
         }
         params = {k: v for k, v in params.items() if v is not None}
         response = self._request("GET", "/dmm/filtered", params=params)
-
-        entries = []
-        for entry in response.json():
-            for key in ["episodes", "seasons", "languages"]:
-                if key in entry and isinstance(entry[key], list):
-                    entry[key] = tuple(entry[key])
-
-            torrent_info = TorrentInfo(
-                **{k: v for k, v in entry.items() if k in TorrentInfo.__fields__}
-            )
-
-            extracted_entry = ExtractedDmmEntry(
-                filename=entry.get("rawTitle", ""),
-                infoHash=entry.get("infoHash", ""),
-                filesize=entry.get("size", 0),
-                parseResponse=torrent_info,
-            )
-            entries.append(extracted_entry)
-
-        return entries
+        return [self._convert_to_dmm_torrent_info(entry) for entry in response.json()]

     def dmm_on_demand_scrape(self) -> None:
         self._request("GET", "/dmm/on-demand-scrape")
@@ -157,12 +136,12 @@ def healthchecks_ping(self) -> str:
         return response.text

     def imdb_search(
-        self, query: Optional[str] = None, year: Optional[int] = None
-    ) -> List[ImdbFile]:
-        params = {"Query": query, "Year": year}
+        self, query: Optional[str] = None, year: Optional[int] = None, category: Optional[str] = None
+    ) -> List[DMMImdbSearchResult]:
+        params = {"Query": query, "Year": year, "Category": category}
         params = {k: v for k, v in params.items() if v is not None}
         response = self._request("POST", "/imdb/search", params=params)
-        return [ImdbFile(**file) for file in response.json()]
+        return [DMMImdbSearchResult(**file) for file in response.json()]

     def __del__(self):
-        self.session.close()
+        self.session.close()
\ No newline at end of file
diff --git a/stream_fusion/utils/zilean/zilean_result.py b/stream_fusion/utils/zilean/zilean_result.py
index 6eb8767..a4296bd 100644
--- a/stream_fusion/utils/zilean/zilean_result.py
+++ b/stream_fusion/utils/zilean/zilean_result.py
@@ -3,7 +3,7 @@
 from stream_fusion.utils.torrent.torrent_item import TorrentItem
 from stream_fusion.logging_config import logger
 from stream_fusion.utils.detection import detect_languages
-from stream_fusion.utils.zilean.zilean_api import ExtractedDmmEntry
+from stream_fusion.utils.zilean.zilean_api import DMMTorrentInfo


 class ZileanResult:
@@ -39,15 +39,15 @@ def convert_to_torrent_item(self):
             self.parsed_data
         )

-    def from_api_cached_item(self, api_cached_item: ExtractedDmmEntry, media):
+    def from_api_cached_item(self, api_cached_item: DMMTorrentInfo, media):
         # if type(api_cached_item) is not dict:
         #     logger.error(api_cached_item)
-        self.info_hash = api_cached_item.infoHash
+        self.info_hash = api_cached_item.info_hash
         if len(self.info_hash) != 40:
             raise ValueError(f"The hash '{self.info_hash}' does not have the expected length of 40 characters.")

-        parsed_result = parse(api_cached_item.filename)
+        parsed_result = parse(api_cached_item.raw_title)
         self.raw_title = parsed_result.raw_title

         self.indexer = "DMM API"
@@ -55,7 +55,7 @@ def from_api_cached_item(self, api_cached_item: ExtractedDmmEntry, media):
         self.link = self.magnet
         self.languages = detect_languages(self.raw_title)
         self.seeders = 0
-        self.size = api_cached_item.filesize
+        self.size = api_cached_item.size
         self.type = media.type
         self.privacy = "private"
         self.from_cache = True
diff --git a/stream_fusion/utils/zilean/zilean_service.py b/stream_fusion/utils/zilean/zilean_service.py
index 8a11c8c..fa6ea89 100644
--- a/stream_fusion/utils/zilean/zilean_service.py
+++ b/stream_fusion/utils/zilean/zilean_service.py
@@ -6,7 +6,7 @@ from stream_fusion.utils.models.movie import Movie
 from stream_fusion.utils.models.series import Series
 from stream_fusion.settings import settings
-from stream_fusion.utils.zilean.zilean_api import ZileanAPI, DmmQueryRequest, ExtractedDmmEntry
+from stream_fusion.utils.zilean.zilean_api import ZileanAPI, DMMQueryRequest, DMMTorrentInfo


 class ZileanService:
     def __init__(self, config):
@@ -14,7 +14,7 @@ def __init__(self, config):
         self.logger = logger
         self.max_workers = settings.zilean_max_workers

-    def search(self, media: Union[Movie, Series]) -> List[ExtractedDmmEntry]:
+    def search(self, media: Union[Movie, Series]) -> List[DMMTorrentInfo]:
         if isinstance(media, Movie):
             return self.__search_movie(media)
         elif isinstance(media, Series):
@@ -22,30 +22,14 @@ def search(self, media: Union[Movie, Series]) -> List[ExtractedDmmEntry]:
         else:
             raise TypeError("Only Movie and Series are allowed as media!")

-    # def __clean_title(self, title: str) -> str:
-    #     pronouns_to_remove = [
-    #         'le', 'la', 'les', 'l\'', 'un', 'une', 'des', 'du', 'de', 'à', 'au', 'aux',
-    #         'the', 'a', 'an', 'some', 'of', 'to', 'at', 'in', 'on', 'for',
-    #         'he', 'she', 'it', 'they', 'we', 'you', 'i', 'me', 'him', 'her', 'them', 'us',
-    #         'il', 'elle', 'on', 'nous', 'vous', 'ils', 'elles', 'je', 'tu', 'moi', 'toi', 'lui'
-    #     ]
-    #     title = title.lower()
-    #     title = re.sub(r'[^a-zA-Z0-9\s]', ' ', title)
-    #     words = title.split()
-    #     words = [word for word in words if word not in pronouns_to_remove]
-    #     cleaned_title = ' '.join(words)
-    #     cleaned_title = re.sub(r'\s+', ' ', cleaned_title).strip()
-    #     return cleaned_title
-
-    def __deduplicate_api_results(self, api_results: List[ExtractedDmmEntry]) -> List[ExtractedDmmEntry]:
+    def __deduplicate_api_results(self, api_results: List[DMMTorrentInfo]) -> List[DMMTorrentInfo]:
         unique_results = set()
         deduplicated_results = []
         for result in api_results:
             result_tuple = (
-                result.filename,
-                result.infoHash,
-                result.filesize,
-                result.parseResponse.model_dump_json() if result.parseResponse else None
+                result.raw_title,
+                result.info_hash,
+                result.size,
             )
             if result_tuple not in unique_results:
                 unique_results.add(result_tuple)
@@ -56,40 +40,52 @@ def __remove_duplicate_titles(self, titles: List[str]) -> List[str]:
         seen = set()
         return [title for title in titles if not (title.lower() in seen or seen.add(title.lower()))]

-    def __search_movie(self, movie: Movie) -> List[ExtractedDmmEntry]:
+    def __search_movie(self, movie: Movie) -> List[DMMTorrentInfo]:
         unique_titles = self.__remove_duplicate_titles(movie.titles)
-        # clean_titles = [self.__clean_title(title) for title in unique_titles]
-        return self.__threaded_search_movie(unique_titles)
+        keyword_results = self.__threaded_search_movie(unique_titles)
+
+        # Search by IMDb ID
+        imdb_results = self.__search_by_imdb_id(movie.id)
+
+        # Combine and deduplicate results
+        all_results = keyword_results + imdb_results
+        return self.__deduplicate_api_results(all_results)

-    def __search_series(self, series: Series) -> List[ExtractedDmmEntry]:
+    def __search_series(self, series: Series) -> List[DMMTorrentInfo]:
         unique_titles = self.__remove_duplicate_titles(series.titles)
-        # clean_titles = [self.__clean_title(title) for title in unique_titles]
-        return self.__threaded_search_series(unique_titles, series)
+        keyword_results = self.__threaded_search_series(unique_titles, series)
+
+        # Search by IMDb ID
+        imdb_results = self.__search_by_imdb_id(series.id)
+
+        # Combine and deduplicate results
+        all_results = keyword_results + imdb_results
+        return self.__deduplicate_api_results(all_results)

-    def __threaded_search_movie(self, search_texts: List[str]) -> List[ExtractedDmmEntry]:
+    def __threaded_search_movie(self, search_texts: List[str]) -> List[DMMTorrentInfo]:
         results = []
         with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
             future_to_text = {executor.submit(self.__make_movie_request, text): text for text in search_texts}
             for future in as_completed(future_to_text):
                 results.extend(future.result())
-        return self.__deduplicate_api_results(results)
+        return results

-    def __threaded_search_series(self, search_texts: List[str], series: Series) -> List[ExtractedDmmEntry]:
+    def __threaded_search_series(self, search_texts: List[str], series: Series) -> List[DMMTorrentInfo]:
         results = []
         with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
             future_to_text = {executor.submit(self.__make_series_request, text, series): text for text in search_texts}
             for future in as_completed(future_to_text):
                 results.extend(future.result())
-        return self.__deduplicate_api_results(results)
+        return results

-    def __make_movie_request(self, query_text: str) -> List[ExtractedDmmEntry]:
+    def __make_movie_request(self, query_text: str) -> List[DMMTorrentInfo]:
         try:
-            return self.zilean_api.dmm_search(DmmQueryRequest(queryText=query_text))
+            return self.zilean_api.dmm_search(DMMQueryRequest(queryText=query_text))
         except Exception as e:
             self.logger.exception(f"An exception occurred while searching for movie '{query_text}' on Zilean: {str(e)}")
             return []

-    def __make_series_request(self, query_text: str, series: Series) -> List[ExtractedDmmEntry]:
+    def __make_series_request(self, query_text: str, series: Series) -> List[DMMTorrentInfo]:
         try:
             season = getattr(series, 'season', None)
             episode = getattr(series, 'episode', None)
@@ -106,4 +102,11 @@ def __make_series_request(self, query_text: str, series: Series) -> List[Extract
             )
         except Exception as e:
             self.logger.exception(f"An exception occurred while searching for series '{query_text}' on Zilean: {str(e)}")
+            return []
+
+    def __search_by_imdb_id(self, imdb_id: str) -> List[DMMTorrentInfo]:
+        try:
+            return self.zilean_api.dmm_filtered(imdb_id=imdb_id)
+        except Exception as e:
+            self.logger.exception(f"An exception occurred while searching for IMDb ID '{imdb_id}' on Zilean: {str(e)}")
             return []
\ No newline at end of file
diff --git a/stream_fusion/web/root/search/views.py b/stream_fusion/web/root/search/views.py
index 365a533..1f073d2 100644
--- a/stream_fusion/web/root/search/views.py
+++ b/stream_fusion/web/root/search/views.py
@@ -154,7 +154,7 @@ def perform_search(update_cache=False):
         zilean_search_results = [
             ZileanResult().from_api_cached_item(torrent, media)
             for torrent in zilean_search_results
-            if len(getattr(torrent, "infoHash", "")) == 40
+            if len(getattr(torrent, "info_hash", "")) == 40
         ]
         zilean_search_results = filter_items(
             zilean_search_results, media, config=config