Merge pull request #29 from SenZmaKi/v2.1.4
V2.1.4
SenZmaKi authored Mar 5, 2024
2 parents 611be48 + 80c9697 commit cda52d8
Showing 26 changed files with 261 additions and 151 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -12,4 +12,5 @@ marin.py
setups
build
dist
.scraps/
crap.py
56 changes: 56 additions & 0 deletions bump_version.py
@@ -0,0 +1,56 @@
from io import TextIOWrapper
import os
import subprocess

PREV_VERSION = "2.1.3"
NEW_VERSION = "2.1.4"
FILES_PATHS = [
"pyproject.toml",
"senpwai/utils/static.py",
"setup.iss",
"setup_senpcli.iss",
]


def log_error(msg: str) -> None:
print(f"[-] Error: {msg}")


def log_info(msg: str) -> None:
print(f"[+] Info: {msg}")


def log_warning(msg: str) -> None:
print(f"[!] Warning: {msg}")


def truncate(file: TextIOWrapper, content: str) -> None:
file.seek(0)
file.write(content)
file.truncate()


def main() -> None:
log_info(f"Bumping version from {PREV_VERSION} -> {NEW_VERSION}\n")
for file_path in FILES_PATHS:
if not os.path.isfile(file_path):
log_error(f'"{file_path}" not found')
continue
with open(file_path, "r+") as f:
content = f.read()
new_content = content.replace(PREV_VERSION, NEW_VERSION)
if new_content == content:
if NEW_VERSION in new_content:
log_warning(
f'Failed to find previous version in "{file_path}" but the new version is in it'
)
else:
log_error(f'Failed to find previous version in "{file_path}"')
continue
truncate(f, new_content)
log_info(f'Bumped version in "{file_path}"')
subprocess.run("git diff")


if __name__ == "__main__":
main()
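
The seek/write/truncate sequence in truncate() is the only non-obvious part of the new script: after f.read() the file position sits at the end, so writing without f.seek(0) would append, and skipping f.truncate() could leave stale bytes behind whenever the replacement text is shorter than the original. A stand-alone sketch of the same in-place rewrite pattern (the file name and contents below are invented for illustration):

# Hypothetical demo of the rewrite pattern used by bump_version.py.
path = "demo_version.txt"
with open(path, "w") as f:
    f.write('version = "2.1.3"\n')

with open(path, "r+") as f:
    content = f.read()
    new_content = content.replace("2.1.3", "2.1.4")
    f.seek(0)      # rewind before overwriting
    f.write(new_content)
    f.truncate()   # drop leftover bytes if the new text is shorter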
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "senpwai"
version = "2.1.3"
version = "2.1.4"
description = "A desktop app for tracking and batch downloading anime"
authors = ["SenZmaKi <[email protected]>"]
license = "GPL v3"
@@ -9,7 +9,7 @@ include = ["senpwai/assets"]
packages = [{ include = "senpwai" }, { include = "senpcli", from = "senpwai" }]
exclude = ["src/**/test.py"]
repository = "https://github.com/SenZmaKi/Senpwai"
documentation = "https://github.com/SenZmaKi/Senpwai/blob/master/docs"
documentation = "https://github.com/SenZmaKi/Senpwai/blob/master/README.md"
keywords = [
"anime",
"app",
4 changes: 2 additions & 2 deletions senpwai/main.py
@@ -6,7 +6,7 @@
from PyQt6.QtCore import QCoreApplication, Qt
from PyQt6.QtGui import QPalette
from PyQt6.QtWidgets import QApplication
from senpwai.utils.static import APP_NAME, custom_exception_handler
from senpwai.utils.static import APP_NAME, custom_exception_handler, OS
from senpwai.windows.main import MainWindow


@@ -26,7 +26,7 @@ def windows_app_initialisation():


def main():
if sys.platform == "win32":
if OS.is_windows:
windows_app_initialisation()

QCoreApplication.setApplicationName(APP_NAME)
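
main() now gates the Windows-specific setup on OS.is_windows instead of checking sys.platform directly. The OS helper comes from senpwai/utils/static.py, which is not shown in this diff, so the following is only a guess at its shape (the attribute names are assumptions, not taken from the repository):

import sys

# Assumed shape of the OS helper imported from senpwai.utils.static;
# the real implementation may differ.
class OS:
    is_windows = sys.platform == "win32"
    is_linux = sys.platform == "linux"
    is_mac = sys.platform == "darwin"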
6 changes: 3 additions & 3 deletions senpwai/scrapers/gogo/__init__.py
@@ -1,3 +1,3 @@
from .hls import * # noqa F403
from .main import * # noqa F403
from .constants import GOGO # noqa F401
from .hls import * # noqa F403
from .main import * # noqa F403
from .constants import GOGO # noqa F401
5 changes: 3 additions & 2 deletions senpwai/scrapers/gogo/constants.py
@@ -2,9 +2,10 @@

GOGO = "gogo"
GOGO_HOME_URL = "https://anitaku.to"
AJAX_SEARCH_URL = "https://ajax.gogo-load.com/site/loadAjaxSearch?keyword="
AJAX_ENTRY_POINT = "https://ajax.gogocdn.net"
AJAX_SEARCH_URL = f"{AJAX_ENTRY_POINT}/site/loadAjaxSearch?keyword="
AJAX_LOAD_EPS_URL = (
"https://ajax.gogo-load.com/ajax/load-list-episode?ep_start={}&ep_end={}&id={}"
f"{AJAX_ENTRY_POINT}/ajax/load-list-episode?ep_start={{}}&ep_end={{}}&id={{}}"
)
FULL_SITE_NAME = "Gogoanime"
DUB_EXTENSION = " (Dub)"
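
Because AJAX_LOAD_EPS_URL is now built with an f-string, the literal placeholders for the later .format() call have to be escaped as doubled braces. A quick illustration of what the constant expands to (the episode range and anime id below are made-up values):

AJAX_ENTRY_POINT = "https://ajax.gogocdn.net"
AJAX_LOAD_EPS_URL = (
    f"{AJAX_ENTRY_POINT}/ajax/load-list-episode?ep_start={{}}&ep_end={{}}&id={{}}"
)
# The doubled braces survive the f-string as literal "{}" placeholders,
# so .format() can fill them in later.
print(AJAX_LOAD_EPS_URL.format(1, 12, 42))
# https://ajax.gogocdn.net/ajax/load-list-episode?ep_start=1&ep_end=12&id=42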
14 changes: 11 additions & 3 deletions senpwai/scrapers/gogo/main.py
@@ -37,9 +37,13 @@ def search(keyword: str, ignore_dub=True) -> list[tuple[str, str]]:
for a in a_tags:
title = a.text
link = f'{GOGO_HOME_URL}/{a["href"]}'
if DUB_EXTENSION in title and ignore_dub:
continue
title_and_link.append((title, link))
for title, link in title_and_link:
if ignore_dub and DUB_EXTENSION in title:
sub_title = title.replace(DUB_EXTENSION, "")
if any([sub_title == title for title, _ in title_and_link]):
title_and_link.remove((title, link))

return title_and_link


@@ -49,6 +53,10 @@ def extract_anime_id(anime_page_content: bytes) -> int:
return int(anime_id)


def title_is_dub(title: str) -> bool:
return DUB_EXTENSION in title


def get_download_page_links(
start_episode: int, end_episode: int, anime_id: int
) -> list[str]:
@@ -206,6 +214,6 @@ def get_session_cookies(fresh=False) -> RequestsCookieJar:
# A valid User-Agent is required during this post request hence the CLIENT is technically only necessary here
response = CLIENT.post(login_url, form_data, cookies=response.cookies)
SESSION_COOKIES = response.cookies
if len(SESSION_COOKIES) == 0:
if not SESSION_COOKIES:
return get_session_cookies()
return SESSION_COOKIES
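
The new second pass in search() changes what ignore_dub means: instead of discarding every "(Dub)" result, it now drops a dub entry only when the same title is also present as a sub, so dub-only shows still appear. A self-contained sketch of that filtering idea with made-up titles and links (this illustrates the intent rather than copying the function above):

DUB_EXTENSION = " (Dub)"

def drop_redundant_dubs(results: list[tuple[str, str]]) -> list[tuple[str, str]]:
    # Keep a dub entry only if its sub counterpart is absent from the results.
    titles = {title for title, _ in results}
    return [
        (title, link)
        for title, link in results
        if DUB_EXTENSION not in title
        or title.replace(DUB_EXTENSION, "") not in titles
    ]

results = [
    ("Naruto", "https://example.com/naruto"),
    ("Naruto (Dub)", "https://example.com/naruto-dub"),
    ("Boruto (Dub)", "https://example.com/boruto-dub"),
]
print(drop_redundant_dubs(results))
# [('Naruto', 'https://example.com/naruto'), ('Boruto (Dub)', 'https://example.com/boruto-dub')]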
4 changes: 2 additions & 2 deletions senpwai/scrapers/pahe/__init__.py
@@ -1,2 +1,2 @@
from .main import * # noqa: F403
from .constants import PAHE # noqa: F401
from .main import * # noqa: F403
from .constants import PAHE # noqa: F401
9 changes: 6 additions & 3 deletions senpwai/scrapers/pahe/constants.py
@@ -1,4 +1,5 @@
import re

PAHE = "pahe"
PAHE_DOMAIN = "animepahe.ru"
PAHE_HOME_URL = f"https://{PAHE_DOMAIN}"
@@ -22,9 +23,11 @@
Generates the load episodes link from the provided anime page link and page number.
Example: {anime_page_link}&page={page_number}
"""
KWIK_DOMAIN = "kwik.si"
KWIK_PAGE_REGEX = re.compile(rf"https?://{KWIK_DOMAIN}/f/([^\"']+)")
KWIK_PAGE_REGEX = re.compile(r"https?://kwik.si/f/([^\"']+)")
DUB_PATTERN = "eng"

EPISODE_SIZE_REGEX = re.compile(r"\b(\d+)MB\b")
PARAM_REGEX = re.compile(r"""\(\"(\w+)\",\d+,\"(\w+)\",(\d+),(\d+),(\d+)\)""")

CHAR_MAP = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/"
CHAR_MAP_BASE = 10
CHAR_MAP_DIGITS = CHAR_MAP[:CHAR_MAP_BASE]
50 changes: 26 additions & 24 deletions senpwai/scrapers/pahe/main.py
@@ -13,6 +13,8 @@
match_quality,
)
from .constants import (
CHAR_MAP_BASE,
CHAR_MAP_DIGITS,
PAHE_HOME_URL,
FULL_SITE_NAME,
API_ENTRY_POINT,
@@ -36,6 +38,7 @@
}
"""


def site_request(url: str) -> Response:
"""
For requests that go specifically to the domain animepahe.ru instead of e.g., pahe.win or kwik.si
@@ -252,32 +255,30 @@ def calculate_total_download_size(bound_info: list[str]) -> int:
return total_size


def get_string(content: str, s1: int) -> int:
map_thing = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/"
s2 = 10
map_string = map_thing[:s2]
acc = 0
def get_char_code(content: str, s1: int) -> int:
j = 0
for index, c in enumerate(reversed(content)):
acc += (int(c) if c.isdigit() else 0) * int(math.pow(s1, index))
j += (int(c) if c.isdigit() else 0) * int(math.pow(s1, index))
k = ""
while acc > 0:
k = map_string[acc % s2] + k
acc = (acc - (acc % s2)) // s2
return int(k) if k.isdigit() else 0
while j > 0:
k = CHAR_MAP_DIGITS[j % CHAR_MAP_BASE] + k
j = (j - (j % CHAR_MAP_BASE)) // CHAR_MAP_BASE
return int(k) if k else 0


# Courtesy of Saikou app https://github.com/saikou-app/saikou
def decrypt_token_and_post_url_page(full_key: str, key: str, v1: int, v2: int) -> str:
# RIP Saikou
def decrypt_post_form(full_key: str, key: str, v1: int, v2: int) -> str:
r = ""
i = 0
while i < len(full_key):
s = ""
while full_key[i] != key[v2]:
s += full_key[i]
i += 1
for j in range(len(key)):
s = s.replace(key[j], str(j))
r += chr(get_string(s, v2) - v1)
for idx, c in enumerate(key):
s = s.replace(c, str(idx))
r += chr(get_char_code(s, v2) - v1)
i += 1
return r

@@ -294,29 +295,30 @@ def get_direct_download_links(
direct_download_links: list[str] = []
for pahewin_link in pahewin_download_page_links:
# Extract kwik page links
html_page = CLIENT.get(pahewin_link).text
download_link = cast(
re.Match[str], KWIK_PAGE_REGEX.search(html_page)
pahewin_html_page = CLIENT.get(pahewin_link).text
kwik_page_link = cast(
re.Match[str], KWIK_PAGE_REGEX.search(pahewin_html_page)
).group()

# Extract direct download links from kwik page links
response = CLIENT.get(download_link)
cookies = response.cookies
# Extract direct download links from kwik html page
response = CLIENT.get(kwik_page_link)
with open("kwik.html", "wb") as f:
f.write(response.content)
match = cast(re.Match, PARAM_REGEX.search(response.text))
full_key, key, v1, v2 = (
match.group(1),
match.group(2),
match.group(3),
match.group(4),
)
decrypted = decrypt_token_and_post_url_page(full_key, key, int(v1), int(v2))
soup = BeautifulSoup(decrypted, PARSER)
form = decrypt_post_form(full_key, key, int(v1), int(v2))
soup = BeautifulSoup(form, PARSER)
post_url = cast(str, cast(Tag, soup.form)["action"])
token_value = cast(str, cast(Tag, soup.input)["value"])
response = CLIENT.post(
post_url,
headers=CLIENT.append_headers({"Referer": download_link}),
cookies=cookies,
headers=CLIENT.append_headers({"Referer": kwik_page_link}),
cookies=response.cookies,
data={"_token": token_value},
allow_redirects=False,
)
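
get_char_code() is the renamed base-conversion helper: it reads its content argument as a numeral in base s1 (non-digit characters count as 0) and returns the value as a plain decimal int, using the digit set sliced from CHAR_MAP. decrypt_post_form() then turns each key-mapped token into such a numeral, converts it with get_char_code, and subtracts v1 to recover one character of the hidden download form. A small worked example, assuming the package is importable from the repository root (the inputs are arbitrary):

# Assumed import path; get_char_code is defined in senpwai/scrapers/pahe/main.py.
from senpwai.scrapers.pahe.main import get_char_code

# "21" read as a base-6 numeral is 2 * 6 + 1 = 13.
assert get_char_code("21", 6) == 13
# Non-digit characters contribute 0, so stray letters drop out.
assert get_char_code("2a1", 6) == 2 * 36 + 0 * 6 + 1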
19 changes: 13 additions & 6 deletions senpwai/scrapers/test.py
@@ -41,6 +41,7 @@ class FailedTest(Exception):
def __init__(self, msg: str):
super().__init__(msg)


def conditional_print(text: str):
if not SILENT:
print(text)
@@ -153,9 +154,17 @@ def test_get_episode_page_links(
run_time = get_run_time_later()
test_variables = f"Anime ID: {anime_id}"
anime_id = cast(str, anime_id)
start_page_num, end_page_num, _, first_page = pahe.get_episode_pages_info(anime_page_link, start_episode, end_episode)
start_page_num, end_page_num, _, first_page = pahe.get_episode_pages_info(
anime_page_link, start_episode, end_episode
)
episode_page_links = pahe.GetEpisodePageLinks().get_episode_page_links(
start_episode, end_episode, start_page_num, end_page_num, first_page, anime_page_link, anime_id
start_episode,
end_episode,
start_page_num,
end_page_num,
first_page,
anime_page_link,
anime_id,
)
rt = run_time()
fail_if_list_is_empty(
@@ -181,9 +190,7 @@ def test_get_download_page_links(
(
pahewin_page,
pahewin_info,
) = pahe.GetPahewinPageLinks().get_pahewin_page_links_and_info(
eps_page_links
)
) = pahe.GetPahewinPageLinks().get_pahewin_page_links_and_info(eps_page_links)
download_page_links, download_info = pahe.bind_sub_or_dub_to_link_info(
sub_or_dub, pahewin_page, pahewin_info
)
@@ -238,6 +245,7 @@ def fail_if_list_is_empty(
test_variables,
)


def test_getting_direct_download_links(
site: str, download_page_links: list[str], quality: str
) -> list[str]:
@@ -742,4 +750,3 @@ def print_metadata(metadata: AnimeMetadata):
if __name__ == "__main__":
args = ArgParser(sys.argv)
run_tests(args)

3 changes: 2 additions & 1 deletion senpwai/senpcli/main.py
@@ -21,6 +21,7 @@
)
from senpwai.utils.static import (
IS_PIP_INSTALL,
OS,
open_folder,
DUB,
APP_EXE_PATH as SENPWAI_EXE_PATH,
@@ -609,7 +610,7 @@ def handle_update_check_result(
)
if SENPWAI_IS_INSTALLED:
return print("Update available, install it by updating Senpwai")
if sys.platform == "win32":
if OS.is_windows:
print("Update available, would you like to download and install it? (y/n)")
if input("> ").lower() == "y":
try: