Merge development into master

github-actions[bot] 2025-05-11 16:41:18 +00:00 committed by GitHub
commit 920853daee
2259 changed files with 88325 additions and 72667 deletions

@@ -2,7 +2,7 @@
## Tools required
- Python 3.8.x to 3.11.x (3.10.x is highly recommended and 3.12 or greater is proscribed).
- Python 3.8.x to 3.12.x (3.10.x is highly recommended and 3.13 or greater is proscribed).
- PyCharm or Visual Studio Code IDEs are recommended, but if you're happy with Vim, enjoy it!
- Git.
- UI testing must be done using Chrome latest version.

@@ -48,6 +48,7 @@ If you need something that is not already part of Bazarr, feel free to create a
## Supported subtitles providers:
- Addic7ed
- AnimeKalesi
- Animetosho (requires AniDb HTTP API client described [here](https://wiki.anidb.net/HTTP_API_Definition))
- Assrt
- AvistaZ, CinemaZ (Get session cookies using method described [here](https://github.com/morpheus65535/bazarr/pull/2375#issuecomment-2057010996))
@@ -87,6 +88,7 @@ If you need something that is not already part of Bazarr, feel free to create a
- Titlovi
- Titrari.ro
- Titulky.com
- Turkcealtyazi.org
- TuSubtitulo
- TVSubtitles
- Whisper (requires [ahmetoner/whisper-asr-webservice](https://github.com/ahmetoner/whisper-asr-webservice))

@@ -107,6 +107,22 @@ def check_status():
child_process = start_bazarr()
def is_process_running(pid):
commands = {
"win": ["tasklist", "/FI", f"PID eq {pid}"],
"linux": ["ps", "-eo", "pid"],
"darwin": ["ps", "-ax", "-o", "pid"]
}
# Determine OS and execute corresponding command
for key in commands:
if sys.platform.startswith(key):
result = subprocess.run(commands[key], capture_output=True, text=True)
return str(pid) in result.stdout.split()
print("Unsupported OS")
return False
def interrupt_handler(signum, frame):
# catch and ignore keyboard interrupt Ctrl-C
# the child process Server object will catch SIGINT and perform an orderly shutdown
@@ -116,7 +132,9 @@ def interrupt_handler(signum, frame):
interrupted = True
print('Handling keyboard interrupt...')
else:
print("Stop doing that! I heard you the first time!")
if not is_process_running(child_process):
# this will be caught by the main loop below
raise SystemExit(EXIT_INTERRUPT)
if __name__ == '__main__':
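
Side note on is_process_running() above: on POSIX systems the same liveness check can be done without spawning ps at all. A minimal sketch (POSIX-only, since os.kill has different semantics on Windows; not what this commit uses):

    import os

    def is_process_running_posix(pid):
        # signal 0 performs error checking only; it raises if the PID is gone
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            return False
        except PermissionError:
            return True  # the process exists but belongs to another user
        return True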

@@ -3,6 +3,7 @@
from flask_restx import Resource, Namespace, reqparse, fields, marshal
from app.database import TableMovies, database, update, select, func
from radarr.sync.movies import update_one_movie
from subtitles.indexer.movies import list_missing_subtitles_movies, movies_scan_subtitles
from app.event_handler import event_stream
from subtitles.wanted import wanted_search_missing_subtitles_movies
@@ -158,7 +159,7 @@ class Movies(Resource):
patch_request_parser = reqparse.RequestParser()
patch_request_parser.add_argument('radarrid', type=int, required=False, help='Radarr movie ID')
patch_request_parser.add_argument('action', type=str, required=False, help='Action to perform from ["scan-disk", '
'"search-missing", "search-wanted"]')
'"search-missing", "search-wanted", "sync"]')
@authenticate
@api_ns_movies.doc(parser=patch_request_parser)
@@ -184,5 +185,8 @@ class Movies(Resource):
elif action == "search-wanted":
wanted_search_missing_subtitles_movies()
return '', 204
elif action == "sync":
update_one_movie(radarrid, 'updated', True)
return '', 204
return 'Unknown action', 400
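
The new "sync" action can be exercised through the API; a hypothetical call against a default local install (host, port, movie ID and API key below are illustrative assumptions, not part of this diff):

    import requests

    # asks Bazarr to re-sync one movie from Radarr
    requests.patch(
        "http://localhost:6767/api/movies",
        params={"radarrid": 123, "action": "sync"},
        headers={"X-API-KEY": "your-api-key"},
    )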

@@ -6,6 +6,7 @@ from flask_restx import Resource, Namespace, reqparse, fields, marshal
from functools import reduce
from app.database import get_exclusion_clause, TableEpisodes, TableShows, database, select, update, func
from sonarr.sync.series import update_one_series
from subtitles.indexer.series import list_missing_subtitles, series_scan_subtitles
from subtitles.mass_download import series_download_subtitles
from subtitles.wanted import wanted_search_missing_subtitles_series
@@ -198,7 +199,7 @@ class Series(Resource):
patch_request_parser = reqparse.RequestParser()
patch_request_parser.add_argument('seriesid', type=int, required=False, help='Sonarr series ID')
patch_request_parser.add_argument('action', type=str, required=False, help='Action to perform from ["scan-disk", '
'"search-missing", "search-wanted"]')
'"search-missing", "search-wanted", "sync"]')
@authenticate
@api_ns_series.doc(parser=patch_request_parser)
@@ -224,5 +225,8 @@ class Series(Resource):
elif action == "search-wanted":
wanted_search_missing_subtitles_series()
return '', 204
elif action == "sync":
update_one_series(seriesid, 'updated')
return '', 204
return 'Unknown action', 400

@@ -9,6 +9,7 @@ from .tasks import api_ns_system_tasks
from .logs import api_ns_system_logs
from .status import api_ns_system_status
from .health import api_ns_system_health
from .ping import api_ns_system_ping
from .releases import api_ns_system_releases
from .settings import api_ns_system_settings
from .languages import api_ns_system_languages
@@ -25,6 +26,7 @@ api_ns_list_system = [
api_ns_system_languages_profiles,
api_ns_system_logs,
api_ns_system_notifications,
api_ns_system_ping,
api_ns_system_releases,
api_ns_system_searches,
api_ns_system_settings,

@@ -44,6 +44,7 @@ class SystemBackups(Resource):
@api_ns_system_backups.response(204, 'Success')
@api_ns_system_backups.response(400, 'Filename not provided')
@api_ns_system_backups.response(401, 'Not Authenticated')
@api_ns_system_backups.response(500, 'Error while restoring backup. Check logs.')
def patch(self):
"""Restore a backup file"""
args = self.patch_request_parser.parse_args()
@@ -52,7 +53,10 @@
restored = prepare_restore(filename)
if restored:
return '', 204
return 'Filename not provided', 400
else:
return 'Error while restoring backup. Check logs.', 500
else:
return 'Filename not provided', 400
delete_request_parser = reqparse.RequestParser()
delete_request_parser.add_argument('filename', type=str, required=True, help='Backups to delete filename')

@@ -23,6 +23,20 @@ class SystemLogs(Resource):
'exception': fields.String(),
})
def handle_record(self, logs, multi_line_record):
# finalize the multi-line record
if logs:
# update the exception of the last entry
last_log = logs[-1]
last_log["exception"] += "\n".join(multi_line_record)
else:
# the multi-line record is the first entry in the log
last_log = dict()
last_log["type"] = "ERROR"
last_log["message"] = "See exception"
last_log["exception"] = "\n".join(multi_line_record)
logs.append(last_log)
@authenticate
@api_ns_system_logs.doc(parser=None)
@api_ns_system_logs.response(200, 'Success')
@@ -54,9 +68,13 @@ class SystemLogs(Resource):
include = include.casefold()
exclude = exclude.casefold()
# regular expression to identify the start of a log record (timestamp-based)
record_start_pattern = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
with io.open(get_log_file_path(), encoding='UTF-8') as file:
raw_lines = file.read()
lines = raw_lines.split('|\n')
multi_line_record = []
for line in lines:
if line == '':
continue
@@ -86,18 +104,31 @@
skip = exclude in compare_line
if skip:
continue
raw_message = line.split('|')
raw_message_len = len(raw_message)
if raw_message_len > 3:
log = dict()
log["timestamp"] = raw_message[0]
log["type"] = raw_message[1].rstrip()
log["message"] = raw_message[3]
if raw_message_len > 4 and raw_message[4] != '\n':
log['exception'] = raw_message[4].strip('\'').replace(' ', '\u2003\u2003')
else:
log['exception'] = None
logs.append(log)
# check if the line has a timestamp that matches the start of a new log record
if record_start_pattern.match(line):
if multi_line_record:
self.handle_record(logs, multi_line_record)
# reset for the next multi-line record
multi_line_record = []
raw_message = line.split('|')
raw_message_len = len(raw_message)
if raw_message_len > 3:
log = dict()
log["timestamp"] = raw_message[0]
log["type"] = raw_message[1].rstrip()
log["message"] = raw_message[3]
if raw_message_len > 4 and raw_message[4] != '\n':
log['exception'] = raw_message[4].strip('\'').replace(' ', '\u2003\u2003')
else:
log['exception'] = None
logs.append(log)
else:
# accumulate lines that do not have new record header timestamps
multi_line_record.append(line.strip())
if multi_line_record:
# finalize the multi-line record and update the exception of the last entry
self.handle_record(logs, multi_line_record)
logs.reverse()
return marshal(logs, self.get_response_model, envelope='data')
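
To illustrate the record-splitting logic above: only lines that start with a timestamp open a new log record, so traceback lines fall through to multi_line_record. A small self-contained check:

    import re

    record_start_pattern = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")

    assert record_start_pattern.match("2025-05-11 16:41:18|INFO|root|started|")
    assert not record_start_pattern.match("Traceback (most recent call last):")
    assert not record_start_pattern.match('  File "main.py", line 1, in <module>')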

bazarr/api/system/ping.py (new file)

@@ -0,0 +1,13 @@
# coding=utf-8
from flask_restx import Resource, Namespace
api_ns_system_ping = Namespace('System Ping', description='Unauthenticated endpoint to check Bazarr availability')
@api_ns_system_ping.route('system/ping')
class SystemPing(Resource):
@api_ns_system_ping.response(200, "Success")
def get(self):
"""Return status and http 200"""
return {'status': 'OK'}, 200
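
Because the endpoint is unauthenticated, it is handy for container health checks. A quick probe, assuming a default install on port 6767 with the API mounted under /api (both assumptions about your deployment):

    import requests

    resp = requests.get("http://localhost:6767/api/system/ping", timeout=5)
    print(resp.status_code, resp.json())  # expected: 200 {'status': 'OK'}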

@@ -22,6 +22,8 @@ api_ns_system_status = Namespace('System Status', description='List environment
@api_ns_system_status.route('system/status')
class SystemStatus(Resource):
@authenticate
@api_ns_system_status.response(200, "Success")
@api_ns_system_status.response(401, 'Not Authenticated')
def get(self):
"""Return environment information and versions"""
package_version = ''

@@ -73,8 +73,8 @@ def postprocess(item):
if len(language) > 1:
item['subtitles'][i].update(
{
"forced": language[1] == 'forced',
"hi": language[1] == 'hi',
"forced": language[1].lower() == 'forced',
"hi": language[1].lower() == 'hi',
}
)
if settings.general.embedded_subs_show_desired and item.get('profileId'):

@@ -1,6 +1,7 @@
# coding=utf-8
import os
import sys
import hashlib
import requests
import logging
@@ -12,10 +13,16 @@ from operator import itemgetter
from app.get_providers import get_enabled_providers
from app.database import TableAnnouncements, database, insert, select
from .get_args import args
from app.config import settings
from app.get_args import args
from sonarr.info import get_sonarr_info
from radarr.info import get_radarr_info
from app.check_update import deprecated_python_version
def upcoming_deprecated_python_version():
# return True if the Python version is about to be deprecated
return sys.version_info.major == 2 or (sys.version_info.major == 3 and sys.version_info.minor < 9)
# Announcements as received by the browser must be in the form of a list of dicts converted to JSON
@@ -79,10 +86,10 @@ def get_local_announcements():
# opensubtitles.org end-of-life
enabled_providers = get_enabled_providers()
if enabled_providers and 'opensubtitles' in enabled_providers:
if enabled_providers and 'opensubtitles' in enabled_providers and not settings.opensubtitles.vip:
announcements.append({
'text': 'Opensubtitles.org will be deprecated soon, migrate to Opensubtitles.com ASAP and disable this '
'provider to remove this announcement.',
'text': 'Opensubtitles.org is deprecated for non-VIP users, migrate to Opensubtitles.com ASAP and disable '
'this provider to remove this announcement.',
'link': 'https://wiki.bazarr.media/Troubleshooting/OpenSubtitles-migration/',
'dismissible': False,
'timestamp': 1676236978,
@@ -106,13 +113,14 @@ def get_local_announcements():
'timestamp': 1679606309,
})
# deprecated Python versions
if deprecated_python_version():
# upcoming deprecated Python versions
if upcoming_deprecated_python_version():
announcements.append({
'text': 'Starting with Bazarr 1.4, support for Python 3.7 will get dropped. Upgrade your current version of'
'text': 'Starting with Bazarr 1.6, support for Python 3.8 will get dropped. Upgrade your current version of'
' Python ASAP to get further updates.',
'link': 'https://wiki.bazarr.media/Troubleshooting/Windows_installer_reinstall/',
'dismissible': False,
'timestamp': 1691162383,
'timestamp': 1744469706,
})
for announcement in announcements:

@@ -95,6 +95,7 @@ validators = [
Validator('general.use_postprocessing_threshold_movie', must_exist=True, default=False, is_type_of=bool),
Validator('general.use_sonarr', must_exist=True, default=False, is_type_of=bool),
Validator('general.use_radarr', must_exist=True, default=False, is_type_of=bool),
Validator('general.use_plex', must_exist=True, default=False, is_type_of=bool),
Validator('general.path_mappings_movie', must_exist=True, default=[], is_type_of=list),
Validator('general.serie_tag_enabled', must_exist=True, default=False, is_type_of=bool),
Validator('general.movie_tag_enabled', must_exist=True, default=False, is_type_of=bool),
@@ -128,14 +129,15 @@ validators = [
Validator('general.subfolder_custom', must_exist=True, default='', is_type_of=str),
Validator('general.upgrade_subs', must_exist=True, default=True, is_type_of=bool),
Validator('general.upgrade_frequency', must_exist=True, default=12, is_type_of=int,
is_in=[6, 12, 24, ONE_HUNDRED_YEARS_IN_HOURS]),
is_in=[6, 12, 24, 168, ONE_HUNDRED_YEARS_IN_HOURS]),
Validator('general.days_to_upgrade_subs', must_exist=True, default=7, is_type_of=int, gte=0, lte=30),
Validator('general.upgrade_manual', must_exist=True, default=True, is_type_of=bool),
Validator('general.anti_captcha_provider', must_exist=True, default=None, is_type_of=(NoneType, str),
is_in=[None, 'anti-captcha', 'death-by-captcha']),
Validator('general.wanted_search_frequency', must_exist=True, default=6, is_type_of=int, is_in=[6, 12, 24, ONE_HUNDRED_YEARS_IN_HOURS]),
Validator('general.wanted_search_frequency', must_exist=True, default=6, is_type_of=int,
is_in=[6, 12, 24, 168, ONE_HUNDRED_YEARS_IN_HOURS]),
Validator('general.wanted_search_frequency_movie', must_exist=True, default=6, is_type_of=int,
is_in=[6, 12, 24, ONE_HUNDRED_YEARS_IN_HOURS]),
is_in=[6, 12, 24, 168, ONE_HUNDRED_YEARS_IN_HOURS]),
Validator('general.subzero_mods', must_exist=True, default='', is_type_of=str),
Validator('general.dont_notify_manual_actions', must_exist=True, default=False, is_type_of=bool),
Validator('general.hi_extension', must_exist=True, default='hi', is_type_of=str, is_in=['hi', 'cc', 'sdh']),
@@ -215,9 +217,21 @@ validators = [
Validator('radarr.defer_search_signalr', must_exist=True, default=False, is_type_of=bool),
Validator('radarr.sync_only_monitored_movies', must_exist=True, default=False, is_type_of=bool),
# plex section
Validator('plex.ip', must_exist=True, default='127.0.0.1', is_type_of=str),
Validator('plex.port', must_exist=True, default=32400, is_type_of=int, gte=1, lte=65535),
Validator('plex.ssl', must_exist=True, default=False, is_type_of=bool),
Validator('plex.apikey', must_exist=True, default='', is_type_of=str),
Validator('plex.movie_library', must_exist=True, default='', is_type_of=str),
Validator('plex.series_library', must_exist=True, default='', is_type_of=str),
Validator('plex.set_movie_added', must_exist=True, default=False, is_type_of=bool),
Validator('plex.set_episode_added', must_exist=True, default=False, is_type_of=bool),
Validator('plex.update_movie_library', must_exist=True, default=False, is_type_of=bool),
Validator('plex.update_series_library', must_exist=True, default=False, is_type_of=bool),
# proxy section
Validator('proxy.type', must_exist=True, default=None, is_type_of=(NoneType, str),
is_in=[None, 'socks5', 'http']),
is_in=[None, 'socks5', 'socks5h', 'http']),
Validator('proxy.url', must_exist=True, default='', is_type_of=str),
Validator('proxy.port', must_exist=True, default='', is_type_of=(str, int)),
Validator('proxy.username', must_exist=True, default='', is_type_of=str, cast=str),
@@ -351,6 +365,10 @@ validators = [
# subdl section
Validator('subdl.api_key', must_exist=True, default='', is_type_of=str, cast=str),
# turkcealtyaziorg section
Validator('turkcealtyaziorg.cookies', must_exist=True, default='', is_type_of=str),
Validator('turkcealtyaziorg.user_agent', must_exist=True, default='', is_type_of=str),
# subsync section
Validator('subsync.use_subsync', must_exist=True, default=False, is_type_of=bool),
Validator('subsync.use_subsync_threshold', must_exist=True, default=False, is_type_of=bool),
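
On the newly allowed proxy type: with requests-style proxy URLs, socks5h differs from socks5 only in where DNS resolution happens. A sketch (the proxy address is hypothetical; requires the requests[socks] extra):

    import requests

    # hostnames resolved locally, traffic tunneled through the proxy:
    local_dns = {"https": "socks5://127.0.0.1:1080"}
    # hostnames resolved by the proxy itself (avoids local DNS blocks or leaks):
    remote_dns = {"https": "socks5h://127.0.0.1:1080"}

    requests.get("https://example.com", proxies=remote_dns, timeout=10)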

@@ -3,7 +3,22 @@
import os
import argparse
from distutils.util import strtobool
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError(f"invalid truth value {val!r}")
no_update = os.environ.get("NO_UPDATE", "false").strip() == "true"
no_cli = os.environ.get("NO_CLI", "false").strip() == "true"
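
distutils (and with it distutils.util.strtobool) was removed in Python 3.12, hence the vendored copy above. It behaves like the original, for example:

    assert strtobool("Yes") == 1
    assert strtobool("off") == 0
    try:
        strtobool("maybe")
    except ValueError:
        pass  # anything outside the known truthy/falsy strings raises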

@@ -15,7 +15,7 @@ import re
from requests import ConnectionError
from subzero.language import Language
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ParseResponseError, IPAddressBlocked, \
MustGetBlacklisted, SearchLimitReached
MustGetBlacklisted, SearchLimitReached, ProviderError
from subliminal.providers.opensubtitles import DownloadLimitReached, PaymentRequired, Unauthorized
from subliminal.exceptions import DownloadLimitExceeded, ServiceUnavailable, AuthenticationError, ConfigurationError
from subliminal import region as subliminal_cache_region
@@ -123,6 +123,11 @@ def provider_throttle_map():
"whisperai": {
ConnectionError: (datetime.timedelta(hours=24), "24 hours"),
},
"regielive": {
APIThrottled: (datetime.timedelta(hours=1), "1 hour"),
TooManyRequests: (datetime.timedelta(minutes=5), "5 minutes"),
ProviderError: (datetime.timedelta(minutes=10), "10 minutes"),
},
}
@@ -341,6 +346,10 @@ def get_providers_auth():
},
"subdl": {
'api_key': settings.subdl.api_key,
},
'turkcealtyaziorg': {
'cookies': settings.turkcealtyaziorg.cookies,
'user_agent': settings.turkcealtyaziorg.user_agent,
}
}

@@ -142,6 +142,7 @@ def configure_logging(debug=False):
logging.getLogger("ffsubsync.subtitle_parser").setLevel(logging.DEBUG)
logging.getLogger("ffsubsync.speech_transformers").setLevel(logging.DEBUG)
logging.getLogger("ffsubsync.ffsubsync").setLevel(logging.DEBUG)
logging.getLogger("ffsubsync.aligners").setLevel(logging.DEBUG)
logging.getLogger("srt").setLevel(logging.DEBUG)
logging.debug('Bazarr version: %s', os.environ["BAZARR_VERSION"])
logging.debug('Bazarr branch: %s', settings.general.branch)
@@ -159,6 +160,7 @@ def configure_logging(debug=False):
logging.getLogger("ffsubsync.subtitle_parser").setLevel(logging.ERROR)
logging.getLogger("ffsubsync.speech_transformers").setLevel(logging.ERROR)
logging.getLogger("ffsubsync.ffsubsync").setLevel(logging.ERROR)
logging.getLogger("ffsubsync.aligners").setLevel(logging.ERROR)
logging.getLogger("srt").setLevel(logging.ERROR)
logging.getLogger("SignalRCoreClient").setLevel(logging.CRITICAL)
logging.getLogger("websocket").setLevel(logging.CRITICAL)

@@ -6,7 +6,6 @@ import pretty
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.events import EVENT_JOB_SUBMITTED, EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from datetime import datetime, timedelta
from calendar import day_name
@@ -65,11 +64,17 @@ class Scheduler:
def __init__(self):
self.__running_tasks = []
# delete empty TZ environment variable to prevent UserWarning
if os.environ.get("TZ") == "":
del os.environ["TZ"]
try:
self.timezone = get_localzone()
except zoneinfo.ZoneInfoNotFoundError as e:
logging.error(f"BAZARR cannot use specified timezone: {e}")
except zoneinfo.ZoneInfoNotFoundError:
logging.error("BAZARR cannot use the specified timezone and will use UTC instead.")
self.timezone = tz.gettz("UTC")
else:
logging.info(f"Scheduler will use this timezone: {self.timezone}")
self.aps_scheduler = BackgroundScheduler({'apscheduler.timezone': self.timezone})
@@ -109,7 +114,7 @@ class Scheduler:
def add_job(self, job, name=None, max_instances=1, coalesce=True, args=None, kwargs=None):
self.aps_scheduler.add_job(
job, DateTrigger(run_date=datetime.now()), name=name, id=name, max_instances=max_instances,
job, 'date', run_date=datetime.now(), name=name, id=name, max_instances=max_instances,
coalesce=coalesce, args=args, kwargs=kwargs)
def execute_job_now(self, taskid):
@@ -199,34 +204,34 @@ class Scheduler:
def __sonarr_update_task(self):
if settings.general.use_sonarr:
self.aps_scheduler.add_job(
update_series, IntervalTrigger(minutes=int(settings.sonarr.series_sync)), max_instances=1,
update_series, 'interval', minutes=int(settings.sonarr.series_sync), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_series', name='Sync with Sonarr',
replace_existing=True)
def __radarr_update_task(self):
if settings.general.use_radarr:
self.aps_scheduler.add_job(
update_movies, IntervalTrigger(minutes=int(settings.radarr.movies_sync)), max_instances=1,
update_movies, 'interval', minutes=int(settings.radarr.movies_sync), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_movies', name='Sync with Radarr',
replace_existing=True)
def __cache_cleanup_task(self):
self.aps_scheduler.add_job(cache_maintenance, IntervalTrigger(hours=24), max_instances=1, coalesce=True,
self.aps_scheduler.add_job(cache_maintenance, 'interval', hours=24, max_instances=1, coalesce=True,
misfire_grace_time=15, id='cache_cleanup', name='Cache Maintenance')
def __check_health_task(self):
self.aps_scheduler.add_job(check_health, IntervalTrigger(hours=6), max_instances=1, coalesce=True,
self.aps_scheduler.add_job(check_health, 'interval', hours=6, max_instances=1, coalesce=True,
misfire_grace_time=15, id='check_health', name='Check Health')
def __automatic_backup(self):
backup = settings.backup.frequency
if backup == "Daily":
trigger = CronTrigger(hour=settings.backup.hour)
trigger = {'hour': settings.backup.hour}
elif backup == "Weekly":
trigger = CronTrigger(day_of_week=settings.backup.day, hour=settings.backup.hour)
elif backup == "Manually":
trigger = CronTrigger(year=in_a_century())
self.aps_scheduler.add_job(backup_to_zip, trigger,
trigger = {'day_of_week': settings.backup.day, 'hour': settings.backup.hour}
else:
trigger = {'year': in_a_century()}
self.aps_scheduler.add_job(backup_to_zip, 'cron', **trigger,
max_instances=1, coalesce=True, misfire_grace_time=15, id='backup',
name='Backup Database and Configuration File', replace_existing=True)
@@ -235,39 +240,39 @@ class Scheduler:
full_update = settings.sonarr.full_update
if full_update == "Daily":
self.aps_scheduler.add_job(
update_all_episodes, CronTrigger(hour=settings.sonarr.full_update_hour), max_instances=1,
update_all_episodes, 'cron', hour=settings.sonarr.full_update_hour, max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_all_episodes',
name='Index All Episode Subtitles from Disk', replace_existing=True)
elif full_update == "Weekly":
self.aps_scheduler.add_job(
update_all_episodes,
CronTrigger(day_of_week=settings.sonarr.full_update_day, hour=settings.sonarr.full_update_hour),
max_instances=1, coalesce=True, misfire_grace_time=15, id='update_all_episodes',
name='Index All Episode Subtitles from Disk', replace_existing=True)
update_all_episodes, 'cron', day_of_week=settings.sonarr.full_update_day,
hour=settings.sonarr.full_update_hour, max_instances=1, coalesce=True, misfire_grace_time=15,
id='update_all_episodes', name='Index All Episode Subtitles from Disk', replace_existing=True)
elif full_update == "Manually":
self.aps_scheduler.add_job(
update_all_episodes, CronTrigger(year=in_a_century()), max_instances=1, coalesce=True,
misfire_grace_time=15, id='update_all_episodes',
name='Index All Episode Subtitles from Disk', replace_existing=True)
update_all_episodes, 'cron', year=in_a_century(), max_instances=1, coalesce=True,
misfire_grace_time=15, id='update_all_episodes', name='Index All Episode Subtitles from Disk',
replace_existing=True)
def __radarr_full_update_task(self):
if settings.general.use_radarr:
full_update = settings.radarr.full_update
if full_update == "Daily":
self.aps_scheduler.add_job(
update_all_movies, CronTrigger(hour=settings.radarr.full_update_hour), max_instances=1,
update_all_movies, 'cron', hour=settings.radarr.full_update_hour, max_instances=1,
coalesce=True, misfire_grace_time=15,
id='update_all_movies', name='Index All Movie Subtitles from Disk', replace_existing=True)
elif full_update == "Weekly":
self.aps_scheduler.add_job(
update_all_movies,
CronTrigger(day_of_week=settings.radarr.full_update_day, hour=settings.radarr.full_update_hour),
'cron', day_of_week=settings.radarr.full_update_day, hour=settings.radarr.full_update_hour,
max_instances=1, coalesce=True, misfire_grace_time=15, id='update_all_movies',
name='Index All Movie Subtitles from Disk', replace_existing=True)
elif full_update == "Manually":
self.aps_scheduler.add_job(
update_all_movies, CronTrigger(year=in_a_century()), max_instances=1, coalesce=True, misfire_grace_time=15,
id='update_all_movies', name='Index All Movie Subtitles from Disk', replace_existing=True)
update_all_movies, 'cron', year=in_a_century(), max_instances=1, coalesce=True,
misfire_grace_time=15, id='update_all_movies', name='Index All Movie Subtitles from Disk',
replace_existing=True)
def __update_bazarr_task(self):
if not args.no_update and os.environ["BAZARR_VERSION"] != '':
@@ -275,43 +280,42 @@ class Scheduler:
if settings.general.auto_update:
self.aps_scheduler.add_job(
check_if_new_update, IntervalTrigger(hours=6), max_instances=1, coalesce=True,
check_if_new_update, 'interval', hours=6, max_instances=1, coalesce=True,
misfire_grace_time=15, id='update_bazarr', name=task_name, replace_existing=True)
else:
self.aps_scheduler.add_job(
check_if_new_update, CronTrigger(year=in_a_century()), hour=4, id='update_bazarr', name=task_name,
check_if_new_update, 'cron', year=in_a_century(), hour=4, id='update_bazarr', name=task_name,
replace_existing=True)
self.aps_scheduler.add_job(
check_releases, IntervalTrigger(hours=3), max_instances=1, coalesce=True, misfire_grace_time=15,
check_releases, 'interval', hours=3, max_instances=1, coalesce=True, misfire_grace_time=15,
id='update_release', name='Update Release Info', replace_existing=True)
else:
self.aps_scheduler.add_job(
check_releases, IntervalTrigger(hours=3), max_instances=1, coalesce=True, misfire_grace_time=15,
check_releases, 'interval', hours=3, max_instances=1, coalesce=True, misfire_grace_time=15,
id='update_release', name='Update Release Info', replace_existing=True)
self.aps_scheduler.add_job(
get_announcements_to_file, IntervalTrigger(hours=6), max_instances=1, coalesce=True, misfire_grace_time=15,
get_announcements_to_file, 'interval', hours=6, max_instances=1, coalesce=True, misfire_grace_time=15,
id='update_announcements', name='Update Announcements File', replace_existing=True)
def __search_wanted_subtitles_task(self):
if settings.general.use_sonarr:
self.aps_scheduler.add_job(
wanted_search_missing_subtitles_series,
IntervalTrigger(hours=int(settings.general.wanted_search_frequency)), max_instances=1, coalesce=True,
misfire_grace_time=15, id='wanted_search_missing_subtitles_series', replace_existing=True,
name='Search for Missing Series Subtitles')
wanted_search_missing_subtitles_series, 'interval', hours=int(settings.general.wanted_search_frequency),
max_instances=1, coalesce=True, misfire_grace_time=15, id='wanted_search_missing_subtitles_series',
replace_existing=True, name='Search for Missing Series Subtitles')
if settings.general.use_radarr:
self.aps_scheduler.add_job(
wanted_search_missing_subtitles_movies,
IntervalTrigger(hours=int(settings.general.wanted_search_frequency_movie)), max_instances=1,
coalesce=True, misfire_grace_time=15, id='wanted_search_missing_subtitles_movies',
wanted_search_missing_subtitles_movies, 'interval',
hours=int(settings.general.wanted_search_frequency_movie), max_instances=1, coalesce=True,
misfire_grace_time=15, id='wanted_search_missing_subtitles_movies',
name='Search for Missing Movies Subtitles', replace_existing=True)
def __upgrade_subtitles_task(self):
if settings.general.use_sonarr or settings.general.use_radarr:
self.aps_scheduler.add_job(
upgrade_subtitles, IntervalTrigger(hours=int(settings.general.upgrade_frequency)), max_instances=1,
upgrade_subtitles, 'interval', hours=int(settings.general.upgrade_frequency), max_instances=1,
coalesce=True, misfire_grace_time=15, id='upgrade_subtitles',
name='Upgrade Previously Downloaded Subtitles', replace_existing=True)
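
The scheduler changes above swap explicit trigger objects for APScheduler's string aliases ('date', 'interval', 'cron'), with the trigger parameters passed as keyword arguments. Both registration styles are equivalent; a minimal sketch:

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.triggers.interval import IntervalTrigger

    scheduler = BackgroundScheduler()

    def tick():
        print("tick")

    # a trigger instance and its string alias register the same job:
    scheduler.add_job(tick, IntervalTrigger(hours=6), id='tick_a')
    scheduler.add_job(tick, 'interval', hours=6, id='tick_b')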

@@ -64,7 +64,7 @@ class Server:
logging.exception("BAZARR cannot bind to default TCP port (6767) because it's already in use, "
"exiting...")
self.shutdown(EXIT_PORT_ALREADY_IN_USE_ERROR)
elif error.errno == errno.ENOLINK:
elif error.errno in [errno.ENOLINK, errno.EAFNOSUPPORT]:
logging.exception("BAZARR cannot bind to IPv6 (*), trying with 0.0.0.0")
self.address = '0.0.0.0'
self.connected = False
@@ -93,8 +93,9 @@ class Server:
def close_all(self):
print("Closing database...")
close_database()
print("Closing webserver...")
self.server.close()
if self.server:
print("Closing webserver...")
self.server.close()
def shutdown(self, status=EXIT_NORMAL):
self.close_all()

@@ -27,9 +27,6 @@ from utilities.central import make_bazarr_dir, restart_bazarr, stop_bazarr
global startTime
startTime = time.time()
# restore backup if required
restore_from_backup()
# set subliminal_patch user agent
os.environ["SZ_USER_AGENT"] = f"Bazarr/{os.environ['BAZARR_VERSION']}"
@@ -63,6 +60,9 @@ from ga4mp import GtagMP # noqa E402
configure_logging(settings.general.debug or args.debug)
import logging # noqa E402
# restore backup if required
restore_from_backup()
def is_virtualenv():
# return True if Bazarr has been started from within a virtualenv or venv

bazarr/plex/__init__.py (new file)

@@ -0,0 +1 @@
# coding=utf-8

bazarr/plex/operations.py (new file)

@@ -0,0 +1,81 @@
# coding=utf-8
from datetime import datetime
from app.config import settings
from plexapi.server import PlexServer
import logging
logger = logging.getLogger(__name__)
# Constants
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_plex_server() -> PlexServer:
"""Connect to the Plex server and return the server instance."""
try:
protocol = "https://" if settings.plex.ssl else "http://"
baseurl = f"{protocol}{settings.plex.ip}:{settings.plex.port}"
return PlexServer(baseurl, settings.plex.apikey)
except Exception as e:
logger.error(f"Failed to connect to Plex server: {e}")
raise
def update_added_date(video, added_date: str) -> None:
"""Update the added date of a video in Plex."""
try:
updates = {"addedAt.value": added_date}
video.edit(**updates)
logger.info(f"Updated added date for {video.title} to {added_date}")
except Exception as e:
logger.error(f"Failed to update added date for {video.title}: {e}")
raise
def plex_set_movie_added_date_now(movie_metadata) -> None:
"""
Update the added date of a movie in Plex to the current datetime.
:param movie_metadata: Metadata object containing the movie's IMDb ID.
"""
try:
plex = get_plex_server()
library = plex.library.section(settings.plex.movie_library)
video = library.getGuid(guid=movie_metadata.imdbId)
current_date = datetime.now().strftime(DATETIME_FORMAT)
update_added_date(video, current_date)
except Exception as e:
logger.error(f"Error in plex_set_movie_added_date_now: {e}")
def plex_set_episode_added_date_now(episode_metadata) -> None:
"""
Update the added date of a TV episode in Plex to the current datetime.
:param episode_metadata: Metadata object containing the episode's IMDb ID, season, and episode number.
"""
try:
plex = get_plex_server()
library = plex.library.section(settings.plex.series_library)
show = library.getGuid(episode_metadata.imdbId)
episode = show.episode(season=episode_metadata.season, episode=episode_metadata.episode)
current_date = datetime.now().strftime(DATETIME_FORMAT)
update_added_date(episode, current_date)
except Exception as e:
logger.error(f"Error in plex_set_episode_added_date_now: {e}")
def plex_update_library(is_movie_library: bool) -> None:
"""
Trigger a library update for the specified library type.
:param is_movie_library: True for movie library, False for series library.
"""
try:
plex = get_plex_server()
library_name = settings.plex.movie_library if is_movie_library else settings.plex.series_library
library = plex.library.section(library_name)
library.update()
logger.info(f"Triggered update for library: {library_name}")
except Exception as e:
logger.error(f"Error in plex_update_library: {e}")

@@ -333,16 +333,17 @@ def update_one_movie(movie_id, action, defer_search=False):
logging.debug(
f'BAZARR inserted this movie into the database:{path_mappings.path_replace_movie(movie["path"])}')
# Storing existing subtitles
logging.debug(f'BAZARR storing subtitles for this movie: {path_mappings.path_replace_movie(movie["path"])}')
store_subtitles_movie(movie['path'], path_mappings.path_replace_movie(movie['path']))
# Downloading missing subtitles
if defer_search:
logging.debug(
f'BAZARR searching for missing subtitles is deferred until scheduled task execution for this movie: '
f'{path_mappings.path_replace_movie(movie["path"])}')
else:
logging.debug(
f'BAZARR downloading missing subtitles for this movie: {path_mappings.path_replace_movie(movie["path"])}')
movies_download_subtitles(movie_id)
mapped_movie_path = path_mappings.path_replace_movie(movie["path"])
if os.path.exists(mapped_movie_path):
logging.debug(f'BAZARR downloading missing subtitles for this movie: {mapped_movie_path}')
movies_download_subtitles(movie_id)
else:
logging.debug(f'BAZARR cannot find this file yet (Radarr may be slow to import movie between disks?). '
f'Searching for missing subtitles is deferred until scheduled task execution for this movie: '
f'{mapped_movie_path}')

@@ -124,7 +124,7 @@ def movieParser(movie, action, tags_dict, language_profiles, movie_default_profi
parsed_movie = {'radarrId': int(movie["id"]),
'title': movie["title"],
'path': os.path.join(movie["path"], movie['movieFile']['relativePath']),
'path': movie['movieFile']['path'],
'tmdbId': str(movie["tmdbId"]),
'poster': poster,
'fanart': fanart,

@@ -16,7 +16,8 @@ def get_profile_list():
f"apikey={apikey_radarr}")
try:
profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False,
headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get profiles from Radarr. Connection Error.")
except requests.exceptions.Timeout:
@@ -27,14 +28,14 @@ def get_profile_list():
# Parsing data returned from radarr
if get_radarr_info.is_legacy():
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language'].capitalize()])
if 'language' in profile:
profiles_list.append([profile['id'], profile['language'].capitalize()])
else:
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language']['name'].capitalize()])
if 'language' in profile and 'name' in profile['language']:
profiles_list.append([profile['id'], profile['language']['name'].capitalize()])
return profiles_list
return None
return profiles_list
def get_tags():

@@ -258,16 +258,17 @@ def sync_one_episode(episode_id, defer_search=False):
logging.debug(
f'BAZARR inserted this episode into the database:{path_mappings.path_replace(episode["path"])}')
# Storing existing subtitles
logging.debug(f'BAZARR storing subtitles for this episode: {path_mappings.path_replace(episode["path"])}')
store_subtitles(episode['path'], path_mappings.path_replace(episode['path']))
# Downloading missing subtitles
if defer_search:
logging.debug(
f'BAZARR searching for missing subtitles is deferred until scheduled task execution for this episode: '
f'{path_mappings.path_replace(episode["path"])}')
else:
logging.debug(
f'BAZARR downloading missing subtitles for this episode: {path_mappings.path_replace(episode["path"])}')
episode_download_subtitles(episode_id)
mapped_episode_path = path_mappings.path_replace(episode["path"])
if os.path.exists(mapped_episode_path):
logging.debug(f'BAZARR downloading missing subtitles for this episode: {mapped_episode_path}')
episode_download_subtitles(episode_id, send_progress=True)
else:
logging.debug(f'BAZARR cannot find this file yet (Sonarr may be slow to import episode between disks?). '
f'Searching for missing subtitles is deferred until scheduled task execution for this episode'
f': {mapped_episode_path}')

@@ -33,14 +33,16 @@ def get_profile_list():
except requests.exceptions.RequestException:
logging.exception("BAZARR Error trying to get profiles from Sonarr.")
return None
# Parsing data returned from Sonarr
if get_sonarr_info.is_legacy():
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language'].capitalize()])
else:
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['name'].capitalize()])
# Parsing data returned from Sonarr
if get_sonarr_info.is_legacy():
for profile in profiles_json.json():
if 'language' in profile:
profiles_list.append([profile['id'], profile['language'].capitalize()])
else:
for profile in profiles_json.json():
if 'name' in profile:
profiles_list.append([profile['id'], profile['name'].capitalize()])
return profiles_list

@@ -132,7 +132,7 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
.values(subtitles=str(actual_subtitles))
.where(TableEpisodes.path == original_path))
matching_episodes = database.execute(
select(TableEpisodes.sonarrEpisodeId, TableEpisodes.sonarrSeriesId)
select(TableEpisodes.sonarrEpisodeId)
.where(TableEpisodes.path == original_path))\
.all()

@@ -11,6 +11,7 @@ from charset_normalizer import detect
from constants import MAXIMUM_SUBTITLE_SIZE
from app.config import settings
from utilities.path_mappings import path_mappings
from languages.custom_lang import CustomLanguage
def get_external_subtitles_path(file, subtitle):
@@ -54,8 +55,7 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
break
if x_found_lang:
if not language:
x_hi = ':hi' in x_found_lang
subtitles[subtitle] = Language.rebuild(Language.fromietf(x_found_lang), hi=x_hi)
subtitles[subtitle] = _get_lang_from_str(x_found_lang)
continue
if not language:
@@ -141,3 +141,23 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
None):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
return subtitles
def _get_lang_from_str(x_found_lang):
x_found_lang_split = x_found_lang.split(':')[0]
x_hi = ':hi' in x_found_lang.lower()
x_forced = ':forced' in x_found_lang.lower()
if len(x_found_lang_split) == 2:
x_custom_lang_attr = "alpha2"
elif len(x_found_lang_split) == 3:
x_custom_lang_attr = "alpha3"
else:
x_custom_lang_attr = "language"
x_custom_lang = CustomLanguage.from_value(x_found_lang_split, attr=x_custom_lang_attr)
if x_custom_lang is not None:
return Language.rebuild(x_custom_lang.subzero_language(), hi=x_hi, forced=x_forced)
else:
return Language.rebuild(Language.fromietf(x_found_lang), hi=x_hi, forced=x_forced)

@@ -7,10 +7,11 @@ from app.config import settings, sync_checker as _defaul_sync_checker
from utilities.path_mappings import path_mappings
from utilities.post_processing import pp_replace, set_chmod
from languages.get_languages import alpha2_from_alpha3, alpha2_from_language, alpha3_from_language, language_from_alpha3
from app.database import TableEpisodes, TableMovies, database, select
from app.database import TableShows, TableEpisodes, TableMovies, database, select
from utilities.analytics import event_tracker
from radarr.notify import notify_radarr
from sonarr.notify import notify_sonarr
from plex.operations import plex_set_movie_added_date_now, plex_update_library, plex_set_episode_added_date_now
from app.event_handler import event_stream
from .utils import _get_download_code3
@@ -76,8 +77,10 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
if media_type == 'series':
episode_metadata = database.execute(
select(TableEpisodes.sonarrSeriesId, TableEpisodes.sonarrEpisodeId)
.where(TableEpisodes.path == path_mappings.path_replace_reverse(path)))\
select(TableShows.imdbId, TableEpisodes.sonarrSeriesId, TableEpisodes.sonarrEpisodeId,
TableEpisodes.season, TableEpisodes.episode)
.join(TableShows)\
.where(TableEpisodes.path == path_mappings.path_replace_reverse(path)))\
.first()
if not episode_metadata:
return
@@ -95,8 +98,8 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
sonarr_episode_id=episode_metadata.sonarrEpisodeId)
else:
movie_metadata = database.execute(
select(TableMovies.radarrId)
.where(TableMovies.path == path_mappings.path_replace_reverse_movie(path)))\
select(TableMovies.radarrId, TableMovies.imdbId)
.where(TableMovies.path == path_mappings.path_replace_reverse_movie(path)))\
.first()
if not movie_metadata:
return
@@ -115,7 +118,8 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
if use_postprocessing is True:
command = pp_replace(postprocessing_cmd, path, downloaded_path, downloaded_language, downloaded_language_code2,
downloaded_language_code3, audio_language, audio_language_code2, audio_language_code3,
percent_score, subtitle_id, downloaded_provider, uploader, release_info, series_id, episode_id)
percent_score, subtitle_id, downloaded_provider, uploader, release_info, series_id,
episode_id)
if media_type == 'series':
use_pp_threshold = settings.general.use_postprocessing_threshold
@@ -139,12 +143,22 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
event_stream(type='series', action='update', payload=episode_metadata.sonarrSeriesId)
event_stream(type='episode-wanted', action='delete',
payload=episode_metadata.sonarrEpisodeId)
if settings.general.use_plex is True:
if settings.plex.update_series_library is True:
plex_update_library(is_movie_library=False)
if settings.plex.set_episode_added is True:
plex_set_episode_added_date_now(episode_metadata)
else:
reversed_path = path_mappings.path_replace_reverse_movie(path)
reversed_subtitles_path = path_mappings.path_replace_reverse_movie(downloaded_path)
notify_radarr(movie_metadata.radarrId)
event_stream(type='movie-wanted', action='delete', payload=movie_metadata.radarrId)
if settings.general.use_plex is True:
if settings.plex.set_movie_added is True:
plex_set_movie_added_date_now(movie_metadata)
if settings.plex.update_movie_library is True:
plex_update_library(is_movie_library=True)
event_tracker.track_subtitles(provider=downloaded_provider, action=action, language=downloaded_language)

@@ -27,6 +27,7 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
'zt': 'zh-TW',
}
orig_to_lang = to_lang
to_lang = alpha3_from_alpha2(to_lang)
try:
lang_obj = Language(to_lang)
@@ -126,7 +127,7 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
result = ProcessSubtitlesResult(message=message,
reversed_path=prr(video_path),
downloaded_language_code2=to_lang,
downloaded_language_code2=orig_to_lang,
downloaded_provider=None,
score=None,
forced=forced,

@@ -7,7 +7,7 @@ import ast
from datetime import datetime, timedelta
from functools import reduce
from sqlalchemy import and_
from sqlalchemy import and_, or_
from app.config import settings
from app.database import get_exclusion_clause, get_audio_profile_languages, TableShows, TableEpisodes, TableMovies, \
@@ -118,7 +118,7 @@ def upgrade_subtitles():
episode['seriesTitle'],
'series',
episode['profileId'],
forced_minimum_score=int(episode['score']),
forced_minimum_score=int(episode['score'] or 0),
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace(
episode['subtitles_path'])))
@@ -221,7 +221,7 @@ def upgrade_subtitles():
movie['title'],
'movie',
movie['profileId'],
forced_minimum_score=int(movie['score']),
forced_minimum_score=int(movie['score'] or 0),
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie(
movie['subtitles_path'])))
@@ -293,8 +293,8 @@ def get_upgradable_episode_subtitles():
upgradable_episodes_conditions = [(TableHistory.action.in_(query_actions)),
(TableHistory.timestamp > minimum_timestamp),
TableHistory.score.is_not(None),
(TableHistory.score < 357)]
or_(and_(TableHistory.score.is_(None), TableHistory.action == 6),
(TableHistory.score < 357))]
upgradable_episodes_conditions += get_exclusion_clause('series')
subtitles_to_upgrade = database.execute(
select(TableHistory.id,
@@ -316,6 +316,12 @@ def get_upgradable_episode_subtitles():
query_actions_without_upgrade = [x for x in query_actions if x != 3]
upgradable_episode_subtitles = {}
for subtitle_to_upgrade in subtitles_to_upgrade:
# exclude subtitles whose ID has already been "upgraded from" and that shouldn't be considered (should help
# prevent the non-matching hi/non-hi bug)
if database.execute(select(TableHistory.id).where(TableHistory.upgradedFromId == subtitle_to_upgrade.id)).first():
logging.debug(f"Episode subtitle {subtitle_to_upgrade.id} has already been upgraded so we'll skip it.")
continue
# check if we have the original subtitles id in database and use it instead of guessing
if subtitle_to_upgrade.upgradedFromId:
upgradable_episode_subtitles.update({subtitle_to_upgrade.id: subtitle_to_upgrade.upgradedFromId})
@@ -371,8 +377,8 @@ def get_upgradable_movies_subtitles():
upgradable_movies_conditions = [(TableHistoryMovie.action.in_(query_actions)),
(TableHistoryMovie.timestamp > minimum_timestamp),
TableHistoryMovie.score.is_not(None),
(TableHistoryMovie.score < 117)]
or_(and_(TableHistoryMovie.score.is_(None), TableHistoryMovie.action == 6),
(TableHistoryMovie.score < 117))]
upgradable_movies_conditions += get_exclusion_clause('movie')
subtitles_to_upgrade = database.execute(
select(TableHistoryMovie.id,
@@ -393,6 +399,13 @@ def get_upgradable_movies_subtitles():
query_actions_without_upgrade = [x for x in query_actions if x != 3]
upgradable_movie_subtitles = {}
for subtitle_to_upgrade in subtitles_to_upgrade:
# exclude subtitles whose ID has already been "upgraded from" and that shouldn't be considered (should help
# prevent the non-matching hi/non-hi bug)
if database.execute(
select(TableHistoryMovie.id).where(TableHistoryMovie.upgradedFromId == subtitle_to_upgrade.id)).first():
logging.debug(f"Movie subtitle {subtitle_to_upgrade.id} has already been upgraded so we'll skip it.")
continue
# check if we have the original subtitles id in database and use it instead of guessing
if subtitle_to_upgrade.upgradedFromId:
upgradable_movie_subtitles.update({subtitle_to_upgrade.id: subtitle_to_upgrade.upgradedFromId})

@@ -80,7 +80,7 @@ def backup_to_zip():
backupZip.write(database_backup_file, 'bazarr.db')
try:
os.remove(database_backup_file)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to delete temporary database backup file: {database_backup_file}')
else:
logging.debug('Database file is not included in backup. See previous exception')
@@ -104,7 +104,7 @@ def restore_from_backup():
try:
shutil.copy(restore_config_path, dest_config_path)
os.remove(restore_config_path)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to restore or delete config file to {dest_config_path}')
else:
if new_config:
@@ -116,8 +116,7 @@ def restore_from_backup():
if not settings.postgresql.enabled:
try:
shutil.copy(restore_database_path, dest_database_path)
os.remove(restore_database_path)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to restore or delete db to {dest_database_path}')
else:
try:
@@ -125,11 +124,12 @@ def restore_from_backup():
os.remove(f'{dest_database_path}-shm')
if os.path.isfile(f'{dest_database_path}-wal'):
os.remove(f'{dest_database_path}-wal')
except OSError:
except (OSError, FileNotFoundError):
logging.exception('Unable to delete SHM and WAL file.')
try:
os.remove(restore_database_path)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to delete {dest_database_path}')
logging.info('Backup restored successfully. Bazarr will restart.')
@@ -144,7 +144,7 @@ def restore_from_backup():
os.remove(restore_config_path)
except FileNotFoundError:
pass
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to delete {dest_config_path}')
@@ -154,7 +154,7 @@ def prepare_restore(filename):
success = False
try:
shutil.copy(src_zip_file_path, dest_zip_file_path)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to copy backup archive to {dest_zip_file_path}')
else:
try:
@@ -162,12 +162,12 @@ def prepare_restore(filename):
zipObj.extractall(path=get_restore_path())
except BadZipFile:
logging.exception(f'Unable to extract files from backup archive {dest_zip_file_path}')
success = True
else:
success = True
finally:
try:
os.remove(dest_zip_file_path)
except OSError:
except (OSError, FileNotFoundError):
logging.exception(f'Unable to delete backup archive {dest_zip_file_path}')
if success:
@@ -175,6 +175,8 @@ def prepare_restore(filename):
from app.server import webserver
webserver.restart()
return success
def backup_rotation():
backup_retention = settings.backup.retention
@@ -192,7 +194,7 @@ def backup_rotation():
logging.debug(f'Deleting old backup file {file}')
try:
os.remove(file)
except OSError:
except (OSError, FileNotFoundError):
logging.debug(f'Unable to delete backup file {file}')
logging.debug('Finished cleaning up old backup files')
@@ -202,7 +204,7 @@ def delete_backup_file(filename):
try:
os.remove(backup_file_path)
return True
except OSError:
except (OSError, FileNotFoundError):
logging.debug(f'Unable to delete backup file {backup_file_path}')
return False

custom_libs/imghdr.py (new file)

@@ -0,0 +1,11 @@
import filetype
_IMG_MIME = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'image/gif': 'gif'
}
def what(_, img):
img_type = filetype.guess(img)
return _IMG_MIME.get(img_type.mime) if img_type else None
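
The stdlib imghdr module was removed in Python 3.13; the shim above keeps imghdr.what() call sites working by delegating to the filetype package. Example usage (the file name is hypothetical):

    from imghdr import what  # resolves to this shim

    with open("poster.png", "rb") as f:
        print(what(None, f.read()))  # 'png', or None if unrecognized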

@@ -11,7 +11,7 @@ import binascii
import types
import os
from pipes import quote
from shlex import quote
from .lib import find_executable
mswindows = False

@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from pkg_resources import EntryPoint
import re
from importlib.metadata import EntryPoint
from stevedore import ExtensionManager
@@ -26,23 +28,23 @@ class RegistrableExtensionManager(ExtensionManager):
self.registered_extensions = []
#: Internal extensions with entry point syntax
self.internal_extensions = internal_extensions
self.internal_extensions = list(internal_extensions)
super(RegistrableExtensionManager, self).__init__(namespace, **kwargs)
super().__init__(namespace, **kwargs)
def list_entry_points(self):
# copy of default extensions
eps = list(super(RegistrableExtensionManager, self).list_entry_points())
eps = list(super().list_entry_points())
# internal extensions
for iep in self.internal_extensions:
ep = EntryPoint.parse(iep)
ep = parse_entry_point(iep, self.namespace)
if ep.name not in [e.name for e in eps]:
eps.append(ep)
# registered extensions
for rep in self.registered_extensions:
ep = EntryPoint.parse(rep)
ep = parse_entry_point(rep, self.namespace)
if ep.name not in [e.name for e in eps]:
eps.append(ep)
@@ -58,7 +60,7 @@ class RegistrableExtensionManager(ExtensionManager):
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered')
ep = EntryPoint.parse(entry_point)
ep = parse_entry_point(entry_point, self.namespace)
if ep.name in self.names():
raise ValueError('An extension with the same name already exist')
@@ -77,7 +79,7 @@ class RegistrableExtensionManager(ExtensionManager):
if entry_point not in self.registered_extensions:
raise ValueError('Extension not registered')
ep = EntryPoint.parse(entry_point)
ep = parse_entry_point(entry_point, self.namespace)
self.registered_extensions.remove(entry_point)
if self._extensions_by_name is not None:
del self._extensions_by_name[ep.name]
@@ -87,6 +89,17 @@ class RegistrableExtensionManager(ExtensionManager):
break
def parse_entry_point(src: str, group: str) -> EntryPoint:
"""Parse a string entry point."""
pattern = re.compile(r'\s*(?P<name>.+?)\s*=\s*(?P<value>.+)')
m = pattern.match(src)
if not m:
msg = "EntryPoint must be in the 'name = module:attrs' format"
raise ValueError(msg, src)
res = m.groupdict()
return EntryPoint(res['name'], res['value'], group)
#: Provider manager
provider_manager = RegistrableExtensionManager('subliminal.providers', [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',

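pkg_resources.EntryPoint.parse has no direct equivalent in importlib.metadata, hence the parse_entry_point helper above. Given one of the internal extension strings, it yields a loadable entry point:

    ep = parse_entry_point(
        'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
        'subliminal.providers')
    print(ep.name, ep.value)  # addic7ed subliminal.providers.addic7ed:Addic7edProvider
    provider_cls = ep.load()  # imports the module and returns the provider class
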
@@ -45,7 +45,7 @@ movie_scores = {'hash': 119, 'title': 60, 'year': 30, 'release_group': 15,
'source': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1}
#: Equivalent release groups
equivalent_release_groups = ({'FraMeSToR', 'W4NK3R', 'BHDStudio'}, {'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'})
equivalent_release_groups = ({'FRAMESTOR', 'W4NK3R', 'BHDSTUDIO'}, {'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'})
def get_equivalent_release_groups(release_group):

@@ -0,0 +1,353 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re
import io
import os
import zipfile
from random import randint
from typing import Optional, Dict, List, Set
from datetime import datetime, timedelta
from babelfish import Language
from guessit import guessit
from bs4 import BeautifulSoup
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.http import RetryingCFSession
from subliminal.subtitle import fix_line_ending
from subliminal.video import Episode
from subliminal_patch.utils import sanitize, fix_inconsistent_naming
from subzero.language import Language
from subliminal.cache import region
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
logger = logging.getLogger(__name__)
# Cache expiration times
SEARCH_EXPIRATION_TIME = timedelta(hours=1).total_seconds()
def fix_turkish_chars(text: str) -> str:
"""Fix Turkish characters for proper matching."""
if not text:
return ""
tr_chars = {
'İ': 'i', 'I': 'i', 'Ğ': 'g', 'Ü': 'u', 'Ş': 's', 'Ö': 'o', 'Ç': 'c',
'ı': 'i', 'ğ': 'g', 'ü': 'u', 'ş': 's', 'ö': 'o', 'ç': 'c'
}
for tr_char, eng_char in tr_chars.items():
text = text.replace(tr_char, eng_char)
return text
def normalize_series_name(series: str) -> str:
"""Normalize series name for consistent matching."""
if not series:
return ""
# Remove special characters
series = re.sub(r'[^\w\s-]', '', series)
# Replace multiple spaces with single space
series = re.sub(r'\s+', ' ', series)
# Fix Turkish characters
series = fix_turkish_chars(series)
return series.lower().strip()
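
# Illustration (hypothetical title): the two helpers above fold punctuation,
# whitespace and Turkish characters so lookups match, e.g.
#   normalize_series_name('Kuşlar  Şehri: Özel Bölüm!') == 'kuslar sehri ozel bolum'
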
class AnimeKalesiSubtitle(Subtitle):
"""AnimeKalesi Subtitle."""
provider_name = 'animekalesi'
hearing_impaired_verifiable = False
def __init__(self, language: Language, page_link: str, series: str, season: int, episode: int,
version: str, download_link: str, uploader: str = None, release_group: str = None):
super().__init__(language)
self.page_link = page_link
self.series = series
self.season = season
self.episode = episode
self.version = version
self.download_link = download_link
self.release_info = version
self.matches = set()
self.uploader = uploader
self.release_group = release_group
self.hearing_impaired = False
@property
def id(self) -> str:
return self.download_link
def get_matches(self, video: Episode) -> Set[str]:
matches = set()
# Series name match
if video.series and self.series:
# Direct comparison
if video.series.lower() == self.series.lower():
matches.add('series')
# Normalized comparison
elif normalize_series_name(video.series) == normalize_series_name(self.series):
matches.add('series')
# Alternative series comparison
elif getattr(video, 'alternative_series', None):
for alt_name in video.alternative_series:
if normalize_series_name(alt_name) == normalize_series_name(self.series):
matches.add('series')
break
# Season match
if video.season and self.season == video.season:
matches.add('season')
# Episode match
if video.episode and self.episode == video.episode:
matches.add('episode')
# Release group match
if getattr(video, 'release_group', None) and self.release_group:
if video.release_group.lower() in self.release_group.lower():
matches.add('release_group')
matches |= guess_matches(video, guessit(self.version))
self.matches = matches
return matches
class AnimeKalesiProvider(Provider, ProviderSubtitleArchiveMixin):
"""AnimeKalesi Provider."""
languages = {Language('tur')}
video_types = (Episode,)
server_url = 'https://www.animekalesi.com'
subtitle_class = AnimeKalesiSubtitle
hearing_impaired_verifiable = False
def __init__(self):
self.session = None
super().__init__()
def initialize(self):
self.session = RetryingCFSession()
self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
self.session.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.session.headers['Accept-Language'] = 'tr,en-US;q=0.7,en;q=0.3'
self.session.headers['Connection'] = 'keep-alive'
self.session.headers['Referer'] = self.server_url
logger.info('AnimeKalesi provider initialized')
def terminate(self):
self.session.close()
@region.cache_on_arguments(expiration_time=SEARCH_EXPIRATION_TIME)
def _search_anime_list(self, series: str) -> Optional[Dict[str, str]]:
"""Search for series in anime list."""
if not series:
return None
try:
response = self.session.get(f'{self.server_url}/tum-anime-serileri.html', timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
normalized_search = normalize_series_name(series)
possible_matches = []
for td in soup.select('td#bolumler'):
link = td.find('a')
if not link:
continue
title = link.text.strip()
href = link.get('href', '')
if not href or 'bolumler-' not in href:
continue
normalized_title = normalize_series_name(title)
# Exact match
if normalized_title == normalized_search:
return {'title': title, 'url': href}
# Partial match
if normalized_search in normalized_title or normalized_title in normalized_search:
possible_matches.append({'title': title, 'url': href})
# Return best partial match if no exact match found
if possible_matches:
return possible_matches[0]
except Exception as e:
logger.error('Error searching anime list: %s', e)
return None
def _parse_season_episode(self, title: str) -> tuple:
"""Extract season and episode numbers from title."""
if not title:
return None, None
try:
ep_match = re.search(r'(\d+)\.\s*Bölüm', title)
episode = int(ep_match.group(1)) if ep_match else None
season_match = re.search(r'(\d+)\.\s*Sezon', title)
season = int(season_match.group(1)) if season_match else 1
return season, episode
except (AttributeError, ValueError) as e:
logger.error('Error parsing season/episode from title "%s": %s', title, e)
return None, None
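# Illustrative example (title format assumed from the site's episode list, see
# _get_episode_list() below):
#
#     _parse_season_episode("Naruto 2. Sezon 5. Bölüm Türkçe Altyazısı")  # -> (2, 5)
#     _parse_season_episode("Naruto 5. Bölüm Türkçe Altyazısı")  # -> (1, 5); season defaults to 1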
@region.cache_on_arguments(expiration_time=SEARCH_EXPIRATION_TIME)
def _get_episode_list(self, series_url: str) -> Optional[List[Dict[str, str]]]:
"""Get episode list for a series."""
if not series_url:
return None
try:
subtitle_page_url = f'{self.server_url}/{series_url.replace("bolumler-", "altyazib-")}'
response = self.session.get(subtitle_page_url, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
episodes = []
for td in soup.select('td#ayazi_indir'):
link = td.find('a', href=True)
if not link:
continue
if 'indir_bolum-' in link['href'] and 'Bölüm Türkçe Altyazısı' in link.get('title', ''):
episodes.append({
'title': link['title'],
'url': f"{self.server_url}/{link['href']}"
})
return episodes
except Exception as e:
logger.error('Error getting episode list: %s', e)
return None
def query(self, series: str, season: int, episode: int) -> List[AnimeKalesiSubtitle]:
"""Search subtitles from AnimeKalesi."""
if not series or not season or not episode:
return []
subtitles = []
# Find series information
series_data = self._search_anime_list(series)
if not series_data:
logger.debug('Series not found: %s', series)
return subtitles
# Get episode list
episodes = self._get_episode_list(series_data['url'])
if not episodes:
return subtitles
try:
for episode_data in episodes:
title = episode_data['title']
link_url = episode_data['url']
# Extract season and episode numbers
current_season, current_episode = self._parse_season_episode(title)
if current_season is None or current_episode is None:
continue
if current_season == season and current_episode == episode:
try:
# Navigate to subtitle download page
response = self.session.get(link_url, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
# Find download link
subtitle_div = soup.find('div', id='altyazi_indir')
if subtitle_div and subtitle_div.find('a', href=True):
download_link = f"{self.server_url}/{subtitle_div.find('a')['href']}"
# Find uploader information
uploader = None
translator_info = soup.find('strong', text='Altyazı/Çeviri:')
if translator_info and translator_info.parent:
strong_tags = translator_info.parent.find_all('strong')
for tag in strong_tags:
    if tag.text == 'Altyazı/Çeviri:':
        # the uploader name is the text node immediately after the label
        uploader = tag.next_sibling
        if uploader:
            uploader = uploader.strip()
        break
version = f"{series_data['title']} - S{current_season:02d}E{current_episode:02d}"
if uploader:
version += f" by {uploader}"
try:
subtitle = self.subtitle_class(
Language('tur'),
link_url,
series_data['title'],
current_season,
current_episode,
version,
download_link,
uploader=uploader,
release_group=None
)
subtitles.append(subtitle)
except Exception as e:
logger.error('Error creating subtitle object: %s', e)
continue
except Exception as e:
logger.error('Error processing subtitle page %s: %s', link_url, e)
continue
except Exception as e:
logger.error('Error querying subtitles: %s', e)
return subtitles
def list_subtitles(self, video: Episode, languages: Set[Language]) -> List[AnimeKalesiSubtitle]:
if not video.series or not video.episode:
return []
return self.query(video.series, video.season, video.episode)
def download_subtitle(self, subtitle: AnimeKalesiSubtitle) -> None:
try:
response = self.session.get(subtitle.download_link, timeout=10)
response.raise_for_status()
# Check for ZIP file
if response.content.startswith(b'PK\x03\x04'):
with zipfile.ZipFile(io.BytesIO(response.content)) as zf:
subtitle_files = [f for f in zf.namelist() if f.lower().endswith(('.srt', '.ass'))]
if not subtitle_files:
logger.error('No subtitle file found in ZIP archive')
return
# Select best matching subtitle file
subtitle_file = subtitle_files[0]
if len(subtitle_files) > 1:
for f in subtitle_files:
if subtitle.version.lower() in f.lower():
subtitle_file = f
break
subtitle.content = fix_line_ending(zf.read(subtitle_file))
else:
# Regular subtitle file
subtitle.content = fix_line_ending(response.content)
except Exception as e:
logger.error('Error downloading subtitle: %s', e)

View file

@ -30,15 +30,19 @@ supported_languages = [
"eng", # English
"fin", # Finnish
"fra", # French
"deu", # German
"heb", # Hebrew
"ind", # Indonesian
"ita", # Italian
"jpn", # Japanese
"por", # Portuguese
"pol", # Polish
"rus", # Russian
"spa", # Spanish
"swe", # Swedish
"tha", # Thai
"tur", # Turkish
"vie", # Vietnamese
]

View file

@ -24,6 +24,8 @@ language_converters.register('assrt = subliminal_patch.converters.assrt:AssrtCon
server_url = 'https://api.assrt.net/v1'
supported_languages = list(language_converters['assrt'].to_assrt.keys())
meaningless_videoname = ['不知道']
def get_request_delay(max_request_per_minute):
return ceil(60 / max_request_per_minute)
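# e.g. get_request_delay(20) == ceil(60 / 20) == 3 seconds between requests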
@ -203,8 +205,21 @@ class AssrtProvider(Provider):
language = Language.fromassrt(match.group('code'))
output_language = search_language_in_list(language, languages)
if output_language:
subtitles.append(AssrtSubtitle(output_language, sub['id'], sub['videoname'], self.session,
self.token, self.max_request_per_minute))
if sub['videoname'] not in meaningless_videoname:
video_name = sub['videoname']
elif 'native_name' in sub and isinstance(sub['native_name'], str):
video_name = sub['native_name']
elif ('native_name' in sub and isinstance(sub['native_name'], list) and
len(sub['native_name']) > 0):
video_name = sub['native_name'][0]
else:
video_name = None
subtitles.append(AssrtSubtitle(language=output_language,
subtitle_id=sub['id'],
video_name=video_name,
session=self.session,
token=self.token,
max_request_per_minute=self.max_request_per_minute))
except:
pass

View file

@ -217,7 +217,6 @@ class AvistazNetworkSubtitle(Subtitle):
super().__init__(language, page_link=page_link)
self.provider_name = provider_name
self.hearing_impaired = None
self.language = language
self.filename = filename
self.release_info = release
self.page_link = page_link

View file

@ -30,7 +30,6 @@ class BSPlayerSubtitle(Subtitle):
def __init__(self, language, filename, subtype, video, link, subid):
super(BSPlayerSubtitle, self).__init__(language)
self.language = language
self.filename = filename
self.page_link = link
self.subtype = subtype

View file

@ -67,7 +67,7 @@ _ALLOWED_CODECS = ("ass", "subrip", "webvtt", "mov_text")
class EmbeddedSubtitlesProvider(Provider):
provider_name = "embeddedsubtitles"
languages = {Language("por", "BR"), Language("spa", "MX")} | {
languages = {Language("por", "BR"), Language("spa", "MX"), Language("zho", "TW")} | {
Language.fromalpha2(l) for l in language_converters["alpha2"].codes
}
languages.update(set(Language.rebuild(lang, hi=True) for lang in languages))
@ -369,7 +369,7 @@ def _basename_callback(path: str):
# TODO: improve this
_SIGNS_LINE_RE = re.compile(r",([\w|_]{,15}(sign|fx|karaoke))", flags=re.IGNORECASE)
_SIGNS_LINE_RE = re.compile(r",([\w|_]{,15}(fx|karaoke))", flags=re.IGNORECASE)
def _clean_ass_subtitles(path, output_path):

View file

@ -36,7 +36,6 @@ class LegendasdivxSubtitle(Subtitle):
def __init__(self, language, video, data, skip_wrong_fps=True):
super(LegendasdivxSubtitle, self).__init__(language)
self.language = language
self.page_link = data['link']
self.hits = data['hits']
self.exact_match = data['exact_match']

View file

@ -449,6 +449,7 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
amount=retry_amount
)
logger.debug(f'params sent to the download endpoint: {res.request.body}')
download_data = res.json()
subtitle.download_link = download_data['link']

View file

@ -17,6 +17,8 @@ from requests.adapters import HTTPAdapter
from subliminal.utils import sanitize
from subliminal_patch.subtitle import guess_matches
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.exceptions import TooManyRequests
try:
from lxml import etree
@ -202,10 +204,12 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
# query the server
content = None
try:
content = self.session.get(self.server_url + 'search/old', params=params, timeout=30).content
xml = etree.fromstring(content)
content = self.session.get(self.server_url + 'search/old', params=params, timeout=30)
xml = etree.fromstring(content.content)
except etree.ParseError:
logger.error("Wrong data returned: %r", content)
if '429 Too Many Requests' in content.text:
raise TooManyRequests
logger.error("Wrong data returned: %r", content.text)
break
# exit if no results

View file

@ -4,8 +4,9 @@ import logging
import io
import os
from requests import Session
from requests import Session, JSONDecodeError
from guessit import guessit
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ProviderError
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending
@ -28,7 +29,6 @@ class RegieLiveSubtitle(Subtitle):
self.page_link = link
self.video = video
self.rating = rating
self.language = language
self.release_info = filename
@property
@ -87,13 +87,18 @@ class RegieLiveProvider(Provider):
payload['nume'] = video.title
payload['an'] = video.year
response = self.session.get(
self.url + "?" + urllib.parse.urlencode(payload),
data=payload, headers=self.headers)
response = self.checked(
lambda: self.session.get(
self.url + "?" + urllib.parse.urlencode(payload),
data=payload, headers=self.headers)
)
subtitles = []
if response.status_code == 200:
results = response.json()
try:
results = response.json()
except JSONDecodeError:
raise ProviderError('Unable to parse JSON response')
if len(results) > 0:
results_subs = results['rezultate']
for film in results_subs:
@ -122,9 +127,13 @@ class RegieLiveProvider(Provider):
'Cache-Control': 'no-cache'
}
session.headers.update(_addheaders)
res = session.get('https://subtitrari.regielive.ro')
res = self.checked(
lambda: session.get('https://subtitrari.regielive.ro')
)
cookies = res.cookies
_zipped = session.get(subtitle.page_link, cookies=cookies)
_zipped = self.checked(
lambda: session.get(subtitle.page_link, cookies=cookies, allow_redirects=False)
)
if _zipped:
if _zipped.text == '500':
raise ValueError('Error 500 on server')
@ -135,7 +144,8 @@ class RegieLiveProvider(Provider):
return subtitle
raise ValueError('Problems connecting to the server')
def _get_subtitle_from_archive(self, archive):
@staticmethod
def _get_subtitle_from_archive(archive):
# some files have a non subtitle with .txt extension
_tmp = list(SUBTITLE_EXTENSIONS)
_tmp.remove('.txt')
@ -153,3 +163,27 @@ class RegieLiveProvider(Provider):
return archive.read(name)
raise APIThrottled('Can not find the subtitle in the compressed file')
@staticmethod
def checked(fn):
"""Run :fn: and check the response status before returning it.
:param fn: the function to make an API call to provider.
:return: the response.
"""
response = None
try:
response = fn()
except Exception:
logger.exception('Unhandled exception raised.')
raise ProviderError('Unhandled exception raised. Check log.')
else:
status_code = response.status_code
if status_code == 301:
raise APIThrottled()
elif status_code == 429:
raise TooManyRequests()
return response

View file

@ -31,7 +31,7 @@ class SoustitreseuSubtitle(Subtitle):
provider_name = 'soustitreseu'
def __init__(self, language, video, name, data, content, is_perfect_match):
self.language = language
super().__init__(language)
self.srt_filename = name
self.release_info = name
self.page_link = None

View file

@ -181,7 +181,7 @@ class SubdlProvider(ProviderRetryMixin, Provider):
result = res.json()
if ('success' in result and not result['success']) or ('status' in result and not result['status']):
logger.debug(result["error"])
logger.debug(result)
return []
logger.debug(f"Query returned {len(result['subtitles'])} subtitles")
@ -257,7 +257,7 @@ class SubdlProvider(ProviderRetryMixin, Provider):
retry_timeout=retry_timeout
)
if r.status_code == 429:
if r.status_code == 429 or (r.status_code == 500 and r.text == 'Download limit exceeded'):
raise DownloadLimitExceeded("Daily download limit exceeded")
elif r.status_code == 403:
raise ConfigurationError("Invalid API key")

View file

@ -38,7 +38,6 @@ class SubsynchroSubtitle(Subtitle):
language, hearing_impaired=False, page_link=download_url
)
self.download_url = download_url
self.language = language
self.file_type = file_type
self.release_info = release_info
self.filename = filename

View file

@ -126,7 +126,8 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
video_types = (Episode, Movie)
api_url = 'https://www.titrari.ro/'
# query_advanced_search = 'cautarepreaavansata'
query_advanced_search = "maicauta"
# query_advanced_search = "maicauta"
query_advanced_search = "cautamsavedem"
def __init__(self):
self.session = None

View file

@ -66,7 +66,6 @@ class TitulkySubtitle(Subtitle):
self.episode = episode
self.releases = [release_info]
self.release_info = release_info
self.language = language
self.approved = approved
self.page_link = page_link
self.uploader = uploader

View file

@ -0,0 +1,375 @@
# -*- coding: utf-8 -*-
import logging
from random import randint
from datetime import datetime
from subzero.language import Language
from guessit import guessit
from subliminal_patch.http import RetryingCFSession
from subliminal_patch.subtitle import guess_matches
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal.utils import sanitize_release_group
from subliminal.score import get_equivalent_release_groups
from subliminal.subtitle import Subtitle
from subliminal.exceptions import AuthenticationError
from http.cookies import SimpleCookie
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
from .utils import get_archive_from_bytes
from subliminal.providers import ParserBeautifulSoup, Provider
from subliminal.video import Episode, Movie
from dateutil.relativedelta import relativedelta
from requests.cookies import RequestsCookieJar
logger = logging.getLogger(__name__)
class TurkceAltyaziOrgSubtitle(Subtitle):
"""Turkcealtyazi.org Subtitle."""
provider_name = "turkcealtyaziorg"
hearing_impaired_verifiable = True
def __init__(
self,
language,
page_link,
release_info,
uploader,
hearing_impaired=False,
season=None,
episode=None,
is_pack=False,
):
super().__init__(language, hearing_impaired, page_link)
self.season = season
self.episode = episode
if episode:
self.asked_for_episode = True
self.release_info = release_info
self.releases = release_info
self.is_pack = is_pack
self.download_link = page_link
self.uploader = uploader
self.matches = None
# Currently we only search by imdb_id, so this will always be True for now
self.imdb_match = True
@property
def id(self):
id_string = self.page_link
if self.season is not None and self.episode is not None:
episode_string = f"S{self.season:02d}E{self.episode:02d}"
id_string += episode_string
return id_string
def get_matches(self, video):
matches = set()
type_ = "movie" if isinstance(video, Movie) else "episode"
# handle movies and series separately
if type_ == "episode":
# series
matches.add("series")
# season
if video.season == self.season:
matches.add("season")
# episode
if video.episode == self.episode:
matches.add("episode")
# imdb
if self.imdb_match:
matches.add("series_imdb_id")
else:
# imdb
if self.imdb_match:
matches.add("imdb_id")
# release_group
if (
video.release_group
and self.release_info
and any(
r in sanitize_release_group(self.release_info)
for r in get_equivalent_release_groups(
sanitize_release_group(video.release_group)
)
)
):
matches.add("release_group")
# other properties
matches |= guess_matches(video, guessit(self.release_info, {"type": type_}))
self.matches = matches
return matches
class TurkceAltyaziOrgProvider(Provider, ProviderSubtitleArchiveMixin):
"""Turkcealtyazi.org Provider."""
languages = {Language.fromalpha3b("tur"), Language.fromalpha3b("eng")}
video_types = (Episode, Movie)
server_url = "https://turkcealtyazi.org"
server_dl_url = f"{server_url}/ind"
subtitle_class = TurkceAltyaziOrgSubtitle
custom_identifiers = {
# Rip Types
"cps c1": "DVDRip",
"cps c2": "HDRip",
"cps c3": "TVRip",
"rps r1": "HD",
"rps r2": "DVDRip",
"rps r3": "DVDScr",
"rps r4": "R5",
"rps r5": "CAM",
"rps r6": "WEBRip",
"rps r7": "BDRip",
"rps r8": "WEB-DL",
"rps r9": "HDRip",
"rps r10": "HDTS",
"rps r12": "BluRay",
"rip1": "DVDRip",
"rip2": "DVDScr",
"rip3": "WEBRip",
"rip4": "BDRip",
"rip5": "BRRip",
"rip6": "CAM",
"rip7": "HD",
"rip8": "R5",
"rip9": "WEB-DL",
"rip10": "HDRip",
"rip11": "HDTS",
# Languages
"flagtr": "tur",
"flagen": "eng",
"flages": "spa",
"flagfr": "fra",
"flagger": "ger",
"flagita": "ita",
"flagunk": "unknown",
# Turkish time granularity
"dakika": "minutes",
"saat": "hours",
"gün": "days",
"hafta": "weeks",
"ay": "months",
"yıl": "years",
}
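# The keys above mirror the CSS class names used on the site's listing page:
# e.g. a <span class="rps r6"> badge translates to "WEBRip" and a "flagtr"
# icon class to the alpha3b code "tur" (see the lookups in query() below).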
def __init__(self, cookies=None, user_agent=None):
self.session = None
self.cookies = cookies
self.user_agent = user_agent
def initialize(self):
self.session = RetryingCFSession()
if self.user_agent and self.user_agent != "":
self.session.headers["User-Agent"] = self.user_agent
else:
self.session.headers["User-Agent"] = AGENT_LIST[
randint(0, len(AGENT_LIST) - 1)
]
self.session.headers["Referer"] = self.server_url
if self.cookies and self.cookies != "":
self.session.cookies = RequestsCookieJar()
simple_cookie = SimpleCookie()
simple_cookie.load(self.cookies)
for k, v in simple_cookie.items():
self.session.cookies.set(k, v.value)
rr = self.session.get(self.server_url, allow_redirects=False, timeout=10)
if rr.status_code == 403:
logger.info("Cookies expired")
raise AuthenticationError("Cookies with User Agent are not valid anymore")
def terminate(self):
self.session.close()
def list_subtitles(self, video, languages):
imdbId = None
subtitles = []
if isinstance(video, Episode):
imdbId = video.series_imdb_id
else:
imdbId = video.imdb_id
if not imdbId:
logger.debug("No imdb number available to search with provider")
return subtitles
# query for subtitles with the imdbId
if isinstance(video, Episode):
subtitles = self.query(
video, languages, imdbId, season=video.season, episode=video.episode
)
else:
subtitles = self.query(video, languages, imdbId)
return subtitles
def query(self, video, languages, imdb_id, season=None, episode=None):
logger.debug("Searching subtitles for %r", imdb_id)
subtitles = []
type_ = "movie" if isinstance(video, Movie) else "episode"
search_link = f"{self.server_url}/find.php?cat=sub&find={imdb_id}"
r = self.session.get(search_link, timeout=30)
# A 404 should be returned if the imdb_id is not found; the site actually returns 200, but handle it just in case
if r.status_code == 404:
logger.debug("IMDB id {} not found on turkcealtyaziorg".format(imdb_id))
return subtitles
if r.status_code != 200:
r.raise_for_status()
soup_page = ParserBeautifulSoup(
r.content.decode("utf-8", "ignore"), ["html.parser"]
)
# 404 Error is in the meta description if the imdb_id was not found
meta_tag = soup_page.find("meta", {"name": "description"})
if not meta_tag or "404 Error" in meta_tag.attrs.get("content", ""):
logger.debug("IMDB id %s not found on turkcealtyaziorg", imdb_id)
return subtitles
try:
if type_ == "movie":
entries = soup_page.select(
"div.altyazi-list-wrapper > div > div.altsonsez2"
)
else:
entries = soup_page.select(
f"div.altyazi-list-wrapper > div > div.altsonsez1.sezon_{season}"
)
for item in entries:
is_pack = False
sub_page_link = (
self.server_url
+ item.select("div.alisim > div.fl > a")[0].attrs["href"]
)
sub_language = self.custom_identifiers.get(
item.select("div.aldil > span")[0].attrs["class"][0]
)
sub_language = Language.fromalpha3b(sub_language)
if type_ == "episode":
sub_season, sub_episode = [
x.text for x in item.select("div.alcd")[0].find_all("b")
]
sub_season = int(sub_season)
try:
sub_episode = int(sub_episode)
except ValueError:
is_pack = True
sub_uploader_container = item.select("div.alcevirmen")[0]
if sub_uploader_container.text != "":
sub_uploader = sub_uploader_container.text.strip()
else:
sub_uploader = self.custom_identifiers.get(
" ".join(sub_uploader_container.find("span").attrs["class"])
)
_sub_fps = item.select("div.alfps")[0].text
_sub_download_count = item.select("div.alindirme")[0].text
sub_release_info_list = list()
sub_rip_container = item.select("div.ta-container > div.ripdiv")[0]
for sub_rip in sub_rip_container.find_all("span"):
sub_release_info_list.append(
self.custom_identifiers.get(" ".join(sub_rip.attrs["class"]))
)
sub_release_info_list.extend(
x.strip() for x in sub_rip_container.text.strip().split("/")
)
sub_release_info = ",".join(sub_release_info_list)
sub_hearing_impaired = bool(
sub_rip_container.find("img", {"src": "/images/isitme.png"})
)
sub_released_at_string = item.select("div.ta-container > div.datediv")[
0
].text
_sub_released_at = self.get_approximate_time(sub_released_at_string)
if (sub_language in languages) and (
type_ == "movie"
or (sub_season == season)
and (is_pack or sub_episode == episode)
):
subtitle = self.subtitle_class(
sub_language,
sub_page_link,
sub_release_info,
sub_uploader,
hearing_impaired=sub_hearing_impaired,
season=sub_season if type_ == "episode" else None,
episode=(
(episode if is_pack else sub_episode)
if type_ == "episode"
else None
),
is_pack=bool(is_pack),
)
logger.debug("Found subtitle %r", subtitle)
subtitles.append(subtitle)
except Exception as e:
logger.debug(e)
return subtitles
def download_subtitle(self, subtitle: TurkceAltyaziOrgSubtitle):
if not isinstance(subtitle, TurkceAltyaziOrgSubtitle):
return
page_link = subtitle.page_link
sub_page_resp = self.session.get(page_link, timeout=30)
dl_page = ParserBeautifulSoup(
sub_page_resp.content.decode("utf-8", "ignore"),
["html.parser"],
)
idid = dl_page.find("input", {"name": "idid"}).get("value")
altid = dl_page.find("input", {"name": "altid"}).get("value")
sidid = dl_page.find("input", {"name": "sidid"}).get("value")
referer = page_link.encode("utf-8")
dl_resp = self.session.post(
self.server_dl_url,
data={
"idid": idid,
"altid": altid,
"sidid": sidid,
},
headers={"Referer": referer},
timeout=10,
)
if not dl_resp.content:
    logger.error("Unable to download subtitle. No data returned from provider")
    return
archive = get_archive_from_bytes(dl_resp.content)
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
def get_approximate_time(self, time_string):
time_string = time_string.strip().replace(" önce", "")
count, granularity = time_string.split(" ")
granularity = self.custom_identifiers[granularity]
count = int(count)
return (datetime.now() - relativedelta(**{granularity: count})).isoformat()
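# Illustrative example: the site shows relative timestamps such as "3 gün önce"
# ("3 days ago"); with the mapping above this becomes
# (datetime.now() - relativedelta(days=3)).isoformat().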

View file

@ -37,7 +37,6 @@ class TuSubtituloSubtitle(Subtitle):
super(TuSubtituloSubtitle, self).__init__(
language, hearing_impaired=False, page_link=sub_dict["download_url"]
)
self.language = language
self.sub_dict = sub_dict
self.release_info = sub_dict["metadata"]
self.found_matches = matches

View file

@ -158,10 +158,18 @@ def encode_audio_stream(path, ffmpeg_path, audio_stream_language=None):
# Use the ISO 639-2 code if available
audio_stream_language = get_ISO_639_2_code(audio_stream_language)
logger.debug(f"Whisper will use the '{audio_stream_language}' audio stream for {path}")
inp = inp[f'a:m:language:{audio_stream_language}']
out, _ = inp.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000, af="aresample=async=1") \
.run(cmd=[ffmpeg_path, "-nostdin"], capture_stdout=True, capture_stderr=True)
# 0 = Pick first stream in case there are multiple language streams of the same language,
# otherwise ffmpeg will try to combine multiple streams, but our output format doesn't support that.
# The first stream is probably the correct one, as later streams are usually commentaries
lang_map = f"0:m:language:{audio_stream_language}"
else:
# there is only one stream, so just use that one
lang_map = ""
out, _ = (
inp.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000, af="aresample=async=1")
.global_args("-map", lang_map)
.run(cmd=[ffmpeg_path, "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
logger.warning(f"ffmpeg failed to load audio: {e.stderr.decode()}")
@ -272,9 +280,10 @@ class WhisperAIProvider(Provider):
if out == None:
logger.info(f"Whisper cannot detect language of {path} because of missing/bad audio track")
return None
video_name = path if self.pass_video_name else None
r = self.session.post(f"{self.endpoint}/detect-language",
params={'encode': 'false'},
params={'encode': 'false', 'video_file': video_name},
files={'audio_file': out},
timeout=(self.response, self.timeout))

View file

@ -45,7 +45,7 @@ class YavkaNetSubtitle(Subtitle):
"""YavkaNet Subtitle."""
provider_name = 'yavkanet'
def __init__(self, language, filename, type, video, link, fps, subs_id_name, subs_id_value):
def __init__(self, language, filename, type, video, link, fps, subs_form_data):
super(YavkaNetSubtitle, self).__init__(language)
self.filename = filename
self.page_link = link
@ -53,8 +53,7 @@ class YavkaNetSubtitle(Subtitle):
self.video = video
self.fps = fps
self.release_info = filename
self.subs_id_name = subs_id_name
self.subs_id_value = subs_id_value
self.subs_form_data = subs_form_data
self.content = None
self._is_valid = False
if fps:
@ -110,7 +109,7 @@ class YavkaNetProvider(Provider):
self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
self.session.headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
self.session.headers["Accept-Language"] = "en-US,en;q=0.5"
self.session.headers["Accept-Encoding"] = "gzip, deflate, br"
self.session.headers["Accept-Encoding"] = "gzip, deflate"
self.session.headers["DNT"] = "1"
self.session.headers["Connection"] = "keep-alive"
self.session.headers["Upgrade-Insecure-Requests"] = "1"
@ -139,11 +138,11 @@ class YavkaNetProvider(Provider):
logger.debug('No subtitles found')
return subtitles
soup = BeautifulSoup(response.content, 'lxml')
soup = BeautifulSoup(response.content, 'html.parser')
rows = soup.findAll('tr')
# Search the most recent 50 rows only
for row in rows[:25]:
for row in rows[-50:]:
element = row.select_one('a.balon, a.selector')
if element:
link = element.get('href')
@ -163,20 +162,38 @@ class YavkaNetProvider(Provider):
element = row.find('a', {'class': 'click'})
uploader = element.get_text() if element else None
logger.info('Found subtitle link %r', link)
# slow down to prevent being throttled
time.sleep(1)
response = self.retry(self.session.get('https://yavka.net' + link))
if not response:
continue
soup = BeautifulSoup(response.content, 'lxml')
subs_id = soup.find("input")
if subs_id:
subs_id_name = subs_id['name']
subs_id_value = subs_id['value']
cache_link = 'https://yavka.net' + link + '/'
cache_key = sha1(cache_link.encode("utf-8")).digest()
request = region.get(cache_key)
if request is NO_VALUE:
# slow down to prevent being throttled
time.sleep(randint(0, 1))
response = self.retry(self.session.get('https://yavka.net' + link))
if not response:
logger.info('Subtitle page did not load: %s', link)
continue
soup = BeautifulSoup(response.content, 'html.parser')
post_form = soup.find('form', attrs={'method': 'POST'})
if post_form:
input_fields = post_form.find_all('input')
subs_form_data = {}
for input_field in input_fields:
input_name = input_field.get('name')
if input_name: # Only add to dictionary if the input has a name
subs_form_data[input_name] = input_field.get('value', '')
logger.info('Found subtitle form data "%s" for %s', subs_form_data, link)
else:
logger.info('Could not find subtitle form data: %s', link)
continue
else:
continue
# will fetch from cache
subs_form_data = {}
logger.info('Skipping routines. Will use cache: %s', link)
sub = self.download_archive_and_add_subtitle_files('https://yavka.net' + link + '/', language, video,
fps, subs_id_name, subs_id_value)
fps, subs_form_data)
for s in sub:
s.title = title
s.notes = notes
@ -195,52 +212,48 @@ class YavkaNetProvider(Provider):
else:
seeking_subtitle_file = subtitle.filename
arch = self.download_archive_and_add_subtitle_files(subtitle.page_link, subtitle.language, subtitle.video,
subtitle.fps, subtitle.subs_id_name,
subtitle.subs_id_value)
subtitle.fps, subtitle.subs_form_data)
for s in arch:
if s.filename == seeking_subtitle_file:
subtitle.content = s.content
@staticmethod
def process_archive_subtitle_files(archive_stream, language, video, link, fps, subs_id_name, subs_id_value):
def process_archive_subtitle_files(archive_stream, language, video, link, fps, subs_form_data):
subtitles = []
media_type = 'episode' if isinstance(video, Episode) else 'movie'
for file_name in archive_stream.namelist():
if file_name.lower().endswith(('.srt', '.sub')):
logger.info('Found subtitle file %r', file_name)
subtitle = YavkaNetSubtitle(language, file_name, media_type, video, link, fps, subs_id_name,
subs_id_value)
subtitle = YavkaNetSubtitle(language, file_name, media_type, video, link, fps, subs_form_data)
subtitle.content = fix_line_ending(archive_stream.read(file_name))
subtitles.append(subtitle)
return subtitles
def download_archive_and_add_subtitle_files(self, link, language, video, fps, subs_id_name, subs_id_value):
def download_archive_and_add_subtitle_files(self, link, language, video, fps, subs_form_data):
logger.info('Downloading subtitle %r', link)
cache_key = sha1(link.encode("utf-8")).digest()
request = region.get(cache_key)
if request is NO_VALUE:
time.sleep(1)
request = self.retry(self.session.post(link, data={
subs_id_name: subs_id_value,
'lng': language.basename.upper()
}, headers={
request = self.retry(self.session.post(link, data=subs_form_data, headers={
'referer': link
}, allow_redirects=False))
if not request:
return []
request.raise_for_status()
region.set(cache_key, request)
logger.info('Writing caching file %s for %s', codecs.encode(cache_key, 'hex_codec').decode('utf-8'), link)
else:
logger.info('Cache file: %s', codecs.encode(cache_key, 'hex_codec').decode('utf-8'))
logger.info('Using cache file %s for %s', codecs.encode(cache_key, 'hex_codec').decode('utf-8'), link)
try:
archive_stream = io.BytesIO(request.content)
if is_rarfile(archive_stream):
return self.process_archive_subtitle_files(RarFile(archive_stream), language, video, link, fps,
subs_id_name, subs_id_value)
subs_form_data)
elif is_zipfile(archive_stream):
return self.process_archive_subtitle_files(ZipFile(archive_stream), language, video, link, fps,
subs_id_name, subs_id_value)
subs_form_data)
except:
pass

View file

@ -177,28 +177,41 @@ class ZimukuProvider(Provider):
] # remove ext because it can be an archive type
language = Language("eng")
language_list = []
for img in sub.find("td", class_="tac lang").find_all("img"):
if (
"china" in img.attrs["src"]
and "hongkong" in img.attrs["src"]
):
language = Language("zho").add(Language('zho', 'TW', None))
logger.debug("language:" + str(language))
language = Language("zho").add(Language('zho', 'TW', None))
language_list.append(language)
elif (
"china" in img.attrs["src"]
or "jollyroger" in img.attrs["src"]
):
logger.debug("language chinese simplified found: " + str(language))
language = Language("zho")
language_list.append(language)
elif "hongkong" in img.attrs["src"]:
logger.debug("language chinese traditional found: " + str(language))
language = Language('zho', 'TW', None)
break
language_list.append(language)
sub_page_link = urljoin(self.server_url, a.attrs["href"])
backup_session = copy.deepcopy(self.session)
backup_session.headers["Referer"] = link
subs.append(
self.subtitle_class(language, sub_page_link, name, backup_session, year)
)
# Mark each language of the subtitle as its own subtitle, and add it to the list, when handling archives or subtitles
# with multiple languages to ensure each language is identified as its own subtitle since they are the same archive file
# but will have its own file when downloaded and extracted.
for language in language_list:
subs.append(
self.subtitle_class(language, sub_page_link, name, backup_session, year)
)
return subs

View file

@ -414,7 +414,7 @@ class Subtitle(Subtitle_):
encoding=self.get_encoding())
submods = SubtitleModifications(debug=debug)
if submods.load(content=self.text, language=self.language):
if submods.load(content=self.text, language=self.language, mods=self.mods):
logger.info("Applying mods: %s", self.mods)
submods.modify(*self.mods)
self.mods = submods.mods_used

View file

@ -22,7 +22,7 @@ class SubtitleModifications(object):
language = None
initialized_mods = {}
mods_used = []
only_uppercase = False
mostly_uppercase = False
f = None
font_style_tag_start = u"{\\"
@ -32,15 +32,18 @@ class SubtitleModifications(object):
self.initialized_mods = {}
self.mods_used = []
def load(self, fn=None, content=None, language=None, encoding="utf-8"):
def load(self, fn=None, content=None, language=None, encoding="utf-8", mods=None):
"""
:param encoding: used for decoding the content when fn is given, not used in case content is given
:param language: babelfish.Language language of the subtitle
:param fn: filename
:param content: unicode
:param mods: list of mods to be applied to subtitles
:return:
"""
if mods is None:
mods = []
if language:
self.language = Language.rebuild(language, forced=False)
self.initialized_mods = {}
@ -48,7 +51,11 @@ class SubtitleModifications(object):
if fn:
self.f = pysubs2.load(fn, encoding=encoding)
elif content:
self.f = pysubs2.SSAFile.from_string(content)
from_string_additional_kwargs = {}
if 'remove_tags' not in mods:
from_string_additional_kwargs = {'keep_html_tags': True, 'keep_unknown_html_tags': True,
'keep_ssa_tags': True}
self.f = pysubs2.SSAFile.from_string(content, **from_string_additional_kwargs)
except (IOError,
UnicodeDecodeError,
pysubs2.exceptions.UnknownFPSError,
@ -111,7 +118,7 @@ class SubtitleModifications(object):
identifier, self.language)
continue
if mod_cls.only_uppercase and not self.only_uppercase:
if mod_cls.mostly_uppercase and not self.mostly_uppercase:
if self.debug:
logger.debug("Skipping %s, because the subtitle isn't all uppercase", identifier)
continue
@ -181,41 +188,43 @@ class SubtitleModifications(object):
return line_mods, non_line_mods, used_mods
def detect_uppercase(self):
entries_used = 0
for entry in self.f:
entry_used = False
sub = entry.text
# skip HI bracket entries, those might actually be lowercase
sub = sub.strip()
for processor in registry.mods["remove_HI"].processors[:4]:
sub = processor.process(sub)
MAXIMUM_ENTRIES = 50
MINIMUM_UPPERCASE_PERCENTAGE = 90
MINIMUM_UPPERCASE_COUNT = 100
entry_count = 0
uppercase_count = 0
lowercase_count = 0
if sub.strip():
# only consider alphabetic characters to determine if uppercase
alpha_sub = ''.join([i for i in sub if i.isalpha()])
if alpha_sub and not alpha_sub.isupper():
return False
for entry in self.f:
sub = entry.text
# skip HI bracket entries, those might actually be lowercase
sub = sub.strip()
for processor in registry.mods["remove_HI"].processors[:4]:
sub = processor.process(sub)
entry_used = True
else:
# skip full entry
break
if sub.strip():
uppercase_count += sum(1 for char in sub if char.isupper())
lowercase_count += sum(1 for char in sub if char.islower())
entry_count += 1
if entry_used:
entries_used += 1
if entry_count >= MAXIMUM_ENTRIES:
break
if entries_used == 40:
break
return True
total_character_count = lowercase_count + uppercase_count
if total_character_count > 0 and uppercase_count > MINIMUM_UPPERCASE_COUNT:
uppercase_percentage = uppercase_count * 100 / total_character_count
logger.debug(f"Uppercase mod percentage is {uppercase_percentage:.2f}% vs minimum of {MINIMUM_UPPERCASE_PERCENTAGE}%")
return uppercase_percentage >= MINIMUM_UPPERCASE_PERCENTAGE
return False
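# Worked example (illustrative): 950 uppercase and 50 lowercase characters across
# the first 50 usable entries give 950 * 100 / 1000 = 95.0%, which clears both the
# 100-character floor and the 90% threshold, so the subtitle is treated as mostly
# uppercase.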
def modify(self, *mods):
new_entries = []
start = time.time()
self.only_uppercase = self.detect_uppercase()
self.mostly_uppercase = self.detect_uppercase()
if self.only_uppercase and self.debug:
logger.debug("Full-uppercase subtitle found")
if self.mostly_uppercase and self.debug:
logger.debug("Mostly-uppercase subtitle found")
line_mods, non_line_mods, mods_used = self.prepare_mods(*mods)
self.mods_used = mods_used

View file

@ -19,7 +19,7 @@ class SubtitleModification(object):
order = None
modifies_whole_file = False # operates on the whole file, not individual entries
apply_last = False
only_uppercase = False
mostly_uppercase = False
pre_processors = []
processors = []
post_processors = []

View file

@ -175,7 +175,7 @@ class FixUppercase(SubtitleModification):
modifies_whole_file = True
exclusive = True
order = 41
only_uppercase = True
mostly_uppercase = True
apply_last = True
long_description = "Some subtitles are in all-uppercase letters. This at least makes them readable."

View file

@ -48,8 +48,8 @@ class HearingImpaired(SubtitleTextModification):
else "" if not match.group(1).startswith(" ") else " ",
name="HI_before_colon_noncaps"),
# brackets (only remove if at least 3 chars in brackets)
NReProcessor(re.compile(r'(?sux)-?%(t)s["\']*[([][^([)\]]+?(?=[A-zÀ-ž"\'.]{3,})[^([)\]]+[)\]]["\']*[\s:]*%(t)s' %
# brackets (only remove if at least 3 chars in brackets, allow numbers and spaces inside brackets)
NReProcessor(re.compile(r'(?sux)-?%(t)s["\']*\[(?=[^\[\]]{3,})[A-Za-zÀ-ž0-9\s\'".:-_&+]+[)\]]["\']*[\s:]*%(t)s' %
{"t": TAG}), "", name="HI_brackets"),
#NReProcessor(re.compile(r'(?sux)-?%(t)s[([]%(t)s(?=[A-zÀ-ž"\'.]{3,})[^([)\]]+%(t)s$' % {"t": TAG}),
@ -71,9 +71,19 @@ class HearingImpaired(SubtitleTextModification):
#NReProcessor(re.compile(r'(?um)(^-?\s?[([][A-zÀ-ž-_\s]{3,}[)\]](?:(?=$)|:\s*))'), "",
# name="HI_brackets_special"),
# all caps line (at least 4 consecutive uppercase chars)
NReProcessor(re.compile(r'(?u)(^(?=.*[A-ZÀ-Ž&+]{4,})[A-ZÀ-Ž-_\s&+]+$)'), "", name="HI_all_caps",
supported=lambda p: not p.only_uppercase),
# all caps line (at least 4 consecutive uppercase chars,only remove if line matches common HI cues, otherwise keep)
NReProcessor(
re.compile(r'(?u)(^(?=.*[A-ZÀ-Ž&+]{4,})[A-ZÀ-Ž-_\s&+]+$)'),
lambda m: "" if any(
cue in m.group(1)
for cue in [
"LAUGH", "APPLAU", "CHEER", "MUSIC", "GASP", "SIGHS", "GROAN", "COUGH", "SCREAM", "SHOUT", "WHISPER",
"PHONE", "DOOR", "KNOCK", "FOOTSTEP", "THUNDER", "EXPLOSION", "GUNSHOT", "SIREN"
]
) else m.group(1),
name="HI_all_caps",
supported=lambda p: not p.mostly_uppercase
),
# remove MAN:
NReProcessor(re.compile(r'(?suxi)(\b(?:WO)MAN:\s*)'), "", name="HI_remove_man"),
@ -83,7 +93,7 @@ class HearingImpaired(SubtitleTextModification):
# all caps at start before new sentence
NReProcessor(re.compile(r'(?u)^(?=[A-ZÀ-Ž]{4,})[A-ZÀ-Ž-_\s]+\s([A-ZÀ-Ž][a-zà-ž].+)'), r"\1",
name="HI_starting_upper_then_sentence", supported=lambda p: not p.only_uppercase),
name="HI_starting_upper_then_sentence", supported=lambda p: not p.mostly_uppercase),
]
post_processors = empty_line_post_processors

View file

@ -41,7 +41,7 @@ class FixOCR(SubtitleTextModification):
# don't modify stuff inside quotes
#NReProcessor(re.compile(r'(?u)(^[^"\'’ʼ❜‘‛”“‟„]*(?<=[A-ZÀ-Ž]{3})[A-ZÀ-Ž-_\s0-9]+)'
# r'(["\'’ʼ❜‘‛”“‟„]*[.,‚،⹁、;]+)(\s*)(?!["\'’ʼ❜‘‛”“‟„])'),
# r"\1:\3", name="OCR_fix_HI_colons", supported=lambda p: not p.only_uppercase),
# r"\1:\3", name="OCR_fix_HI_colons", supported=lambda p: not p.mostly_uppercase),
# fix F'bla
NReProcessor(re.compile(r'(?u)(\bF)(\')([A-zÀ-ž]*\b)'), r"\1\3", name="OCR_fix_F"),
WholeLineProcessor(self.data_dict["WholeLines"], name="OCR_replace_line"),

View file

@ -1 +1 @@
20.13
22.13.0

frontend/Dockerfile Normal file
View file

@ -0,0 +1,33 @@
# syntax=docker/dockerfile:1
ARG NODE_VERSION=20
FROM node:${NODE_VERSION}-alpine
# Use development node environment by default.
ENV NODE_ENV development
WORKDIR /app
# Copy package.json and package-lock.json to the working directory
COPY package.json package-lock.json ./
# Install dependencies
RUN npm install
# Copy the rest of the source files into the image
COPY . .
# Change ownership of the /app directory to the node user
RUN chown -R node:node /app
# Switch to the node user
USER node
# Ensure node_modules/.bin is in the PATH
ENV PATH /app/node_modules/.bin:$PATH
# Expose the port that the application listens on
EXPOSE 5173
# Run the application
CMD ["npm", "start"]

View file

@ -4,6 +4,7 @@
- Either [Node.js](https://nodejs.org/) installed manually or using [Node Version Manager](https://github.com/nvm-sh/nvm)
- npm (included in Node.js)
- (Optional) [Docker](https://www.docker.com/) for building and running the frontend using a Docker image
> The recommended Node version to use and maintained is managed on the `.nvmrc` file. You can either install manually
> or use `nvm install` followed by `nvm use`.
@ -55,6 +56,36 @@
$ npm start
```
## Building with Docker
You can now build and run the frontend using Docker. Follow these steps:
### Benefits of Using Docker
- **Consistency**: Ensures the app runs in the same environment across all systems.
- **Isolation**: Avoids dependency conflicts with other projects on your machine.
- **Ease of Deployment**: Simplifies the process of deploying the app to production.
### Steps to Build and Run
1. Build the Docker image with the Node.js version specified in `.nvmrc`:
```
$ docker build --build-arg NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "20") -t your-image-name .
```
- The `--build-arg NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "20")` argument ensures the Docker image uses the Node.js version specified in the `.nvmrc` file.
2. Run the Docker container:
```
$ docker run -p 5173:5173 your-image-name
```
- Add `.env.development.local` with the path to your environment file if needed (see the example below).
3. Open the app in your browser at `http://localhost:5173`.
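If you keep local overrides in a `.env.development.local` file, you can pass it to the container when running it (a sketch; the file name here is just whatever your setup uses):
```
$ docker run -p 5173:5173 --env-file .env.development.local your-image-name
```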
## Available Scripts
In the project directory, you can run:
@ -75,4 +106,4 @@ Builds the app in production mode and save to the `build` folder.
Format code for all files in `frontend` folder
This command will be automatic triggered before any commits to git. Run manually if you modify `.prettierignore` or `.prettierrc`
This command will be automatically triggered before any commits to git. Run manually if you modify `.prettierignore` or `.prettierrc`.

View file

@ -3,7 +3,7 @@ import { dependencies } from "../package.json";
const vendors = [
"react",
"react-router-dom",
"react-router",
"react-dom",
"@tanstack/react-query",
"axios",

View file

@ -1,7 +1,7 @@
/* eslint-disable no-console */
/// <reference types="node" />
import { readFile } from "fs/promises";
import { readFileSync } from "fs";
import { get } from "lodash";
import { parse } from "yaml";
@ -12,9 +12,9 @@ class ConfigReader {
this.config = {};
}
async open(path: string) {
open(path: string) {
try {
const rawConfig = await readFile(path, "utf8");
const rawConfig = readFileSync(path, "utf8");
this.config = parse(rawConfig);
} catch (err) {
// We don't want to catch the error here, handle it on getValue method
@ -33,7 +33,7 @@ class ConfigReader {
}
}
export default async function overrideEnv(env: Record<string, string>) {
export default function overrideEnv(env: Record<string, string>) {
const configPath = env["VITE_BAZARR_CONFIG_FILE"];
if (configPath === undefined) {
@ -41,7 +41,7 @@ export default async function overrideEnv(env: Record<string, string>) {
}
const reader = new ConfigReader();
await reader.open(configPath);
reader.open(configPath);
if (env["VITE_API_KEY"] === undefined) {
try {

File diff suppressed because it is too large

View file

@ -13,43 +13,43 @@
},
"private": true,
"dependencies": {
"@mantine/core": "^7.14.3",
"@mantine/dropzone": "^7.14.3",
"@mantine/form": "^7.14.3",
"@mantine/hooks": "^7.14.3",
"@mantine/modals": "^7.14.3",
"@mantine/notifications": "^7.14.3",
"@tanstack/react-query": "^5.40.1",
"@mantine/core": "^7.17.4",
"@mantine/dropzone": "^7.17.4",
"@mantine/form": "^7.17.4",
"@mantine/hooks": "^7.17.4",
"@mantine/modals": "^7.17.4",
"@mantine/notifications": "^7.17.4",
"@tanstack/react-query": "^5.64.1",
"@tanstack/react-table": "^8.19.2",
"axios": "^1.7.4",
"axios": "^1.8.2",
"braces": "^3.0.3",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-router-dom": "^6.23.1",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-router": "^7.1.1",
"socket.io-client": "^4.7.5"
},
"devDependencies": {
"@fontsource/roboto": "^5.0.12",
"@fortawesome/fontawesome-svg-core": "^6.7.1",
"@fortawesome/free-brands-svg-icons": "^6.7.1",
"@fortawesome/free-regular-svg-icons": "^6.7.1",
"@fortawesome/free-solid-svg-icons": "^6.7.1",
"@fortawesome/fontawesome-svg-core": "^6.7.2",
"@fortawesome/free-brands-svg-icons": "^6.7.2",
"@fortawesome/free-regular-svg-icons": "^6.7.2",
"@fortawesome/free-solid-svg-icons": "^6.7.2",
"@fortawesome/react-fontawesome": "^0.2.2",
"@tanstack/react-query-devtools": "^5.40.1",
"@testing-library/jest-dom": "^6.4.2",
"@testing-library/react": "^15.0.5",
"@testing-library/react": "^16.1.0",
"@testing-library/user-event": "^14.5.2",
"@types/jest": "^29.5.12",
"@types/lodash": "^4.17.1",
"@types/node": "^20.12.6",
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",
"@types/node": "^22.14.1",
"@types/react": "^19.1.2",
"@types/react-dom": "^19.1.2",
"@typescript-eslint/eslint-plugin": "^7.16.0",
"@typescript-eslint/parser": "^7.16.0",
"@vite-pwa/assets-generator": "^0.2.4",
"@vite-pwa/assets-generator": "^1.0.0",
"@vitejs/plugin-react": "^4.2.1",
"@vitest/coverage-v8": "^1.4.0",
"@vitest/ui": "^1.2.2",
"@vitest/coverage-v8": "^3.1.1",
"@vitest/ui": "^3.1.1",
"clsx": "^2.1.0",
"eslint": "^8.57.0",
"eslint-plugin-react-hooks": "^4.6.0",
@ -57,20 +57,21 @@
"eslint-plugin-simple-import-sort": "^12.1.0",
"eslint-plugin-testing-library": "^6.2.0",
"husky": "^9.0.11",
"jsdom": "^24.0.0",
"jsdom": "^26.0.0",
"lodash": "^4.17.21",
"msw": "^2.7.0",
"postcss-preset-mantine": "^1.14.4",
"postcss-simple-vars": "^7.0.1",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^3.2.4",
"pretty-quick": "^4.0.0",
"recharts": "^2.12.7",
"sass": "^1.74.1",
"recharts": "^2.15.0",
"sass-embedded": "^1.86.1",
"typescript": "^5.4.4",
"vite": "^5.4.8",
"vite-plugin-checker": "^0.6.4",
"vite-plugin-pwa": "^0.20.0",
"vitest": "^1.2.2",
"vite": "^6.3.2",
"vite-plugin-checker": "^0.9.1",
"vite-plugin-pwa": "^1.0.0",
"vitest": "^3.1.1",
"yaml": "^2.4.1"
},
"scripts": {

View file

@ -1,9 +1,9 @@
.header {
@include light {
@include mantine.light {
color: var(--mantine-color-gray-0);
}
@include dark {
@include mantine.dark {
color: var(--mantine-color-dark-0);
}
}

View file

@ -2,16 +2,16 @@
border-color: var(--mantine-color-gray-5);
text-decoration: none;
@include dark {
@include mantine.dark {
border-color: var(--mantine-color-dark-5);
}
&.active {
border-left: 2px solid $color-brand-4;
border-left: 2px solid bazarr.$color-brand-4;
background-color: var(--mantine-color-gray-1);
@include dark {
border-left: 2px solid $color-brand-8;
@include mantine.dark {
border-left: 2px solid bazarr.$color-brand-8;
background-color: var(--mantine-color-dark-8);
}
}
@ -19,7 +19,7 @@
&.hover {
background-color: var(--mantine-color-gray-0);
@include dark {
@include mantine.dark {
background-color: var(--mantine-color-dark-7);
}
}
@ -39,7 +39,7 @@
.nav {
background-color: var(--mantine-color-gray-2);
@include dark {
@include mantine.dark {
background-color: var(--mantine-color-dark-8);
}
}
@ -50,7 +50,7 @@
width: 100%;
color: var(--mantine-color-gray-8);
@include dark {
@include mantine.dark {
color: var(--mantine-color-gray-5);
}
}

View file

@ -6,7 +6,7 @@ import React, {
useMemo,
useState,
} from "react";
import { matchPath, NavLink, RouteObject, useLocation } from "react-router-dom";
import { matchPath, NavLink, RouteObject, useLocation } from "react-router";
import {
Anchor,
AppShell,
@ -114,7 +114,10 @@ const AppNavbar: FunctionComponent = () => {
return (
<AppShell.Navbar p="xs" className={styles.nav}>
<Selection.Provider value={{ selection, select }}>
<AppShell.Section grow>
<AppShell.Section
grow
style={{ overflowY: "auto", scrollbarWidth: "none" }}
>
<Stack gap={0}>
{routes.map((route, idx) => (
<RouteItem

View file

@ -1,9 +1,18 @@
import { http } from "msw";
import { HttpResponse } from "msw";
import { describe, it } from "vitest";
import { render } from "@/tests";
import { customRender } from "@/tests";
import server from "@/tests/mocks/node";
import App from ".";
describe("App", () => {
it("should render without crash", () => {
render(<App />);
server.use(
http.get("/api/system/searches", () => {
return HttpResponse.json({});
}),
);
customRender(<App />);
});
});

View file

@ -1,5 +1,5 @@
import { FunctionComponent, useEffect, useState } from "react";
import { Outlet, useNavigate } from "react-router-dom";
import { Outlet, useNavigate } from "react-router";
import { AppShell } from "@mantine/core";
import { useWindowEvent } from "@mantine/hooks";
import { showNotification } from "@mantine/notifications";

View file

@ -1,5 +1,5 @@
import { FunctionComponent, useEffect } from "react";
import { useNavigate } from "react-router-dom";
import { useNavigate } from "react-router";
import { LoadingOverlay } from "@mantine/core";
import { useSystemSettings } from "@/apis/hooks";

View file

@ -5,7 +5,7 @@ import {
useContext,
useMemo,
} from "react";
import { createBrowserRouter, RouterProvider } from "react-router-dom";
import { createBrowserRouter, RouterProvider } from "react-router";
import {
faClock,
faCogs,
@ -34,6 +34,7 @@ import SeriesMassEditor from "@/pages/Series/Editor";
import SettingsGeneralView from "@/pages/Settings/General";
import SettingsLanguagesView from "@/pages/Settings/Languages";
import SettingsNotificationsView from "@/pages/Settings/Notifications";
import SettingsPlexView from "@/pages/Settings/Plex";
import SettingsProvidersView from "@/pages/Settings/Providers";
import SettingsRadarrView from "@/pages/Settings/Radarr";
import SettingsSchedulerView from "@/pages/Settings/Scheduler";
@ -222,6 +223,11 @@ function useRoutes(): CustomRouteObject[] {
name: "Radarr",
element: <SettingsRadarrView></SettingsRadarrView>,
},
{
path: "plex",
name: "Plex",
element: <SettingsPlexView></SettingsPlexView>,
},
{
path: "notifications",
name: "Notifications",
@ -324,7 +330,10 @@ export const Router: FunctionComponent = () => {
// TODO: Move this outside the function component scope
const router = useMemo(
() => createBrowserRouter(routes, { basename: Environment.baseUrl }),
() =>
createBrowserRouter(routes, {
basename: Environment.baseUrl,
}),
[routes],
);

View file

@ -1,4 +1,4 @@
import { RouteObject } from "react-router-dom";
import { RouteObject } from "react-router";
import { IconDefinition } from "@fortawesome/free-solid-svg-icons";
declare namespace Route {

View file

@ -1,4 +1,5 @@
import { useCallback, useEffect, useState } from "react";
import { useSearchParams } from "react-router";
import {
QueryKey,
useQuery,
@ -34,7 +35,12 @@ export function usePaginationQuery<
): UsePaginationQueryResult<TObject> {
const client = useQueryClient();
const [page, setIndex] = useState(0);
const [searchParams] = useSearchParams();
const [page, setIndex] = useState(
searchParams.get("page") ? Number(searchParams.get("page")) - 1 : 0,
);
const pageSize = usePageSize();
const start = page * pageSize;
@ -62,7 +68,14 @@ export function usePaginationQuery<
}
});
}
}, [results.isSuccess, results.data, client, cacheIndividual, queryKey]);
}, [
results.isSuccess,
results.data,
client,
cacheIndividual,
queryKey,
page,
]);
const totalCount = data?.total ?? 0;
const pageCount = Math.ceil(totalCount / pageSize);

View file

@ -1,3 +1,5 @@
@use "mantine" as *;
$color-brand-0: #f8f0fc;
$color-brand-1: #f3d9fa;
$color-brand-2: #eebefa;

View file

@ -1,18 +1,18 @@
$navbar-width: 200;
:export {
colorBrand0: $color-brand-0;
colorBrand1: $color-brand-1;
colorBrand2: $color-brand-2;
colorBrand3: $color-brand-3;
colorBrand4: $color-brand-4;
colorBrand5: $color-brand-5;
colorBrand6: $color-brand-6;
colorBrand7: $color-brand-7;
colorBrand8: $color-brand-8;
colorBrand9: $color-brand-9;
colorBrand0: bazarr.$color-brand-0;
colorBrand1: bazarr.$color-brand-1;
colorBrand2: bazarr.$color-brand-2;
colorBrand3: bazarr.$color-brand-3;
colorBrand4: bazarr.$color-brand-4;
colorBrand5: bazarr.$color-brand-5;
colorBrand6: bazarr.$color-brand-6;
colorBrand7: bazarr.$color-brand-7;
colorBrand8: bazarr.$color-brand-8;
colorBrand9: bazarr.$color-brand-9;
headerHeight: $header-height;
headerHeight: bazarr.$header-height;
navBarWidth: $navbar-width;
}

View file

@ -2,13 +2,13 @@
.root {
--ai-bg: transparent;
@include light {
@include mantine.light {
color: var(--mantine-color-dark-2);
--ai-hover: var(--mantine-color-gray-1);
--ai-hover-color: var(--mantine-color-gray-1);
}
@include dark {
@include mantine.dark {
color: var(--mantine-color-dark-0);
--ai-hover: var(--mantine-color-gray-8);
}

View file

@ -1,44 +1,46 @@
@use "sass:color";
@layer mantine {
.root {
background-color: transparentize($color-brand-6, 0.8);
background-color: color.adjust(bazarr.$color-brand-6, $alpha: -0.8);
&[data-variant="warning"] {
color: lighten($color-warning-2, 0.8);
background-color: transparentize($color-warning-6, 0.8);
color: color.adjust(bazarr.$color-warning-2, $lightness: 80%);
background-color: color.adjust(bazarr.$color-warning-6, $alpha: -0.8);
}
&[data-variant="highlight"] {
color: lighten($color-highlight-2, 1);
background-color: transparentize($color-highlight-5, 0.8);
color: color.adjust(bazarr.$color-highlight-2, $lightness: 100%);
background-color: color.adjust(bazarr.$color-highlight-5, $alpha: -0.8);
}
&[data-variant="disabled"] {
color: lighten($color-disabled-0, 1);
background-color: transparentize($color-disabled-7, 0.8);
color: color.adjust(bazarr.$color-disabled-0, $lightness: 100%);
background-color: color.adjust(bazarr.$color-disabled-7, $alpha: -0.8);
}
&[data-variant="light"] {
color: var(--mantine-color-dark-0);
background-color: transparentize($color-disabled-9, 0.8);
background-color: color.adjust(bazarr.$color-disabled-9, $alpha: -0.8);
}
@include light {
color: $color-brand-6;
background-color: transparentize($color-brand-3, 0.8);
@include mantine.light {
color: bazarr.$color-brand-6;
background-color: color.adjust(bazarr.$color-brand-3, $alpha: -0.8);
&[data-variant="warning"] {
color: darken($color-warning-7, 1);
background-color: transparentize($color-warning-6, 0.8);
color: color.adjust(bazarr.$color-warning-7, $lightness: -100%);
background-color: color.adjust(bazarr.$color-warning-6, $alpha: -0.8);
}
&[data-variant="disabled"] {
color: darken($color-disabled-6, 1);
background-color: transparentize($color-disabled-4, 0.8);
color: color.adjust(bazarr.$color-disabled-6, $lightness: -100%);
background-color: color.adjust(bazarr.$color-disabled-4, $alpha: -0.8);
}
&[data-variant="highlight"] {
color: darken($color-highlight-6, 1);
background-color: transparentize($color-highlight-5, 0.8);
color: color.adjust(bazarr.$color-highlight-6, $lightness: -100%);
background-color: color.adjust(bazarr.$color-highlight-5, $alpha: -0.8);
}
&[data-variant="light"] {

View file

@@ -1,6 +1,6 @@
@layer mantine {
.root {
@include dark {
@include mantine.dark {
color: var(--mantine-color-dark-0);
}
@@ -11,7 +11,7 @@
}
.root:disabled {
@include dark {
@include mantine.dark {
color: var(--mantine-color-dark-9);
}
}

View file

@@ -1,9 +1,9 @@
import { describe, it } from "vitest";
import { Search } from "@/components/index";
import { render } from "@/tests";
import { customRender } from "@/tests";
describe("Search Bar", () => {
it.skip("should render the closed empty state", () => {
render(<Search />);
customRender(<Search />);
});
});
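These tests swap React Testing Library's bare `render` for a project-level `customRender`. Its implementation is not shown in this diff; a plausible sketch is a thin wrapper that mounts the component inside the app's shared providers (theme, query client, router), so individual tests don't have to:

```tsx
// Hedged sketch of a customRender helper; the provider list is assumed.
import { render, RenderOptions } from "@testing-library/react";
import { ReactElement, ReactNode } from "react";

// Stand-in for whatever providers the real helper wires up.
const AllProviders = ({ children }: { children: ReactNode }) => <>{children}</>;

export const customRender = (ui: ReactElement, options?: RenderOptions) =>
  render(ui, { wrapper: AllProviders, ...options });
```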

View file

@@ -1,5 +1,5 @@
import { FunctionComponent, useMemo, useState } from "react";
import { useNavigate } from "react-router-dom";
import { useNavigate } from "react-router";
import {
ComboboxItem,
em,
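The import source changes from `react-router-dom` to `react-router`: as of react-router v7 the `-dom` package is essentially a re-export layer, and imports are expected to come from the core package. Call sites are unchanged, for example:

```tsx
import { useNavigate } from "react-router"; // was "react-router-dom"

// Illustrative component, not from the diff; the hook API is identical.
const BackButton = () => {
  const navigate = useNavigate();
  return <button onClick={() => navigate(-1)}>Back</button>;
};
```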

View file

@@ -11,7 +11,6 @@ type MutateActionProps<DATA, VAR> = Omit<
args: () => VAR | null;
onSuccess?: (args: DATA) => void;
onError?: () => void;
noReset?: boolean;
};
function MutateAction<DATA, VAR>({

View file

@@ -10,7 +10,6 @@ type MutateButtonProps<DATA, VAR> = Omit<
args: () => VAR | null;
onSuccess?: (args: DATA) => void;
onError?: () => void;
noReset?: boolean;
};
function MutateButton<DATA, VAR>({

View file

@@ -1,5 +1,5 @@
import { describe, it } from "vitest";
import { render, screen } from "@/tests";
import { customRender, screen } from "@/tests";
import { Language } from ".";
describe("Language text", () => {
@@ -9,13 +9,13 @@ describe("Language text", () => {
};
it("should show short text", () => {
render(<Language.Text value={testLanguage}></Language.Text>);
customRender(<Language.Text value={testLanguage}></Language.Text>);
expect(screen.getByText(testLanguage.code2)).toBeDefined();
});
it("should show long text", () => {
render(<Language.Text value={testLanguage} long></Language.Text>);
customRender(<Language.Text value={testLanguage} long></Language.Text>);
expect(screen.getByText(testLanguage.name)).toBeDefined();
});
@@ -23,7 +23,7 @@ describe("Language text", () => {
const testLanguageWithHi: Language.Info = { ...testLanguage, hi: true };
it("should show short text with HI", () => {
render(<Language.Text value={testLanguageWithHi}></Language.Text>);
customRender(<Language.Text value={testLanguageWithHi}></Language.Text>);
const expectedText = `${testLanguageWithHi.code2}:HI`;
@@ -31,7 +31,9 @@
});
it("should show long text with HI", () => {
render(<Language.Text value={testLanguageWithHi} long></Language.Text>);
customRender(
<Language.Text value={testLanguageWithHi} long></Language.Text>,
);
const expectedText = `${testLanguageWithHi.name} HI`;
@@ -44,7 +46,9 @@
};
it("should show short text with Forced", () => {
render(<Language.Text value={testLanguageWithForced}></Language.Text>);
customRender(
<Language.Text value={testLanguageWithForced}></Language.Text>,
);
const expectedText = `${testLanguageWithHi.code2}:Forced`;
@@ -52,7 +56,9 @@
});
it("should show long text with Forced", () => {
render(<Language.Text value={testLanguageWithForced} long></Language.Text>);
customRender(
<Language.Text value={testLanguageWithForced} long></Language.Text>,
);
const expectedText = `${testLanguageWithHi.name} Forced`;
@@ -73,7 +79,7 @@ describe("Language list", () => {
];
it("should show all languages", () => {
render(<Language.List value={elements}></Language.List>);
customRender(<Language.List value={elements}></Language.List>);
elements.forEach((value) => {
expect(screen.getByText(value.name)).toBeDefined();

View file

@@ -28,11 +28,11 @@ const FrameRateForm: FunctionComponent<Props> = ({ selections, onSubmit }) => {
},
validate: {
from: FormUtils.validation(
(value) => value > 0,
(value: number) => value > 0,
"The From value must be larger than 0",
),
to: FormUtils.validation(
(value) => value > 0,
(value: number) => value > 0,
"The To value must be larger than 0",
),
},
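This and the following form hunks all make the same change: each callback passed to `FormUtils.validation` gains an explicit parameter type instead of relying on inference. Assuming the helper has roughly the generic shape below (a sketch, not the project's code), annotating the parameter pins the generic when the surrounding form types no longer supply it:

```ts
// Assumed shape of FormUtils.validation: a predicate plus an error
// message, returning a Mantine-style validator (message on failure,
// null on success).
function validation<T>(
  check: (value: T) => boolean,
  message: string,
): (value: T) => string | null {
  return (value) => (check(value) ? null : message);
}

// Mirrors the call site above; the annotation fixes T as number.
const fromRule = validation(
  (value: number) => value > 0,
  "The From value must be larger than 0",
);
console.log(fromRule(0)); // "The From value must be larger than 0"
```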

View file

@@ -114,10 +114,10 @@ const MovieUploadForm: FunctionComponent<Props> = ({
})),
},
validate: {
files: FormUtils.validation((values) => {
files: FormUtils.validation((values: SubtitleFile[]) => {
return (
values.find(
(v) =>
(v: SubtitleFile) =>
v.language === null ||
v.validateResult === undefined ||
v.validateResult.state === "error",

View file

@@ -1,5 +1,5 @@
.content {
@include smaller-than($mantine-breakpoint-md) {
@include mantine.smaller-than(mantine.$mantine-breakpoint-md) {
padding: 0;
}
}

View file

@@ -70,10 +70,10 @@ const ProfileEditForm: FunctionComponent<Props> = ({
initialValues: profile,
validate: {
name: FormUtils.validation(
(value) => value.length > 0,
(value: string) => value.length > 0,
"Must have a name",
),
tag: FormUtils.validation((value) => {
tag: FormUtils.validation((value: string | undefined) => {
if (!value) {
return true;
}
@@ -81,7 +81,7 @@ const ProfileEditForm: FunctionComponent<Props> = ({
return /^[a-z_0-9-]+$/.test(value);
}, "Only lowercase alphanumeric characters, underscores (_) and hyphens (-) are allowed"),
items: FormUtils.validation(
(value) => value.length > 0,
(value: Language.ProfileItem[]) => value.length > 0,
"Must contain at least 1 language",
),
},

View file

@@ -128,9 +128,9 @@ const SeriesUploadForm: FunctionComponent<Props> = ({
},
validate: {
files: FormUtils.validation(
(values) =>
(values: SubtitleFile[]) =>
values.find(
(v) =>
(v: SubtitleFile) =>
v.language === null ||
v.episode === null ||
v.validateResult === undefined ||

View file

@@ -32,11 +32,20 @@ const TimeOffsetForm: FunctionComponent<Props> = ({ selections, onSubmit }) => {
ms: 0,
},
validate: {
hour: FormUtils.validation((v) => v >= 0, "Hour must be larger than 0"),
min: FormUtils.validation((v) => v >= 0, "Minute must be larger than 0"),
sec: FormUtils.validation((v) => v >= 0, "Second must be larger than 0"),
hour: FormUtils.validation(
(v: number) => v >= 0,
"Hour must be larger than 0",
),
min: FormUtils.validation(
(v: number) => v >= 0,
"Minute must be larger than 0",
),
sec: FormUtils.validation(
(v: number) => v >= 0,
"Second must be larger than 0",
),
ms: FormUtils.validation(
(v) => v >= 0,
(v: number) => v >= 0,
"Millisecond must be larger than 0",
),
},

View file

@@ -1,7 +1,7 @@
import { faStickyNote } from "@fortawesome/free-regular-svg-icons";
import userEvent from "@testing-library/user-event";
import { describe, it, vitest } from "vitest";
import { render, screen } from "@/tests";
import { customRender, screen } from "@/tests";
import Action from "./Action";
const testLabel = "Test Label";
@@ -9,7 +9,7 @@ const testIcon = faStickyNote;
describe("Action button", () => {
it("should be a button", () => {
render(<Action icon={testIcon} label={testLabel}></Action>);
customRender(<Action icon={testIcon} label={testLabel}></Action>);
const element = screen.getByRole("button", { name: testLabel });
expect(element.getAttribute("type")).toEqual("button");
@@ -17,7 +17,7 @@
});
it("should show icon", () => {
render(<Action icon={testIcon} label={testLabel}></Action>);
customRender(<Action icon={testIcon} label={testLabel}></Action>);
// TODO: use getBy...
const element = screen.getByRole("img", { hidden: true });
@@ -27,7 +27,7 @@
it("should call on-click event when clicked", async () => {
const onClickFn = vitest.fn();
render(
customRender(
<Action icon={testIcon} label={testLabel} onClick={onClickFn}></Action>,
);

View file

@@ -1,6 +1,6 @@
import userEvent from "@testing-library/user-event";
import { describe, it, vitest } from "vitest";
import { render, screen } from "@/tests";
import { customRender, screen } from "@/tests";
import ChipInput from "./ChipInput";
describe("ChipInput", () => {
@@ -8,7 +8,7 @@ describe("ChipInput", () => {
// TODO: Support default value
it.skip("should works with default value", () => {
render(<ChipInput defaultValue={existedValues}></ChipInput>);
customRender(<ChipInput defaultValue={existedValues}></ChipInput>);
existedValues.forEach((value) => {
expect(screen.getByText(value)).toBeDefined();
@@ -16,7 +16,7 @@
});
it("should works with value", () => {
render(<ChipInput value={existedValues}></ChipInput>);
customRender(<ChipInput value={existedValues}></ChipInput>);
existedValues.forEach((value) => {
expect(screen.getByText(value)).toBeDefined();
@@ -29,7 +29,9 @@
expect(values).toContain(typedValue);
});
render(<ChipInput value={existedValues} onChange={mockedFn}></ChipInput>);
customRender(
<ChipInput value={existedValues} onChange={mockedFn}></ChipInput>,
);
const element = screen.getByRole("searchbox");

View file

@@ -1,6 +1,6 @@
import userEvent from "@testing-library/user-event";
import { describe, it, vitest } from "vitest";
import { render, screen } from "@/tests";
import { customRender, screen } from "@/tests";
import { Selector, SelectorOption } from "./Selector";
const selectorName = "Test Selections";
@@ -18,7 +18,9 @@ const testOptions: SelectorOption<string>[] = [
describe("Selector", () => {
describe("options", () => {
it("should work with the SelectorOption", () => {
render(<Selector name={selectorName} options={testOptions}></Selector>);
customRender(
<Selector name={selectorName} options={testOptions}></Selector>,
);
testOptions.forEach((o) => {
expect(screen.getByText(o.label)).toBeDefined();
@@ -26,7 +28,9 @@
});
it("should display when clicked", async () => {
render(<Selector name={selectorName} options={testOptions}></Selector>);
customRender(
<Selector name={selectorName} options={testOptions}></Selector>,
);
const element = screen.getByTestId("input-selector");
@@ -41,7 +45,7 @@
it("shouldn't show default value", async () => {
const option = testOptions[0];
render(
customRender(
<Selector
name={selectorName}
options={testOptions}
@@ -54,7 +58,7 @@
it("shouldn't show value", async () => {
const option = testOptions[0];
render(
customRender(
<Selector
name={selectorName}
options={testOptions}
@@ -72,7 +76,7 @@
const mockedFn = vitest.fn((value: string | null) => {
expect(value).toEqual(clickedOption.value);
});
render(
customRender(
<Selector
name={selectorName}
options={testOptions}
@@ -112,7 +116,7 @@
const mockedFn = vitest.fn((value: { name: string } | null) => {
expect(value).toEqual(clickedOption.value);
});
render(
customRender(
<Selector
name={selectorName}
options={objectOptions}
@@ -134,7 +138,7 @@
describe("placeholder", () => {
it("should show when no selection", () => {
const placeholder = "Empty Selection";
render(
customRender(
<Selector
name={selectorName}
options={testOptions}

View file

@@ -9,23 +9,51 @@ import {
Text,
} from "@mantine/core";
import { ColumnDef } from "@tanstack/react-table";
import {
useEpisodeSubtitleModification,
useMovieSubtitleModification,
} from "@/apis/hooks";
import Language from "@/components/bazarr/Language";
import SubtitleToolsMenu from "@/components/SubtitleToolsMenu";
import SimpleTable from "@/components/tables/SimpleTable";
import { withModal } from "@/modules/modals";
import { isMovie } from "@/utilities";
import { useModals, withModal } from "@/modules/modals";
import { task, TaskGroup } from "@/modules/task";
import { fromPython, isMovie, toPython } from "@/utilities";
type SupportType = Item.Episode | Item.Movie;
type TableColumnType = FormType.ModifySubtitle & {
raw_language: Language.Info;
seriesId: number;
name: string;
isMovie: boolean;
};
function getIdAndType(item: SupportType): [number, "episode" | "movie"] {
type LocalisedType = {
id: number;
seriesId: number;
type: "movie" | "episode";
name: string;
isMovie: boolean;
};
function getLocalisedValues(item: SupportType): LocalisedType {
if (isMovie(item)) {
return [item.radarrId, "movie"];
return {
seriesId: 0,
id: item.radarrId,
type: "movie",
name: item.title,
isMovie: true,
};
} else {
return [item.sonarrEpisodeId, "episode"];
return {
seriesId: item.sonarrSeriesId,
id: item.sonarrEpisodeId,
type: "episode",
name: item.title,
isMovie: false,
};
}
}
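The rows assembled below store `hi` and `forced` through `toPython`, and the action handler reads them back with `fromPython`. Neither helper appears in this diff; the names suggest converters between JavaScript booleans and the Python-style literals the backend expects, roughly:

```ts
// Assumed behaviour of the converters used below; Bazarr's backend is
// Python, so booleans would travel as "True"/"False" strings.
type PythonBool = "True" | "False";

const toPython = (value: boolean): PythonBool => (value ? "True" : "False");
const fromPython = (value: PythonBool): boolean => value === "True";
```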
@@ -41,6 +69,11 @@ const SubtitleToolView: FunctionComponent<SubtitleToolViewProps> = ({
payload,
}) => {
const [selections, setSelections] = useState<TableColumnType[]>([]);
const { remove: removeEpisode, download: downloadEpisode } =
useEpisodeSubtitleModification();
const { download: downloadMovie, remove: removeMovie } =
useMovieSubtitleModification();
const modals = useModals();
const columns = useMemo<ColumnDef<TableColumnType>[]>(
() => [
@@ -109,17 +142,22 @@ const SubtitleToolView: FunctionComponent<SubtitleToolViewProps> = ({
const data = useMemo<TableColumnType[]>(
() =>
payload.flatMap((item) => {
const [id, type] = getIdAndType(item);
const { seriesId, id, type, name, isMovie } = getLocalisedValues(item);
return item.subtitles.flatMap((v) => {
if (v.path) {
return [
{
id,
seriesId,
type,
language: v.code2,
path: v.path,
// eslint-disable-next-line camelcase
raw_language: v,
name,
hi: toPython(v.hi),
forced: toPython(v.forced),
isMovie,
},
];
} else {
@@ -143,7 +181,51 @@ const SubtitleToolView: FunctionComponent<SubtitleToolViewProps> = ({
></SimpleTable>
<Divider></Divider>
<Group>
<SubtitleToolsMenu selections={selections}>
<SubtitleToolsMenu
selections={selections}
onAction={(action) => {
selections.forEach((selection) => {
const actionPayload = {
form: {
language: selection.language,
hi: fromPython(selection.hi),
forced: fromPython(selection.forced),
path: selection.path,
},
radarrId: 0,
seriesId: 0,
episodeId: 0,
};
if (selection.isMovie) {
actionPayload.radarrId = selection.id;
} else {
actionPayload.seriesId = selection.seriesId;
actionPayload.episodeId = selection.id;
}
const download = selection.isMovie
? downloadMovie
: downloadEpisode;
const remove = selection.isMovie ? removeMovie : removeEpisode;
if (action === "search") {
task.create(
selection.name,
TaskGroup.SearchSubtitle,
download.mutateAsync,
actionPayload,
);
} else if (action === "delete" && selection.path) {
task.create(
selection.name,
TaskGroup.DeleteSubtitle,
remove.mutateAsync,
actionPayload,
);
}
});
modals.closeAll();
}}
>
<Button disabled={selections.length === 0} variant="light">
Select Action
</Button>

View file

@@ -1,4 +1,4 @@
import { FunctionComponent, useEffect } from "react";
import { FunctionComponent } from "react";
import { Group, Pagination, Text } from "@mantine/core";
import { useIsLoading } from "@/contexts";
@@ -23,11 +23,6 @@ const PageControl: FunctionComponent<Props> = ({
const isLoading = useIsLoading();
// Jump to first page if total page count changes
useEffect(() => {
goto(0);
}, [total, goto]);
return (
<Group p={16} justify="space-between">
<Text size="sm">
@@ -37,7 +32,9 @@
size="sm"
color={isLoading ? "gray" : "primary"}
value={index + 1}
onChange={(page) => goto(page - 1)}
onChange={(page) => {
return goto(page - 1);
}}
hidden={count <= 1}
total={count}
></Pagination>
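Mantine's `Pagination` is 1-based while the table index is 0-based, hence the `+ 1`/`- 1` translation at the boundary. The removed effect (jumping to the first page whenever the total changed) appears to be superseded by the URL-driven `goto` added in the next file. The boundary translation in isolation, as an illustrative component:

```tsx
import { Pagination } from "@mantine/core";

// Illustrative only: 0-based index in state, 1-based value for Mantine.
const Example = ({ index, goto }: { index: number; goto: (i: number) => void }) => (
  <Pagination value={index + 1} onChange={(page) => goto(page - 1)} total={10} />
);
```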

View file

@@ -1,4 +1,5 @@
import { useEffect } from "react";
import { useSearchParams } from "react-router";
import { UsePaginationQueryResult } from "@/apis/queries/hooks";
import SimpleTable, { SimpleTableProps } from "@/components/tables/SimpleTable";
import { LoadingProvider } from "@/contexts";
@@ -18,6 +19,8 @@ export default function QueryPageTable<T extends object>(props: Props<T>) {
controls: { gotoPage },
} = query;
const [searchParams, setSearchParams] = useSearchParams();
useEffect(() => {
ScrollToTop();
}, [page]);
@@ -30,7 +33,13 @@ export default function QueryPageTable<T extends object>(props: Props<T>) {
index={page}
size={pageSize}
total={totalCount}
goto={gotoPage}
goto={(page) => {
searchParams.set("page", (page + 1).toString());
setSearchParams(searchParams, { replace: true });
gotoPage(page);
}}
></PageControl>
</LoadingProvider>
);

Some files were not shown because too many files have changed in this diff.