Mirror of https://github.com/clinton-hall/nzbToMedia.git, synced 2025-03-12 12:35:28 -07:00.
Ignore line-length limits to better identify areas to be refactored.
This commit is contained in: parent 1938fcc66a, commit bea41c3f2e.
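The pattern throughout the diff below is mechanical: statements that had been wrapped across several lines purely to satisfy a line-length limit are collapsed back onto single lines, so that a line's length once again reflects the complexity of the statement it holds. A minimal before/after sketch of the idea (the helper name is hypothetical; the message text is taken from the diff):

    def connection_failure_message(section: str) -> str:
        # Before: wrapped only to stay under the line-length limit.
        message = (
            f'{section}: Failed to post-process - Unable to connect to '
            f'{section}'
        )
        # After: the same expression on one line; if it is long now, that
        # length marks the surrounding code as a refactoring candidate.
        message = f'{section}: Failed to post-process - Unable to connect to {section}'
        return message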
(Some file diffs are suppressed because one or more lines are too long.)
@@ -5,6 +5,7 @@ import logging
 import requests
 
 import nzb2media
+import nzb2media.utils.common
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
@@ -14,85 +15,42 @@ log = logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
 
 
-def process(
-    *,
-    section: str,
-    dir_name: str,
-    input_name: str = '',
-    status: int = 0,
-    client_agent: str = 'manual',
-    download_id: str = '',
-    input_category: str = '',
-    failure_link: str = '',
-) -> ProcessResult:
+def process(*, section: str, dir_name: str, input_name: str = '', input_category: str = '', **kwargs) -> ProcessResult:
+    log.debug(f'Unused kwargs: {kwargs}')
     # Get configuration
     if nzb2media.CFG is None:
         raise RuntimeError('Configuration not loaded.')
     cfg = nzb2media.CFG[section][input_category]
 
     # Base URL
     ssl = int(cfg.get('ssl', 0))
     scheme = 'https' if ssl else 'http'
     host = cfg['host']
     port = cfg['port']
     web_root = cfg.get('web_root', '')
 
     # Authentication
     apikey = cfg.get('apikey', '')
 
     # Params
     remote_path = int(cfg.get('remote_path', 0))
 
     # Misc
 
     # Begin processing
     url = nzb2media.utils.common.create_url(scheme, host, port, web_root)
     if not server_responding(url):
         log.error('Server did not respond. Exiting')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - {section} did not respond.',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
     input_name, dir_name = convert_to_ascii(input_name, dir_name)
-
-    params = {
-        'apikey': apikey,
-        'cmd': 'forceProcess',
-        'dir': remote_dir(dir_name) if remote_path else dir_name,
-    }
-
+    params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': remote_dir(dir_name) if remote_path else dir_name}
     log.debug(f'Opening URL: {url} with params: {params}')
-
     try:
         response = requests.get(url, params=params, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error('Unable to open URL')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Unable to connect to '
-            f'{section}',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
     log.debug(response.text)
-
-    if response.status_code not in [
-        requests.codes.ok,
-        requests.codes.created,
-        requests.codes.accepted,
-    ]:
+    if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
         log.error(f'Server returned status {response.status_code}')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Server returned status '
-            f'{response.status_code}',
-        )
+        return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}')
     if response.text == 'OK':
-        log.debug(
-            f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian',
-        )
-        return ProcessResult.success(
-            f'{section}: Successfully post-processed {input_name}',
-        )
+        log.debug(f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian')
+        return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
     log.error(f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian')
-    return ProcessResult.failure(
-        f'{section}: Failed to post-process - Returned log from {section} '
-        f'was not as expected.',
-    )
+    return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.')

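A second pattern in the new signatures: parameters a given processor never uses (status, client_agent, download_id, failure_link) are dropped from the explicit parameter list and absorbed by **kwargs, which is then logged, so call sites written against the old, wider signature keep working. A standalone sketch of the idea, assuming a simplified return type (the real functions return ProcessResult):

    import logging

    log = logging.getLogger(__name__)

    def process(*, section: str, dir_name: str, input_name: str = '', input_category: str = '', **kwargs) -> None:
        # Arguments this processor ignores still arrive here and are only logged.
        log.debug(f'Unused kwargs: {kwargs}')

    # A caller using the old, wider signature still works unchanged:
    process(section='LazyLibrarian', dir_name='/downloads/book', status=0, client_agent='manual')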
@@ -6,6 +6,7 @@ import os
 import requests
 
 import nzb2media
+import nzb2media.utils.common
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
@@ -15,104 +16,58 @@ log = logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
 
 
-def process(
-    *,
-    section: str,
-    dir_name: str,
-    input_name: str = '',
-    status: int = 0,
-    client_agent: str = 'manual',
-    download_id: str = '',
-    input_category: str = '',
-    failure_link: str = '',
-) -> ProcessResult:
+def process(*, section: str, dir_name: str, input_name: str = '', input_category: str = '', status: int = 0, **kwargs) -> ProcessResult:
+    log.debug(f'Unused kwargs: {kwargs}')
     # Get configuration
     if nzb2media.CFG is None:
         raise RuntimeError('Configuration not loaded.')
     cfg = nzb2media.CFG[section][input_category]
 
     # Base URL
     ssl = int(cfg.get('ssl', 0))
     scheme = 'https' if ssl else 'http'
     host = cfg['host']
     port = cfg['port']
     web_root = cfg.get('web_root', '')
 
     # Authentication
     apikey = cfg.get('apikey', '')
 
     # Params
     remote_path = int(cfg.get('remote_path', 0))
 
     # Misc
     apc_version = '2.04'
     comicrn_version = '1.01'
 
     # Begin processing
     url = nzb2media.utils.common.create_url(scheme, host, port, web_root)
     if not server_responding(url):
         log.error('Server did not respond. Exiting')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - {section} did not respond.',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
     input_name, dir_name = convert_to_ascii(input_name, dir_name)
     clean_name, ext = os.path.splitext(input_name)
     if len(ext) == 4:  # we assume this was a standard extension.
         input_name = clean_name
 
-    params = {
-        'cmd': 'forceProcess',
-        'apikey': apikey,
-        'nzb_folder': remote_dir(dir_name) if remote_path else dir_name,
-    }
-
+    params = {'cmd': 'forceProcess', 'apikey': apikey, 'nzb_folder': remote_dir(dir_name) if remote_path else dir_name}
     if input_name is not None:
         params['nzb_name'] = input_name
     params['failed'] = int(status)
     params['apc_version'] = apc_version
     params['comicrn_version'] = comicrn_version
 
     success = False
 
     log.debug(f'Opening URL: {url}')
     try:
-        response = requests.post(
-            url, params=params, stream=True, verify=False, timeout=(30, 300),
-        )
+        response = requests.post(url, params=params, stream=True, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error('Unable to open URL')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Unable to connect to '
-            f'{section}',
-        )
-    if response.status_code not in [
-        requests.codes.ok,
-        requests.codes.created,
-        requests.codes.accepted,
-    ]:
+        return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
+    log.debug(response.text)
+    if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
         log.error(f'Server returned status {response.status_code}')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Server returned status '
-            f'{response.status_code}',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}')
     for line in response.text.split('\n'):
         if line:
             log.debug(line)
         if 'Post Processing SUCCESSFUL' in line:
             success = True
 
     if success:
         log.debug('SUCCESS: This issue has been processed successfully')
-        return ProcessResult.success(
-            f'{section}: Successfully post-processed {input_name}',
-        )
-    log.warning(
-        'The issue does not appear to have successfully processed. '
-        'Please check your Logs',
-    )
-    return ProcessResult.failure(
-        f'{section}: Failed to post-process - Returned log from '
-        f'{section} was not as expected.',
-    )
+        return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
+    log.warning('The issue does not appear to have successfully processed. ' 'Please check your Logs')
+    return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.')

@@ -34,22 +34,11 @@ class ProcessResult(typing.NamedTuple):
 
 def command_complete(url, params, headers, section):
     try:
-        respone = requests.get(
-            url,
-            params=params,
-            headers=headers,
-            stream=True,
-            verify=False,
-            timeout=(30, 60),
-        )
+        respone = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60))
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url}')
         return None
-    if respone.status_code not in [
-        requests.codes.ok,
-        requests.codes.created,
-        requests.codes.accepted,
-    ]:
+    if respone.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
         log.error(f'Server returned status {respone.status_code}')
         return None
     try:
@@ -63,22 +52,11 @@ def command_complete(url, params, headers, section):
 
 def completed_download_handling(url2, headers, section='MAIN'):
     try:
-        response = requests.get(
-            url2,
-            params={},
-            headers=headers,
-            stream=True,
-            verify=False,
-            timeout=(30, 60),
-        )
+        response = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url2}')
         return False
-    if response.status_code not in [
-        requests.codes.ok,
-        requests.codes.created,
-        requests.codes.accepted,
-    ]:
+    if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
         log.error(f'Server returned status {response.status_code}')
         return False
     try:

@@ -7,6 +7,7 @@ import shutil
 import requests
 
 import nzb2media
+import nzb2media.utils.common
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
@@ -15,71 +16,39 @@ log = logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
 
 
-def process(
-    *,
-    section: str,
-    dir_name: str,
-    input_name: str = '',
-    status: int = 0,
-    client_agent: str = 'manual',
-    download_id: str = '',
-    input_category: str = '',
-    failure_link: str = '',
-) -> ProcessResult:
+def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, input_category: str = '', **kwargs) -> ProcessResult:
+    log.debug(f'Unused kwargs: {kwargs}')
     # Get configuration
     if nzb2media.CFG is None:
         raise RuntimeError('Configuration not loaded.')
     cfg = nzb2media.CFG[section][input_category]
 
     # Base URL
     ssl = int(cfg.get('ssl', 0))
     scheme = 'https' if ssl else 'http'
     host = cfg['host']
     port = cfg['port']
     web_root = cfg.get('web_root', '')
 
     # Authentication
     apikey = cfg.get('apikey', '')
 
     # Params
 
     # Misc
     library = cfg.get('library')
 
     # Begin processing
     url = nzb2media.utils.common.create_url(scheme, host, port, web_root)
     if not server_responding(url):
         log.error('Server did not respond. Exiting')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - {section} did not respond.',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
     input_name, dir_name = convert_to_ascii(input_name, dir_name)
 
     fields = input_name.split('-')
 
     gamez_id = fields[0].replace('[', '').replace(']', '').replace(' ', '')
 
     download_status = 'Downloaded' if status == 0 else 'Wanted'
 
-    params = {
-        'api_key': apikey,
-        'mode': 'UPDATEREQUESTEDSTATUS',
-        'db_id': gamez_id,
-        'status': download_status,
-    }
-
+    params = {'api_key': apikey, 'mode': 'UPDATEREQUESTEDSTATUS', 'db_id': gamez_id, 'status': download_status}
     log.debug(f'Opening URL: {url}')
 
     try:
         resposne = requests.get(url, params=params, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error('Unable to open URL')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Unable to connect to '
-            f'{section}',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
     result = resposne.json()
     log.debug(result)
     if library:
@@ -88,33 +57,15 @@ def process(
             shutil.move(dir_name, os.path.join(library, input_name))
         except Exception:
             log.error(f'Unable to move {dir_name} to {os.path.join(library, input_name)}')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process - Unable to move files',
-            )
+            return ProcessResult.failure(f'{section}: Failed to post-process - Unable to move files')
     else:
         log.error('No library specified to move files to. Please edit your configuration.')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - No library defined in '
-            f'{section}',
-        )
-
-    if resposne.status_code not in [
-        requests.codes.ok,
-        requests.codes.created,
-        requests.codes.accepted,
-    ]:
+        return ProcessResult.failure(f'{section}: Failed to post-process - No library defined in {section}')
+    if resposne.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
         log.error(f'Server returned status {resposne.status_code}')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Server returned status '
-            f'{resposne.status_code}',
-        )
+        return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {resposne.status_code}')
     if result['success']:
         log.debug(f'SUCCESS: Status for {gamez_id} has been set to {download_status} in Gamez')
-        return ProcessResult.success(
-            f'{section}: Successfully post-processed {input_name}',
-        )
+        return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
     log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez')
-    return ProcessResult.failure(
-        f'{section}: Failed to post-process - Returned log from {section} '
-        f'was not as expected.',
-    )
+    return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.')

@@ -8,6 +8,7 @@ import time
 import requests
 
 import nzb2media
+import nzb2media.utils.common
 from nzb2media import transcoder
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.auto_process.common import command_complete
@@ -30,38 +31,24 @@ log = logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
 
 
-def process(
-    *,
-    section: str,
-    dir_name: str,
-    input_name: str = '',
-    status: int = 0,
-    client_agent: str = 'manual',
-    download_id: str = '',
-    input_category: str = '',
-    failure_link: str = '',
-) -> ProcessResult:
+def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, client_agent: str = 'manual', download_id: str = '', input_category: str = '', failure_link: str = '') -> ProcessResult:
     # Get configuration
     if nzb2media.CFG is None:
         raise RuntimeError('Configuration not loaded.')
     cfg = nzb2media.CFG[section][input_category]
 
     # Base URL
     ssl = int(cfg.get('ssl', 0))
     scheme = 'https' if ssl else 'http'
     host = cfg['host']
     port = cfg['port']
     web_root = cfg.get('web_root', '')
 
     # Authentication
     apikey = cfg.get('apikey', '')
     omdbapikey = cfg.get('omdbapikey', '')
 
     # Params
     delete_failed = int(cfg.get('delete_failed', 0))
     remote_path = int(cfg.get('remote_path', 0))
     wait_for = int(cfg.get('wait_for', 2))
 
     # Misc
     if status > 0 and nzb2media.NOEXTRACTFAILED:
         extract = 0
@@ -75,7 +62,6 @@ def process(
     method = cfg.get('method', None)
     if section != 'CouchPotato':
         method = None
-
     # Begin processing
     imdbid = find_imdbid(dir_name, input_name, omdbapikey)
     if section == 'CouchPotato':
@@ -100,10 +86,7 @@ def process(
         release = None
     else:
         log.error('Server did not respond. Exiting')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - {section} did not respond.',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
     # pull info from release found if available
     release_id = None
     media_id = None
@@ -118,48 +101,29 @@ def process(
             release_status_old = release[release_id]['status']
         except Exception:
             pass
 
-    if not os.path.isdir(dir_name) and os.path.isfile(
-        dir_name,
-    ):  # If the input directory is a file, assume single file download and split dir/name.
+    if not os.path.isdir(dir_name) and os.path.isfile(dir_name):  # If the input directory is a file, assume single file download and split dir/name.
         dir_name = os.path.split(os.path.normpath(dir_name))[0]
 
     specific_path = os.path.join(dir_name, str(input_name))
     clean_name = os.path.splitext(specific_path)
     if clean_name[1] == '.nzb':
         specific_path = clean_name[0]
     if os.path.isdir(specific_path):
         dir_name = specific_path
 
     process_all_exceptions(input_name, dir_name)
     input_name, dir_name = convert_to_ascii(input_name, dir_name)
 
-    if (
-        not list_media_files(
-            dir_name, media=True, audio=False, meta=False, archives=False,
-        )
-        and list_media_files(
-            dir_name, media=False, audio=False, meta=False, archives=True,
-        )
-        and extract
-    ):
+    if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
         log.debug(f'Checking for archives to extract in directory: {dir_name}')
         extract_files(dir_name)
         input_name, dir_name = convert_to_ascii(input_name, dir_name)
 
     good_files = 0
     valid_files = 0
     num_files = 0
     # Check video files for corruption
-    for video in list_media_files(
-        dir_name, media=True, audio=False, meta=False, archives=False,
-    ):
+    for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
         num_files += 1
         if transcoder.is_video_good(video, status):
             good_files += 1
-        if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(
-            video, status, require_lan=nzb2media.REQUIRE_LAN,
-        ):
+        if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=nzb2media.REQUIRE_LAN):
             valid_files += 1
             import_subs(video)
             rename_subs(dir_name)
@@ -170,18 +134,13 @@ def process(
     elif num_files and valid_files < num_files:
         log.info('Status shown as success from Downloader, but corrupt video files found. Setting as failed.')
         status = 1
-        if (
-            'NZBOP_VERSION' in os.environ
-            and os.environ['NZBOP_VERSION'][0:5] >= '14.0'
-        ):
+        if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
             print('[NZB] MARK=BAD')
         if good_files == num_files:
             log.debug(f'Video marked as failed due to missing required language: {nzb2media.REQUIRE_LAN}')
         else:
             log.debug('Video marked as failed due to missing playable audio or video')
-        if (
-            good_files < num_files and failure_link
-        ):  # only report corrupt files
+        if good_files < num_files and failure_link:  # only report corrupt files
             failure_link += '&corrupt=true'
     elif client_agent == 'manual':
         log.warning(f'No media files found in directory {dir_name} to manually process.')
@@ -192,141 +151,76 @@ def process(
     else:
         log.warning(f'No media files found in directory {dir_name}. Processing this as a failed download')
         status = 1
-        if (
-            'NZBOP_VERSION' in os.environ
-            and os.environ['NZBOP_VERSION'][0:5] >= '14.0'
-        ):
+        if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
             print('[NZB] MARK=BAD')
 
     if status == 0:
         if nzb2media.TRANSCODE == 1:
             result, new_dir_name = transcoder.transcode_directory(dir_name)
             if result == 0:
                 log.debug(f'Transcoding succeeded for files in {dir_name}')
                 dir_name = new_dir_name
 
                 log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}')
                 if chmod_directory:
                     log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'')
                     rchmod(dir_name, chmod_directory)
             else:
                 log.error(f'Transcoding failed for files in {dir_name}')
-                return ProcessResult(
-                    message=f'{section}: Failed to post-process - Transcoding failed',
-                    status_code=1,
-                )
-        for video in list_media_files(
-            dir_name, media=True, audio=False, meta=False, archives=False,
-        ):
+                return ProcessResult(message=f'{section}: Failed to post-process - Transcoding failed', status_code=1)
+        for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
             if not release and '.cp(tt' not in video and imdbid:
                 video_name, video_ext = os.path.splitext(video)
                 video2 = f'{video_name}.cp({imdbid}){video_ext}'
-                if not (
-                    client_agent in [nzb2media.TORRENT_CLIENT_AGENT, 'manual']
-                    and nzb2media.USE_LINK == 'move-sym'
-                ):
+                if not (client_agent in [nzb2media.TORRENT_CLIENT_AGENT, 'manual'] and nzb2media.USE_LINK == 'move-sym'):
                     log.debug(f'Renaming: {video} to: {video2}')
                     os.rename(video, video2)
 
         if not apikey:  # If only using Transcoder functions, exit here.
             log.info('No CouchPotato or Radarr or Watcher3 apikey entered. Processing completed.')
-            return ProcessResult(
-                message=f'{section}: Successfully post-processed {input_name}',
-                status_code=0,
-            )
-
-        params = {
-            'media_folder': remote_dir(dir_name) if remote_path else dir_name,
-        }
-
+            return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=0)
+        params = {'media_folder': remote_dir(dir_name) if remote_path else dir_name}
         if download_id and release_id:
             params['downloader'] = downloader or client_agent
             params['download_id'] = download_id
 
         if section == 'CouchPotato':
             if method == 'manage':
                 command = 'manage.update'
                 params.clear()
             else:
                 command = 'renamer.scan'
 
             url = f'{base_url}{command}'
             log.debug(f'Opening URL: {url} with PARAMS: {params}')
             log.debug(f'Starting {method} scan for {input_name}')
 
         if section == 'Radarr':
-            payload = {
-                'name': 'DownloadedMoviesScan',
-                'path': params['media_folder'],
-                'downloadClientId': download_id,
-                'importMode': import_mode,
-            }
+            payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id, 'importMode': import_mode}
             if not download_id:
                 payload.pop('downloadClientId')
             log.debug(f'Opening URL: {base_url} with PARAMS: {payload}')
             log.debug(f'Starting DownloadedMoviesScan scan for {input_name}')
 
         if section == 'Watcher3':
-            if input_name and os.path.isfile(
-                os.path.join(dir_name, input_name),
-            ):
-                params['media_folder'] = os.path.join(
-                    params['media_folder'], input_name,
-                )
-            payload = {
-                'apikey': apikey,
-                'path': params['media_folder'],
-                'guid': download_id,
-                'mode': 'complete',
-            }
+            if input_name and os.path.isfile(os.path.join(dir_name, input_name)):
+                params['media_folder'] = os.path.join(params['media_folder'], input_name)
+            payload = {'apikey': apikey, 'path': params['media_folder'], 'guid': download_id, 'mode': 'complete'}
             if not download_id:
                 payload.pop('guid')
             log.debug(f'Opening URL: {base_url} with PARAMS: {payload}')
             log.debug(f'Starting postprocessing scan for {input_name}')
 
         try:
             if section == 'CouchPotato':
-                response = requests.get(
-                    url, params=params, verify=False, timeout=(30, 1800),
-                )
+                response = requests.get(url, params=params, verify=False, timeout=(30, 1800))
             elif section == 'Watcher3':
-                response = requests.post(
-                    base_url, data=payload, verify=False, timeout=(30, 1800),
-                )
+                response = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800))
             else:
-                response = requests.post(
-                    base_url,
-                    data=json.dumps(payload),
-                    headers=headers,
-                    stream=True,
-                    verify=False,
-                    timeout=(30, 1800),
-                )
+                response = requests.post(base_url, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800))
         except requests.ConnectionError:
             log.error('Unable to open URL')
-            return ProcessResult(
-                message=f'{section}: Failed to post-process - Unable to connect to {section}',
-                status_code=1,
-            )
-
+            return ProcessResult(message=f'{section}: Failed to post-process - Unable to connect to {section}', status_code=1)
         result = response.json()
-        if response.status_code not in [
-            requests.codes.ok,
-            requests.codes.created,
-            requests.codes.accepted,
-        ]:
+        if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
             log.error(f'Server returned status {response.status_code}')
-            return ProcessResult(
-                message=f'{section}: Failed to post-process - Server returned status {response.status_code}',
-                status_code=1,
-            )
+            return ProcessResult(message=f'{section}: Failed to post-process - Server returned status {response.status_code}', status_code=1)
         if section == 'CouchPotato' and result['success']:
             log.debug(f'SUCCESS: Finished {method} scan for folder {dir_name}')
             if method == 'manage':
-                return ProcessResult(
-                    message=f'{section}: Successfully post-processed {input_name}',
-                    status_code=0,
-                )
+                return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=0)
         elif section == 'Radarr':
             try:
                 scan_id = int(result['id'])
@@ -338,167 +232,85 @@ def process(
                 update_movie_status = result['tasks']['update_movie_status']
                 log.debug(f'Watcher3 updated status to {section}')
                 if update_movie_status == 'Finished':
-                    return ProcessResult(
-                        message=f'{section}: Successfully post-processed {input_name}',
-                        status_code=status,
-                    )
-                return ProcessResult(
-                    message=f'{section}: Failed to post-process - changed status to {update_movie_status}',
-                    status_code=1,
-                )
+                    return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=status)
+                return ProcessResult(message=f'{section}: Failed to post-process - changed status to {update_movie_status}', status_code=1)
         else:
             log.error(f'FAILED: {method} scan was unable to finish for folder {dir_name}. exiting!')
-            return ProcessResult(
-                message=f'{section}: Failed to post-process - Server did not return success',
-                status_code=1,
-            )
+            return ProcessResult(message=f'{section}: Failed to post-process - Server did not return success', status_code=1)
     else:
         nzb2media.FAILED = True
         log.debug(f'FAILED DOWNLOAD DETECTED FOR {input_name}')
         if failure_link:
             report_nzb(failure_link, client_agent)
 
         if section == 'Radarr':
             log.debug(f'SUCCESS: Sending failed download to {section} for CDH processing')
             return ProcessResult(
                 message=f'{section}: Sending failed download back to {section}',
-                status_code=1,
-                # Return as failed to flag this in the downloader.
+                status_code=1,  # Return as failed to flag this in the downloader.
             )  # Return failed flag, but log the event as successful.
         if section == 'Watcher3':
             log.debug(f'Sending failed download to {section} for CDH processing')
             path = remote_dir(dir_name) if remote_path else dir_name
-            if input_name and os.path.isfile(
-                os.path.join(dir_name, input_name),
-            ):
+            if input_name and os.path.isfile(os.path.join(dir_name, input_name)):
                 path = os.path.join(path, input_name)
-            payload = {
-                'apikey': apikey,
-                'path': path,
-                'guid': download_id,
-                'mode': 'failed',
-            }
-            response = requests.post(
-                base_url, data=payload, verify=False, timeout=(30, 1800),
-            )
+            payload = {'apikey': apikey, 'path': path, 'guid': download_id, 'mode': 'failed'}
+            response = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800))
             result = response.json()
             log.debug(f'Watcher3 response: {result}')
             if result['status'] == 'finished':
                 return ProcessResult(
                     message=f'{section}: Sending failed download back to {section}',
-                    status_code=1,
-                    # Return as failed to flag this in the downloader.
+                    status_code=1,  # Return as failed to flag this in the downloader.
                 )  # Return failed flag, but log the event as successful.
 
-        if (
-            delete_failed
-            and os.path.isdir(dir_name)
-            and not os.path.dirname(dir_name) == dir_name
-        ):
+        if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
             log.debug(f'Deleting failed files and folder {dir_name}')
             remove_dir(dir_name)
 
         if not release_id and not media_id:
             log.error(f'Could not find a downloaded movie in the database matching {input_name}, exiting!')
-            return ProcessResult(
-                message='{0}: Failed to post-process - Failed download not found in {0}'.format(section),
-                status_code=1,
-            )
-
+            return ProcessResult(message='{0}: Failed to post-process - Failed download not found in {0}'.format(section), status_code=1)
         if release_id:
             log.debug(f'Setting failed release {input_name} to ignored ...')
 
             url = f'{base_url}release.ignore'
             params = {'id': release_id}
 
             log.debug(f'Opening URL: {url} with PARAMS: {params}')
 
             try:
-                response = requests.get(
-                    url, params=params, verify=False,
-                    timeout=(30, 120),
-                )
+                response = requests.get(url, params=params, verify=False, timeout=(30, 120))
             except requests.ConnectionError:
                 log.error(f'Unable to open URL {url}')
-                return ProcessResult(
-                    message='{0}: Failed to post-process - Unable to connect to {0}'.format(section),
-                    status_code=1,
-                )
-
+                return ProcessResult(message='{0}: Failed to post-process - Unable to connect to {0}'.format(section), status_code=1)
             result = response.json()
-            if response.status_code not in [
-                requests.codes.ok,
-                requests.codes.created,
-                requests.codes.accepted,
-            ]:
+            if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                 log.error(f'Server returned status {response.status_code}')
-                return ProcessResult(
-                    status_code=1,
-                    message=f'{section}: Failed to post-process - Server returned status {response.status_code}',
-                )
+                return ProcessResult(status_code=1, message=f'{section}: Failed to post-process - Server returned status {response.status_code}')
             if result['success']:
                 log.debug(f'SUCCESS: {input_name} has been set to ignored ...')
             else:
                 log.warning(f'FAILED: Unable to set {input_name} to ignored!')
-                return ProcessResult(
-                    message=f'{section}: Failed to post-process - Unable to set {input_name} to ignored',
-                    status_code=1,
-                )
-
+                return ProcessResult(message=f'{section}: Failed to post-process - Unable to set {input_name} to ignored', status_code=1)
         log.debug('Trying to snatch the next highest ranked release.')
 
         url = f'{base_url}movie.searcher.try_next'
         log.debug(f'Opening URL: {url}')
 
         try:
-            response = requests.get(
-                url,
-                params={'media_id': media_id},
-                verify=False,
-                timeout=(30, 600),
-            )
+            response = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600))
         except requests.ConnectionError:
             log.error(f'Unable to open URL {url}')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process - Unable to connect to '
-                f'{section}',
-            )
-
+            return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
         result = response.json()
-        if response.status_code not in [
-            requests.codes.ok,
-            requests.codes.created,
-            requests.codes.accepted,
-        ]:
+        if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
             log.error(f'Server returned status {response.status_code}')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process - Server returned status '
-                f'{response.status_code}',
-            )
-
+            return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}')
         if result['success']:
             log.debug('SUCCESS: Snatched the next highest release ...')
-            return ProcessResult.success(
-                f'{section}: Successfully snatched next highest release',
-            )
+            return ProcessResult.success(f'{section}: Successfully snatched next highest release')
         log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!')
-        return ProcessResult.success(
-            f'{section}: No new release found now. '
-            f'{section} will keep searching',
-        )
-
+        return ProcessResult.success(f'{section}: No new release found now. {section} will keep searching')
     # Added a release that was not in the wanted list so confirm rename
     # successful by finding this movie media.list.
     if not release:
         # we don't want to filter new releases based on this.
         download_id = ''
 
     if no_status_check:
-        return ProcessResult.success(
-            f'{section}: Successfully processed but no change in status '
-            f'confirmed',
-        )
-
+        return ProcessResult.success(f'{section}: Successfully processed but no change in status confirmed')
     # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
     timeout = time.time() + 60 * wait_for
     while time.time() < timeout:  # only wait 2 (default) minutes, then return.
@@ -512,20 +324,13 @@ def process(
         try:
             release_id = list(release.keys())[0]
             release_status_new = release[release_id]['status']
-            if (
-                release_status_old is None
-            ):  # we didn't have a release before, but now we do.
+            if release_status_old is None:  # we didn't have a release before, but now we do.
                 title = release[release_id]['title']
                 log.debug(f'SUCCESS: Movie {title} has now been added to CouchPotato with release status of [{str(release_status_new).upper()}]')
-                return ProcessResult.success(
-                    f'{section}: Successfully post-processed {input_name}',
-                )
-
+                return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
             if release_status_new != release_status_old:
                 log.debug(f'SUCCESS: Release {release_id} has now been marked with a status of [{str(release_status_new).upper()}]')
-                return ProcessResult.success(
-                    f'{section}: Successfully post-processed {input_name}',
-                )
+                return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
         except Exception:
             pass
     elif scan_id:
@@ -535,52 +340,29 @@ def process(
         log.debug(f'The Scan command return status: {command_status}')
         if command_status in ['completed']:
             log.debug('The Scan command has completed successfully. Renaming was successful.')
-            return ProcessResult.success(
-                f'{section}: Successfully post-processed {input_name}',
-            )
+            return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
         if command_status in ['failed']:
             log.debug('The Scan command has failed. Renaming was not successful.')
-            # return ProcessResult(
-            #     message='{0}: Failed to post-process {1}'.format(section, input_name),
-            #     status_code=1,
-            # )
-
+            # return ProcessResult(message='{0}: Failed to post-process {1}'.format(section, input_name), status_code=1)
         if not os.path.isdir(dir_name):
             log.debug(f'SUCCESS: Input Directory [{dir_name}] has been processed and removed')
-            return ProcessResult.success(
-                f'{section}: Successfully post-processed {input_name}',
-            )
-
-        if not list_media_files(
-            dir_name, media=True, audio=False, meta=False, archives=True,
-        ):
+            return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
+        if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True):
             log.debug(f'SUCCESS: Input Directory [{dir_name}] has no remaining media files. This has been fully processed.')
-            return ProcessResult.success(
-                f'{section}: Successfully post-processed {input_name}',
-            )
-
+            return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
         # pause and let CouchPotatoServer/Radarr catch its breath
         time.sleep(10 * wait_for)
 
     # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now.
-    if section == 'Radarr' and completed_download_handling(
-        url2, headers, section=section,
-    ):
+    if section == 'Radarr' and completed_download_handling(url2, headers, section=section):
         log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.')
-        return ProcessResult.success(
-            f'{section}: Complete DownLoad Handling is enabled. Passing back '
-            f'to {section}',
-        )
+        return ProcessResult.success(f'{section}: Complete DownLoad Handling is enabled. Passing back to {section}')
     log.warning(f'{input_name} does not appear to have changed status after {wait_for} minutes, Please check your logs.')
-    return ProcessResult.failure(
-        f'{section}: Failed to post-process - No change in status',
-    )
+    return ProcessResult.failure(f'{section}: Failed to post-process - No change in status')
 
 
 def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
     results = {}
     params = {}
 
     # determine cmd and params to send to CouchPotato to get our results
     section = 'movies'
     cmd = 'media.list'
@@ -588,20 +370,16 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
         section = 'media'
         cmd = 'media.get'
         params['id'] = release_id or imdb_id
-
     if not (release_id or imdb_id or download_id):
         log.debug('No information available to filter CP results')
         return results
-
     url = f'{base_url}{cmd}'
     log.debug(f'Opening URL: {url} with PARAMS: {params}')
-
     try:
         response = requests.get(url, params=params, verify=False, timeout=(30, 60))
     except requests.ConnectionError:
         log.error(f'Unable to open URL {url}')
         return results
-
     try:
         result = response.json()
     except ValueError:
@@ -610,7 +388,6 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
         for line in response.iter_lines():
             log.error(line)
         return results
-
     if not result['success']:
         if 'error' in result:
             log.error(result['error'])
@@ -618,7 +395,6 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
            id_param = params['id']
            log.error(f'no media found for id {id_param}')
        return results
-
     # Gather release info and return it back, no need to narrow results
     if release_id:
         try:
@@ -627,9 +403,7 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
             return results
         except Exception:
             pass
-
     # Gather release info and proceed with trying to narrow results to one release choice
-
     movies = result[section]
     if not isinstance(movies, list):
         movies = [movies]
@@ -644,18 +418,13 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
                 if release['status'] not in ['snatched', 'downloaded', 'done']:
                     continue
                 if download_id:
-                    if (
-                        download_id.lower()
-                        != release['download_info']['id'].lower()
-                    ):
+                    if download_id.lower() != release['download_info']['id'].lower():
                         continue
-
                 key = release['_id']
                 results[key] = release
                 results[key]['title'] = movie['title']
         except Exception:
             continue
-
     # Narrow results by removing old releases by comparing their last_edit field
     if len(results) > 1:
         rem_id = set()
@@ -668,20 +437,15 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
                 continue
         for ea_id in rem_id:
             results.pop(ea_id)
-
     # Search downloads on clients for a match to try and narrow our results down to 1
     if len(results) > 1:
         rem_id = set()
         for key, val1 in results.items():
             try:
-                if not find_download(
-                    str(val1['download_info']['downloader']).lower(),
-                    val1['download_info']['id'],
-                ):
+                if not find_download(str(val1['download_info']['downloader']).lower(), val1['download_info']['id']):
                     rem_id.add(key)
             except Exception:
                 continue
         for ea_id in rem_id:
             results.pop(ea_id)
-
     return results

@@ -8,6 +8,7 @@ import time
 import requests
 
 import nzb2media
+import nzb2media.utils.common
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.auto_process.common import command_complete
 from nzb2media.scene_exceptions import process_all_exceptions
@@ -22,118 +23,64 @@ log = logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
 
 
-def process(
-    *,
-    section: str,
-    dir_name: str,
-    input_name: str = '',
-    status: int = 0,
-    client_agent: str = 'manual',
-    download_id: str = '',
-    input_category: str = '',
-    failure_link: str = '',
-) -> ProcessResult:
+def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, input_category: str = '', **kwargs) -> ProcessResult:
+    log.debug(f'Unused kwargs: {kwargs}')
     # Get configuration
     if nzb2media.CFG is None:
         raise RuntimeError('Configuration not loaded.')
     cfg = nzb2media.CFG[section][input_category]
 
     # Base URL
     ssl = int(cfg.get('ssl', 0))
     scheme = 'https' if ssl else 'http'
     host = cfg['host']
     port = cfg['port']
     web_root = cfg.get('web_root', '')
 
     # Authentication
     apikey = cfg.get('apikey', '')
 
     # Params
     delete_failed = int(cfg.get('delete_failed', 0))
     remote_path = int(cfg.get('remote_path', 0))
     wait_for = int(cfg.get('wait_for', 2))
 
     # Misc
     if status > 0 and nzb2media.NOEXTRACTFAILED:
         extract = 0
     else:
         extract = int(cfg.get('extract', 0))
 
     # Begin processing
     route = f'{web_root}/api/v1' if section == 'Lidarr' else f'{web_root}/api'
     url = nzb2media.utils.common.create_url(scheme, host, port, route)
     if not server_responding(url):
         log.error('Server did not respond. Exiting')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - {section} did not respond.',
-        )
-
-    if not os.path.isdir(dir_name) and os.path.isfile(
-        dir_name,
-    ):  # If the input directory is a file, assume single file download and split dir/name.
+        return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
+    if not os.path.isdir(dir_name) and os.path.isfile(dir_name):  # If the input directory is a file, assume single file download and split dir/name.
         dir_name = os.path.split(os.path.normpath(dir_name))[0]
 
     specific_path = os.path.join(dir_name, str(input_name))
     clean_name = os.path.splitext(specific_path)
     if clean_name[1] == '.nzb':
         specific_path = clean_name[0]
     if os.path.isdir(specific_path):
         dir_name = specific_path
 
     process_all_exceptions(input_name, dir_name)
     input_name, dir_name = convert_to_ascii(input_name, dir_name)
 
-    if (
-        not list_media_files(
-            dir_name, media=False, audio=True, meta=False, archives=False,
-        )
-        and list_media_files(
-            dir_name, media=False, audio=False, meta=False, archives=True,
-        )
-        and extract
-    ):
+    if not list_media_files(dir_name, media=False, audio=True, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
         log.debug(f'Checking for archives to extract in directory: {dir_name}')
         extract_files(dir_name)
         input_name, dir_name = convert_to_ascii(input_name, dir_name)
 
     # if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status:
     #     logger.info('Status shown as failed from Downloader, but valid video files found. Setting as successful.', section)
     #     status = 0
 
     if status == 0 and section == 'HeadPhones':
 
-        params = {
-            'apikey': apikey,
-            'cmd': 'forceProcess',
-            'dir': remote_dir(dir_name) if remote_path else dir_name,
-        }
-
-        res = force_process(
-            params, url, apikey, input_name, dir_name, section, wait_for,
-        )
+        params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': remote_dir(dir_name) if remote_path else dir_name}
+        res = force_process(params, url, apikey, input_name, dir_name, section, wait_for)
         if res.status_code in [0, 1]:
             return res
 
-        params = {
-            'apikey': apikey,
-            'cmd': 'forceProcess',
-            'dir': os.path.split(remote_dir(dir_name))[0]
-            if remote_path
-            else os.path.split(dir_name)[0],
-        }
-
-        res = force_process(
-            params, url, apikey, input_name, dir_name, section, wait_for,
-        )
+        params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]}
+        res = force_process(params, url, apikey, input_name, dir_name, section, wait_for)
         if res.status_code in [0, 1]:
             return res
 
         # The status hasn't changed. uTorrent can resume seeding now.
         log.warning(f'The music album does not appear to have changed status after {wait_for} minutes. Please check your Logs')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - No change in wanted status',
-        )
-
+        return ProcessResult.failure(f'{section}: Failed to post-process - No change in wanted status')
     if status == 0 and section == 'Lidarr':
         route = f'{web_root}/api/v1/command'
         url = nzb2media.utils.common.create_url(scheme, host, port, route)
@@ -146,31 +93,17 @@ def process(
         data = {'name': 'Rename', 'path': dir_name}
         try:
             log.debug(f'Opening URL: {url} with data: {data}')
-            response = requests.post(
-                url,
-                data=json.dumps(data),
-                headers=headers,
-                stream=True,
-                verify=False,
-                timeout=(30, 1800),
-            )
+            response = requests.post(url, data=json.dumps(data), headers=headers, stream=True, verify=False, timeout=(30, 1800))
         except requests.ConnectionError:
             log.error(f'Unable to open URL: {url}')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process - Unable to connect to '
-                f'{section}',
-            )
-
+            return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
         try:
             res = response.json()
             scan_id = int(res['id'])
             log.debug(f'Scan started with id: {scan_id}')
         except Exception as error:
             log.warning(f'No scan id was returned due to: {error}')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process - Unable to start scan',
-            )
-
+            return ProcessResult.failure(f'{section}: Failed to post-process - Unable to start scan')
         num = 0
         params = {}
         url = f'{url}/{scan_id}'
@ -182,141 +115,80 @@ def process(
|
||||
num += 1
|
||||
if command_status:
|
||||
log.debug(f'The Scan command return status: {command_status}')
|
||||
|
||||
if not os.path.exists(dir_name):
|
||||
log.debug(f'The directory {dir_name} has been removed. Renaming was successful.')
|
||||
return ProcessResult.success(
|
||||
f'{section}: Successfully post-processed {input_name}',
|
||||
)
|
||||
|
||||
return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
|
||||
if command_status and command_status in ['completed']:
|
||||
log.debug('The Scan command has completed successfully. Renaming was successful.')
|
||||
return ProcessResult.success(
|
||||
f'{section}: Successfully post-processed {input_name}',
|
||||
)
|
||||
|
||||
return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
|
||||
if command_status and command_status in ['failed']:
|
||||
log.debug('The Scan command has failed. Renaming was not successful.')
|
||||
# return ProcessResult.failure(
|
||||
# f'{section}: Failed to post-process {input_name}'
|
||||
# )
|
||||
# return ProcessResult.failure(f'{section}: Failed to post-process {input_name}')
|
||||
else:
|
||||
log.debug(f'The Scan command did not return status completed. Passing back to {section} to attempt complete download handling.')
|
||||
return ProcessResult(
|
||||
message=f'{section}: Passing back to {section} to attempt '
|
||||
f'Complete Download Handling',
|
||||
status_code=status,
|
||||
)
|
||||
|
||||
return ProcessResult(message=f'{section}: Passing back to {section} to attempt Complete Download Handling', status_code=status)
else:
if section == 'Lidarr':
log.debug(f'FAILED: The download failed. Sending failed download to {section} for CDH processing')
# Return as failed to flag this in the downloader.
return ProcessResult.failure(
f'{section}: Download Failed. Sending back to {section}',
)
return ProcessResult.failure(f'{section}: Download Failed. Sending back to {section}')
log.warning('FAILED DOWNLOAD DETECTED')
if (
delete_failed
and os.path.isdir(dir_name)
and not os.path.dirname(dir_name) == dir_name
):
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
log.debug(f'Deleting failed files and folder {dir_name}')
remove_dir(dir_name)
# Return as failed to flag this in the downloader.
return ProcessResult.failure(
f'{section}: Failed to post-process. {section} does not '
f'support failed downloads',
)

return ProcessResult.failure(f'{section}: Failed to post-process. {section} does not support failed downloads')
return ProcessResult.failure()


def get_status(url, apikey, dir_name):
log.debug(f'Attempting to get current status for release:{os.path.basename(dir_name)}')

params = {
'apikey': apikey,
'cmd': 'getHistory',
}

params = {'apikey': apikey, 'cmd': 'getHistory'}
log.debug(f'Opening URL: {url} with PARAMS: {params}')

try:
response = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.RequestException:
log.error('Unable to open URL')
return None

try:
result = response.json()
except ValueError:
# ValueError catches simplejson's JSONDecodeError and json's ValueError
return None

for album in result:
if os.path.basename(dir_name) == album['FolderName']:
return album['Status'].lower()


def force_process(
params, url, apikey, input_name, dir_name, section, wait_for,
):
def force_process(params, url, apikey, input_name, dir_name, section, wait_for):
release_status = get_status(url, apikey, dir_name)
if not release_status:
log.error(f'Could not find a status for {input_name}, is it in the wanted list ?')

log.debug(f'Opening URL: {url} with PARAMS: {params}')

try:
response = requests.get(url, params=params, verify=False, timeout=(30, 300))
except requests.ConnectionError:
log.error(f'Unable to open URL {url}')
return ProcessResult.failure(
f'{section}: Failed to post-process - Unable to connect to '
f'{section}',
)

return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
log.debug(f'Result: {response.text}')

if response.status_code not in [
requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
]:
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
log.error(f'Server returned status {response.status_code}')
return ProcessResult.failure(
f'{section}: Failed to post-process - Server returned status {response.status_code}',
)

return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}')
if response.text == 'OK':
log.debug(f'SUCCESS: Post-Processing started for {input_name} in folder {dir_name} ...')
else:
log.error(f'FAILED: Post-Processing has NOT started for {input_name} in folder {dir_name}. exiting!')
return ProcessResult.failure(
f'{section}: Failed to post-process - Returned log from {section} '
f'was not as expected.',
)

return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.')
# we will now wait for this album to be processed before returning to TorrentToMedia and unpausing.
timeout = time.time() + 60 * wait_for
while time.time() < timeout:
current_status = get_status(url, apikey, dir_name)
if (
current_status is not None and current_status != release_status
):  # Something has changed. CPS must have processed this movie.
if current_status is not None and current_status != release_status:  # Something has changed. CPS must have processed this movie.
log.debug(f'SUCCESS: This release is now marked as status [{current_status}]')
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)
return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
if not os.path.isdir(dir_name):
log.debug(f'SUCCESS: The input directory {dir_name} has been removed Processing must have finished.')
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)
return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
time.sleep(10 * wait_for)
# The status hasn't changed.
return ProcessResult(
message='no change',
status_code=2,
)
return ProcessResult(message='no change', status_code=2)
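The force_process helper above polls get_status until the release status changes or the wait_for window expires. A minimal sketch of that poll-until-changed pattern, assuming a hypothetical get_state callable and illustrative timings that are not part of nzbToMedia:

# Sketch of the poll-until-changed loop used by force_process.
# `get_state` and the timing values are hypothetical stand-ins.
import time


def wait_for_change(get_state, initial, wait_for_minutes=2, poll_seconds=10):
    """Poll get_state() until it differs from `initial` or the window expires."""
    deadline = time.time() + 60 * wait_for_minutes
    while time.time() < deadline:
        current = get_state()
        if current is not None and current != initial:
            return current  # something changed; processing finished
        time.sleep(poll_seconds)
    return None  # no change within the window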
@ -12,6 +12,7 @@ from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session

import nzb2media
import nzb2media.utils.common
from nzb2media import transcoder
from nzb2media.auto_process.common import ProcessResult
from nzb2media.auto_process.common import command_complete
@ -34,29 +35,17 @@ log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())


def process(
*,
section: str,
dir_name: str,
input_name: str = '',
status: int = 0,
client_agent: str = 'manual',
download_id: str = '',
input_category: str = '',
failure_link: str = '',
) -> ProcessResult:
def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, client_agent: str = 'manual', download_id: str = '', input_category: str = '', failure_link: str = '') -> ProcessResult:
# Get configuration
if nzb2media.CFG is None:
raise RuntimeError('Configuration not loaded.')
cfg = nzb2media.CFG[section][input_category]

# Base URL
ssl = int(cfg.get('ssl', 0))
scheme = 'https' if ssl else 'http'
host = cfg['host']
port = cfg['port']
web_root = cfg.get('web_root', '')

# Authentication
apikey = cfg.get('apikey', '')
username = cfg.get('username', '')
@ -64,12 +53,10 @@ def process(
api_version = int(cfg.get('api_version', 2))
sso_username = cfg.get('sso_username', '')
sso_password = cfg.get('sso_password', '')

# Params
delete_failed = int(cfg.get('delete_failed', 0))
remote_path = int(cfg.get('remote_path', 0))
wait_for = int(cfg.get('wait_for', 2))

# Misc
if status > 0 and nzb2media.NOEXTRACTFAILED:
extract = 0
@ -82,13 +69,10 @@ def process(
force = int(cfg.get('force', 0))
delete_on = int(cfg.get('delete_on', 0))
ignore_subs = int(cfg.get('ignore_subs', 0))

# Begin processing

# Refactor into an OO structure.
# For now let's do both the OO and the serialized code, until everything has been migrated.
init_sickbeard = InitSickBeard(cfg, section, input_category)

url = nzb2media.utils.common.create_url(scheme, host, port, web_root)
if server_responding(url):
# auto-detect correct fork
@ -100,27 +84,17 @@ def process(
fork, fork_params = 'None', {}
else:
log.error('Server did not respond. Exiting')
return ProcessResult.failure(
f'{section}: Failed to post-process - {section} did not respond.',
)

if (
client_agent == nzb2media.TORRENT_CLIENT_AGENT
and nzb2media.USE_LINK == 'move-sym'
):
return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.')
if client_agent == nzb2media.TORRENT_CLIENT_AGENT and nzb2media.USE_LINK == 'move-sym':
process_method = 'symlink'
if not os.path.isdir(dir_name) and os.path.isfile(
dir_name,
):  # If the input directory is a file, assume single file download and split dir/name.
if not os.path.isdir(dir_name) and os.path.isfile(dir_name):  # If the input directory is a file, assume single file download and split dir/name.
dir_name = os.path.split(os.path.normpath(dir_name))[0]

specific_path = os.path.join(dir_name, str(input_name))
clean_name = os.path.splitext(specific_path)
if clean_name[1] == '.nzb':
specific_path = clean_name[0]
if os.path.isdir(specific_path):
dir_name = specific_path

# Attempt to create the directory if it doesn't exist and ignore any
# error stating that it already exists. This fixes a bug where SickRage
# won't process the directory because it doesn't exist.
@ -131,51 +105,27 @@ def process(
# Re-raise the error if it wasn't about the directory not existing
if error.errno != errno.EEXIST:
raise

if 'process_method' not in fork_params or (
client_agent in ['nzbget', 'sabnzbd']
and nzb_extraction_by != 'Destination'
):
if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != 'Destination'):
if input_name:
process_all_exceptions(input_name, dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)

# Now check if tv files exist in destination.
if not list_media_files(
dir_name, media=True, audio=False, meta=False, archives=False,
):
if (
list_media_files(
dir_name,
media=False,
audio=False,
meta=False,
archives=True,
)
and extract
):
if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
if list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
log.debug(f'Checking for archives to extract in directory: {dir_name}')
extract_files(dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)

if list_media_files(
dir_name, media=True, audio=False, meta=False, archives=False,
):  # Check that a video exists. if not, assume failed.
if list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
flatten(dir_name)

# Check video files for corruption
good_files = 0
valid_files = 0
num_files = 0
for video in list_media_files(
dir_name, media=True, audio=False, meta=False, archives=False,
):
for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
num_files += 1
if transcoder.is_video_good(video, status):
good_files += 1
if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(
video, status, require_lan=nzb2media.REQUIRE_LAN,
):
if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=nzb2media.REQUIRE_LAN):
valid_files += 1
import_subs(video)
rename_subs(dir_name)
@ -186,18 +136,13 @@ def process(
if valid_files < num_files and status == 0:
log.info('Found corrupt videos. Setting status Failed')
status = 1
if (
'NZBOP_VERSION' in os.environ
and os.environ['NZBOP_VERSION'][0:5] >= '14.0'
):
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if good_files == num_files:
log.debug(f'Video marked as failed due to missing required language: {nzb2media.REQUIRE_LAN}')
else:
log.debug('Video marked as failed due to missing playable audio or video')
if (
good_files < num_files and failure_link
):  # only report corrupt files
if good_files < num_files and failure_link:  # only report corrupt files
failure_link += '&corrupt=true'
elif client_agent == 'manual':
log.warning(f'No media files found in directory {dir_name} to manually process.')
@ -213,36 +158,23 @@ def process(
else:
log.warning(f'No media files found in directory {dir_name}. Processing this as a failed download')
status = 1
if (
'NZBOP_VERSION' in os.environ
and os.environ['NZBOP_VERSION'][0:5] >= '14.0'
):
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')

if (
status == 0 and nzb2media.TRANSCODE == 1
):  # only transcode successful downloads
if status == 0 and nzb2media.TRANSCODE == 1:  # only transcode successful downloads
result, new_dir_name = transcoder.transcode_directory(dir_name)
if result == 0:
log.debug(f'SUCCESS: Transcoding succeeded for files in {dir_name}')
dir_name = new_dir_name

log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}')
if chmod_directory:
log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'')
rchmod(dir_name, chmod_directory)
else:
log.error(f'FAILED: Transcoding failed for files in {dir_name}')
return ProcessResult.failure(
f'{section}: Failed to post-process - Transcoding failed',
)

return ProcessResult.failure(f'{section}: Failed to post-process - Transcoding failed')
# Part of the refactor
if init_sickbeard.fork_obj:
init_sickbeard.fork_obj.initialize(
dir_name, input_name, status, client_agent='manual',
)

init_sickbeard.fork_obj.initialize(dir_name, input_name, status, client_agent='manual')
# configure SB params to pass
# We don't want to remove params, for the Forks that have been refactored.
# As we don't want to duplicate this part of the code.
@ -251,7 +183,6 @@ def process(
fork_params['proc_type'] = 'manual'
if input_name is not None:
fork_params['nzbName'] = input_name

for param in copy.copy(fork_params):
if param == 'failed':
if status > 1:
@ -261,67 +192,47 @@ def process(
del fork_params['proc_type']
if 'type' in fork_params:
del fork_params['type']

if param == 'return_data':
fork_params[param] = 0
if 'quiet' in fork_params:
del fork_params['quiet']

if param == 'type':
if (
'type' in fork_params
):  # only set if we haven't already deleted for 'failed' above.
if 'type' in fork_params:  # only set if we haven't already deleted for 'failed' above.
fork_params[param] = 'manual'
if 'proc_type' in fork_params:
del fork_params['proc_type']

if param in [
'dir_name',
'dir',
'proc_dir',
'process_directory',
'path',
]:
if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']:
fork_params[param] = dir_name
if remote_path:
fork_params[param] = remote_dir(dir_name)

if param == 'process_method':
if process_method:
fork_params[param] = process_method
else:
del fork_params[param]

if param in ['force', 'force_replace']:
if force:
fork_params[param] = force
else:
del fork_params[param]

if param in ['delete_on', 'delete']:
if delete_on:
fork_params[param] = delete_on
else:
del fork_params[param]

if param == 'ignore_subs':
if ignore_subs:
fork_params[param] = ignore_subs
else:
del fork_params[param]

if param == 'force_next':
fork_params[param] = 1

# delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in list(fork_params.items()) if v is None]

if status == 0:
if section == 'NzbDrone' and not apikey:
log.info('No Sonarr apikey entered. Processing completed.')
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)
return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
log.debug('SUCCESS: The download succeeded, sending a post-process request')
else:
nzb2media.FAILED = True
@ -332,23 +243,14 @@ def process(
elif section == 'NzbDrone':
log.debug(f'FAILED: The download failed. Sending failed download to {fork} for CDH processing')
# Return as failed to flag this in the downloader.
return ProcessResult.failure(
f'{section}: Download Failed. Sending back to {section}',
)
return ProcessResult.failure(f'{section}: Download Failed. Sending back to {section}')
else:
log.debug(f'FAILED: The download failed. {fork} branch does not handle failed downloads. Nothing to process')
if (
delete_failed
and os.path.isdir(dir_name)
and not os.path.dirname(dir_name) == dir_name
):
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
log.debug(f'Deleting failed files and folder {dir_name}')
remove_dir(dir_name)
# Return as failed to flag this in the downloader.
return ProcessResult.failure(
f'{section}: Failed to post-process. {section} does not support failed downloads',
)

return ProcessResult.failure(f'{section}: Failed to post-process. {section} does not support failed downloads')
route = ''
if section == 'SickBeard':
if apikey:
@ -372,20 +274,10 @@ def process(
# params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
if remote_path:
log.debug(f'remote_path: {remote_dir(dir_name)}')
data = {
'name': 'DownloadedEpisodesScan',
'path': remote_dir(dir_name),
'downloadClientId': download_id,
'importMode': import_mode,
}
data = {'name': 'DownloadedEpisodesScan', 'path': remote_dir(dir_name), 'downloadClientId': download_id, 'importMode': import_mode}
else:
log.debug(f'path: {dir_name}')
data = {
'name': 'DownloadedEpisodesScan',
'path': dir_name,
'downloadClientId': download_id,
'importMode': import_mode,
}
data = {'name': 'DownloadedEpisodesScan', 'path': dir_name, 'downloadClientId': download_id, 'importMode': import_mode}
if not download_id:
data.pop('downloadClientId')
url = nzb2media.utils.common.create_url(scheme, host, port, route)
@ -394,7 +286,6 @@ def process(
if init_sickbeard.fork_obj:
return init_sickbeard.fork_obj.api_call()
session = requests.Session()

log.debug(f'Opening URL: {url} with params: {fork_params}')
if not apikey and username and password:
login = f'{web_root}/login'
@ -402,92 +293,27 @@ def process(
response = session.get(login, verify=False, timeout=(30, 60))
if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
login_params['_xsrf'] = response.cookies.get('_xsrf')
session.post(
login,
data=login_params,
stream=True,
verify=False,
timeout=(30, 60),
)
response = session.get(
url,
auth=(username, password),
params=fork_params,
stream=True,
verify=False,
timeout=(30, 1800),
)
session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
response = session.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800))
elif section == 'SiCKRAGE':
session = requests.Session()

if api_version >= 2 and sso_username and sso_password:
oauth = OAuth2Session(
client=LegacyApplicationClient(
client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID,
),
)
oauth_token = oauth.fetch_token(
client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID,
token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL,
username=sso_username,
password=sso_password,
)
session.headers.update(
{'Authorization': 'Bearer ' + oauth_token['access_token']},
)

params = {
'path': fork_params['path'],
'failed': str(bool(fork_params['failed'])).lower(),
'processMethod': 'move',
'forceReplace': str(
bool(fork_params['force_replace']),
).lower(),
'returnData': str(
bool(fork_params['return_data']),
).lower(),
'delete': str(bool(fork_params['delete'])).lower(),
'forceNext': str(bool(fork_params['force_next'])).lower(),
'nzbName': fork_params['nzbName'],
}
oauth = OAuth2Session(client=LegacyApplicationClient(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID))
oauth_token = oauth.fetch_token(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, username=sso_username, password=sso_password)
session.headers.update({'Authorization': 'Bearer ' + oauth_token['access_token']})
params = {'path': fork_params['path'], 'failed': str(bool(fork_params['failed'])).lower(), 'processMethod': 'move', 'forceReplace': str(bool(fork_params['force_replace'])).lower(), 'returnData': str(bool(fork_params['return_data'])).lower(), 'delete': str(bool(fork_params['delete'])).lower(), 'forceNext': str(bool(fork_params['force_next'])).lower(), 'nzbName': fork_params['nzbName']}
else:
params = fork_params

response = session.get(
url,
params=params,
stream=True,
verify=False,
timeout=(30, 1800),
)
response = session.get(url, params=params, stream=True, verify=False, timeout=(30, 1800))
elif section == 'NzbDrone':
log.debug(f'Opening URL: {url} with data: {data}')
response = requests.post(
url,
data=json.dumps(data),
headers=headers,
stream=True,
verify=False,
timeout=(30, 1800),
)
response = requests.post(url, data=json.dumps(data), headers=headers, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
log.error(f'Unable to open URL: {url}')
return ProcessResult.failure(
f'{section}: Failed to post-process - Unable to connect to '
f'{section}',
)

if response.status_code not in [
requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
]:
return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}')
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
log.error(f'Server returned status {response.status_code}')
return ProcessResult.failure(
f'{section}: Failed to post-process - Server returned status '
f'{response.status_code}',
)

return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}')
success = False
queued = False
started = False
@ -504,12 +330,8 @@ def process(
input_name = os.path.split(line)[1]
if 'added to the queue' in line:
queued = True
if (
'Processing succeeded' in line
or 'Successfully processed' in line
):
if 'Processing succeeded' in line or 'Successfully processed' in line:
success = True

if queued:
time.sleep(60)
elif section == 'SiCKRAGE':
@ -528,20 +350,11 @@ def process(
log.warning(f'No scan id was returned due to: {error}')
scan_id = None
started = False

if (
status != 0
and delete_failed
and not os.path.dirname(dir_name) == dir_name
):
if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name:
log.debug(f'Deleting failed files and folder {dir_name}')
remove_dir(dir_name)

if success:
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)

return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
if section == 'NzbDrone' and started:
num = 0
params = {}
@ -554,39 +367,20 @@ def process(
num += 1
if command_status:
log.debug(f'The Scan command return status: {command_status}')

if not os.path.exists(dir_name):
log.debug(f'The directory {dir_name} has been removed. Renaming was successful.')
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)

return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
if command_status and command_status in ['completed']:
log.debug('The Scan command has completed successfully. Renaming was successful.')
return ProcessResult.success(
f'{section}: Successfully post-processed {input_name}',
)

return ProcessResult.success(f'{section}: Successfully post-processed {input_name}')
if command_status and command_status in ['failed']:
log.debug('The Scan command has failed. Renaming was not successful.')
# return ProcessResult.failure(
# f'{section}: Failed to post-process {input_name}'
# )

# return ProcessResult.failure(f'{section}: Failed to post-process {input_name}')
url2 = nzb2media.utils.common.create_url(scheme, host, port, route2)
if completed_download_handling(url2, headers, section=section):
log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.')
return ProcessResult(
message=f'{section}: Complete DownLoad Handling is enabled. '
f'Passing back to {section}',
status_code=status,
)
return ProcessResult(message=f'{section}: Complete DownLoad Handling is enabled. Passing back to {section}', status_code=status)
log.warning('The Scan command did not return a valid status. Renaming was not successful.')
return ProcessResult.failure(
f'{section}: Failed to post-process {input_name}',
)
return ProcessResult.failure(f'{section}: Failed to post-process {input_name}')
# We did not receive Success confirmation.
return ProcessResult.failure(
f'{section}: Failed to post-process - Returned log from {section} '
f'was not as expected.',
)
return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.')
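The process() function above prunes fork_params so that only parameters the detected fork understands, and that carry a value, are sent on. A small sketch of that prune step under the same None-means-unused convention (the keys shown are illustrative, not a real fork's parameter set):

# Sketch: drop any fork params left at None so they are not sent to the fork.
fork_params = {'proc_dir': '/downloads/show', 'failed': 0, 'force': None, 'delete': None}
for key, value in list(fork_params.items()):
    if value is None:
        fork_params.pop(key)  # same effect as the list-comprehension pop above
print(fork_params)  # {'proc_dir': '/downloads/show', 'failed': 0}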
File diff suppressed because it is too large
@ -8,7 +8,6 @@ from nzb2media.utils.files import backup_versioned_file

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

MIN_DB_VERSION = 1  # oldest db version we support migrating from
MAX_DB_VERSION = 2

@ -26,8 +25,6 @@ def backup_database(version):
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.


class InitialSchema(main_db.SchemaUpgrade):
def test(self):
no_update = False
@ -37,34 +34,19 @@ class InitialSchema(main_db.SchemaUpgrade):
return no_update

def execute(self):
if not self.has_table('downloads') and not self.has_table(
'db_version',
):
queries = [
'CREATE TABLE db_version (db_version INTEGER);',
'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));',
'INSERT INTO db_version (db_version) VALUES (2);',
]
if not self.has_table('downloads') and not self.has_table('db_version'):
queries = ['CREATE TABLE db_version (db_version INTEGER);', 'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', 'INSERT INTO db_version (db_version) VALUES (2);']
for query in queries:
self.connection.action(query)

else:
cur_db_version = self.check_db_version()

if cur_db_version < MIN_DB_VERSION:
log.critical(f'Your database version ({cur_db_version}) is too old to migrate from what this version of nzbToMedia supports ({MIN_DB_VERSION}).\nPlease remove nzbtomedia.db file to begin fresh.')
sys.exit(1)

if cur_db_version > MAX_DB_VERSION:
log.critical(f'Your database version ({cur_db_version}) has been incremented past what this version of nzbToMedia supports ({MAX_DB_VERSION}).\nIf you have used other forks of nzbToMedia, your database may be unusable due to their modifications.')
sys.exit(1)
if cur_db_version < MAX_DB_VERSION:  # We need to upgrade.
queries = [
'CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));',
'INSERT INTO downloads2 SELECT * FROM downloads;',
'DROP TABLE IF EXISTS downloads;',
'ALTER TABLE downloads2 RENAME TO downloads;',
'INSERT INTO db_version (db_version) VALUES (2);',
]
queries = ['CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', 'INSERT INTO downloads2 SELECT * FROM downloads;', 'DROP TABLE IF EXISTS downloads;', 'ALTER TABLE downloads2 RENAME TO downloads;', 'INSERT INTO db_version (db_version) VALUES (2);']
for query in queries:
self.connection.action(query)
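The upgrade path above uses SQLite's classic table-rebuild idiom: create a replacement table with the desired schema, copy the rows across, drop the old table, and rename. A standalone sketch of the same idiom against a throwaway in-memory database (table and column names here are illustrative, not the real nzbtomedia.db schema):

# Sketch of the SQLite table-rebuild migration idiom used by InitialSchema.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE downloads (input_name TEXT)')
conn.execute("INSERT INTO downloads VALUES ('example')")
# Rebuild with the new schema, preserving existing rows.
conn.execute('CREATE TABLE downloads2 (input_name TEXT, status INTEGER DEFAULT 0)')
conn.execute('INSERT INTO downloads2 (input_name) SELECT input_name FROM downloads')
conn.execute('DROP TABLE downloads')
conn.execute('ALTER TABLE downloads2 RENAME TO downloads')
conn.commit()
print(conn.execute('SELECT * FROM downloads').fetchall())  # [('example', 0)]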
@ -6,9 +6,9 @@ import platform
import shutil
import stat
import subprocess
from subprocess import call
from subprocess import Popen
from subprocess import DEVNULL
from subprocess import Popen
from subprocess import call
from time import sleep

import nzb2media
@ -24,68 +24,18 @@ def extract(file_path, output_destination):
if not os.path.exists(nzb2media.SEVENZIP):
log.error('EXTRACTOR: Could not find 7-zip, Exiting')
return False
wscriptlocation = os.path.join(
os.environ['WINDIR'], 'system32', 'wscript.exe',
)
invislocation = os.path.join(
nzb2media.APP_ROOT, 'nzb2media', 'extractor', 'bin', 'invisible.vbs',
)
cmd_7zip = [
wscriptlocation,
invislocation,
str(nzb2media.SHOWEXTRACT),
nzb2media.SEVENZIP,
'x',
'-y',
]
ext_7zip = [
'.rar',
'.zip',
'.tar.gz',
'tgz',
'.tar.bz2',
'.tbz',
'.tar.lzma',
'.tlz',
'.7z',
'.xz',
'.gz',
]
wscriptlocation = os.path.join(os.environ['WINDIR'], 'system32', 'wscript.exe')
invislocation = os.path.join(nzb2media.APP_ROOT, 'nzb2media', 'extractor', 'bin', 'invisible.vbs')
cmd_7zip = [wscriptlocation, invislocation, str(nzb2media.SHOWEXTRACT), nzb2media.SEVENZIP, 'x', '-y']
ext_7zip = ['.rar', '.zip', '.tar.gz', 'tgz', '.tar.bz2', '.tbz', '.tar.lzma', '.tlz', '.7z', '.xz', '.gz']
extract_commands = dict.fromkeys(ext_7zip, cmd_7zip)
# Using unix
else:
required_cmds = [
'unrar',
'unzip',
'tar',
'unxz',
'unlzma',
'7zr',
'bunzip2',
'gunzip',
]
required_cmds = ['unrar', 'unzip', 'tar', 'unxz', 'unlzma', '7zr', 'bunzip2', 'gunzip']
# ## Possible future support:
# gunzip: gz (cmd will delete original archive)
# ## the following do not extract to dest dir
# '.xz': ['xz', '-d --keep'],
# '.lzma': ['xz', '-d --format=lzma --keep'],
# '.bz2': ['bzip2', '-d --keep'],

extract_commands = {
'.rar': ['unrar', 'x', '-o+', '-y'],
'.tar': ['tar', '-xf'],
'.zip': ['unzip'],
'.tar.gz': ['tar', '-xzf'],
'.tgz': ['tar', '-xzf'],
'.tar.bz2': ['tar', '-xjf'],
'.tbz': ['tar', '-xjf'],
'.tar.lzma': ['tar', '--lzma', '-xf'],
'.tlz': ['tar', '--lzma', '-xf'],
'.tar.xz': ['tar', '--xz', '-xf'],
'.txz': ['tar', '--xz', '-xf'],
'.7z': ['7zr', 'x'],
'.gz': ['gunzip'],
}
# '.xz': ['xz', '-d --keep'],
# '.lzma': ['xz', '-d --format=lzma --keep'],
# '.bz2': ['bzip2', '-d --keep'],
extract_commands = {'.rar': ['unrar', 'x', '-o+', '-y'], '.tar': ['tar', '-xf'], '.zip': ['unzip'], '.tar.gz': ['tar', '-xzf'], '.tgz': ['tar', '-xzf'], '.tar.bz2': ['tar', '-xjf'], '.tbz': ['tar', '-xjf'], '.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'], '.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'], '.7z': ['7zr', 'x'], '.gz': ['gunzip']}
# Test command exists and if not, remove
if not os.getenv('TR_TORRENT_DIR'):
for cmd in required_cmds:
@ -107,10 +57,8 @@ def extract(file_path, output_destination):
del extract_commands[key]
else:
log.warning('EXTRACTOR: Cannot determine which tool to use when called from Transmission')

if not extract_commands:
log.warning('EXTRACTOR: No archive extracting programs found, plugin will be disabled')

ext = os.path.splitext(file_path)
cmd = []
if ext[1] in ('.gz', '.bz2', '.lzma'):
@ -130,24 +78,15 @@ def extract(file_path, output_destination):
else:
log.debug(f'EXTRACTOR: Unknown file type: {ext[1]}')
return False

# Create outputDestination folder
nzb2media.make_dir(output_destination)

if nzb2media.PASSWORDS_FILE and os.path.isfile(
os.path.normpath(nzb2media.PASSWORDS_FILE),
):
if nzb2media.PASSWORDS_FILE and os.path.isfile(os.path.normpath(nzb2media.PASSWORDS_FILE)):
with open(os.path.normpath(nzb2media.PASSWORDS_FILE)) as fin:
passwords = [
line.strip()
for line in fin
]
passwords = [line.strip() for line in fin]
else:
passwords = []

log.info(f'Extracting {file_path} to {output_destination}')
log.debug(f'Extracting {cmd} {file_path} {output_destination}')

orig_files = []
orig_dirs = []
for directory, subdirs, files in os.walk(output_destination):
@ -155,12 +94,9 @@ def extract(file_path, output_destination):
orig_dirs.append(os.path.join(directory, subdir))
for file in files:
orig_files.append(os.path.join(directory, file))

pwd = os.getcwd()  # Get our Present Working Directory
os.chdir(
output_destination,
)  # Not all unpack commands accept full paths, so just extract into this directory

# Not all unpack commands accept full paths, so just extract into this directory
os.chdir(output_destination)
try:  # now works same for nt and *nix
info = None
cmd.append(file_path)  # add filePath to final cmd arg.
@ -180,9 +116,7 @@ def extract(file_path, output_destination):
elif len(passwords) > 0 and 'gunzip' not in cmd:
log.info('EXTRACTOR: Attempting to extract with passwords')
for password in passwords:
if (
password == ''
):  # if edited in windows or otherwise if blank lines.
if password == '':  # if edited in windows or otherwise if blank lines.
continue
cmd2 = cmd
# append password here.
@ -200,8 +134,6 @@ def extract(file_path, output_destination):
log.error(f'EXTRACTOR: Extraction failed for {file_path}. Could not call command {cmd}')
os.chdir(pwd)
return False

devnull.close()
os.chdir(pwd)  # Go back to our Original Working Directory
if success:
# sleep to let files finish writing to disk
@ -217,9 +149,7 @@ def extract(file_path, output_destination):
for file in files:
if not os.path.join(directory, file) in orig_files:
try:
shutil.copymode(
file_path, os.path.join(directory, file),
)
shutil.copymode(file_path, os.path.join(directory, file))
except Exception:
pass
return True
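extract() above maps archive extensions to external commands and special-cases double extensions such as .tar.gz. A sketch of that extension lookup, with the command table abridged from the diff:

# Sketch of the extension-to-command lookup performed by extract().
import os

extract_commands = {'.rar': ['unrar', 'x', '-o+', '-y'], '.tar': ['tar', '-xf'], '.tar.gz': ['tar', '-xzf'], '.gz': ['gunzip']}


def command_for(file_path):
    root, ext = os.path.splitext(file_path)
    # Prefer a double extension such as .tar.gz over a bare .gz.
    if ext in ('.gz', '.bz2', '.lzma'):
        inner = os.path.splitext(root)[1]
        if inner + ext in extract_commands:
            return extract_commands[inner + ext]
    return extract_commands.get(ext)


print(command_for('show.s01e01.tar.gz'))  # ['tar', '-xzf']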
@ -2,63 +2,40 @@
~~~~~
License for use and distribution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

7-Zip Copyright (C) 1999-2018 Igor Pavlov.

The licenses for files are:

1) 7z.dll:
- The "GNU LGPL" as main license for most of the code
- The "GNU LGPL" with "unRAR license restriction" for some code
- The "BSD 3-clause License" for some code
2) All other files: the "GNU LGPL".

Redistributions in binary form must reproduce related license information from this file.

Note:
You can use 7-Zip on any computer, including a computer in a commercial
organization. You don't need to register or pay for 7-Zip.


GNU LGPL information
--------------------

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You can receive a copy of the GNU Lesser General Public License from
http://www.gnu.org/




BSD 3-clause License
--------------------

The "BSD 3-clause License" is used for the code in 7z.dll that implements LZFSE data decompression.
That code was derived from the code in the "LZFSE compression library" developed by Apple Inc,
that also uses the "BSD 3-clause License":

That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, that also uses the "BSD 3-clause License":
----
Copyright (c) 2015-2016, Apple Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived
from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
@ -66,25 +43,15 @@
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----




unRAR license restriction
-------------------------

The decompression engine for RAR archives was developed using source
code of unRAR program.
All copyrights to original unRAR code are owned by Alexander Roshal.

The license for original unRAR code has the following restriction:

The unRAR sources cannot be used to re-create the RAR compression algorithm,
which is proprietary. Distribution of modified unRAR sources in separate form
The unRAR sources cannot be used to re-create the RAR compression algorithm, which is proprietary. Distribution of modified unRAR sources in separate form
or as a part of other software is permitted, provided that it is clearly
stated in the documentation and source comments that the code may
not be used to develop a RAR (WinRAR) compatible archiver.


--
Igor Pavlov
@ -1,11 +1,9 @@
set args = WScript.Arguments
num = args.Count

if num < 2 then
WScript.Echo "Usage: [CScript | WScript] invis.vbs aScript.bat <visible or invisible 1/0> <some script arguments>"
WScript.Quit 1
end if

sargs = ""
if num > 2 then
sargs = " "
@ -14,8 +12,6 @@ if num > 2 then
sargs = sargs & anArg & " "
next
end if

Set WshShell = WScript.CreateObject("WScript.Shell")

returnValue = WshShell.Run("""" & args(1) & """" & sargs, args(0), True)
WScript.Quit(returnValue)
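On Windows, extract() runs 7-Zip through wscript.exe and the invisible.vbs wrapper above so that no console window flashes up during extraction. A hedged sketch of how such a command line is assembled; the paths below are illustrative placeholders, since nzb2media resolves the real ones from its own configuration:

# Sketch: wrapping a console tool with invisible.vbs so its window stays hidden.
# Paths are illustrative; nzb2media resolves them itself (Windows-only).
import os

wscript = os.path.join(os.environ.get('WINDIR', r'C:\Windows'), 'system32', 'wscript.exe')
invisible = r'C:\nzbToMedia\nzb2media\extractor\bin\invisible.vbs'
show_window = '0'  # invisible.vbs second argument: 1 shows the window, 0 hides it
seven_zip = r'C:\Program Files\7-Zip\7z.exe'
cmd = [wscript, invisible, show_window, seven_zip, 'x', '-y']
print(cmd)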
@ -7,7 +7,6 @@ class GitHub:
"""Simple api wrapper for the Github API v3."""

def __init__(self, github_repo_user, github_repo, branch='master'):

self.github_repo_user = github_repo_user
self.github_repo = github_repo
self.branch = branch
@ -21,37 +20,21 @@ class GitHub:
def commits(self):
"""
Get the 100 most recent commits from the specified user/repo/branch, starting from HEAD.

user: The github username of the person whose repo you're querying
repo: The repo name to query
branch: Optional, the branch name to show commits from

Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/
"""
return self._access_api(
['repos', self.github_repo_user, self.github_repo, 'commits'],
params={'per_page': 100, 'sha': self.branch},
)
return self._access_api(['repos', self.github_repo_user, self.github_repo, 'commits'], params={'per_page': 100, 'sha': self.branch})

def compare(self, base, head, per_page=1):
"""
Get compares between base and head.

user: The github username of the person whose repo you're querying
repo: The repo name to query
base: Start compare from branch
head: Current commit sha or branch name to compare
per_page: number of items per page

Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/
"""
return self._access_api(
[
'repos',
self.github_repo_user,
self.github_repo,
'compare',
f'{base}...{head}',
],
params={'per_page': per_page},
)
return self._access_api(['repos', self.github_repo_user, self.github_repo, 'compare', f'{base}...{head}'], params={'per_page': per_page})
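The _access_api calls above join a path-segment list and query params into a GitHub API v3 request. A self-contained sketch of the commits query using requests directly; the repo coordinates are placeholders filled from the wrapper's attributes in the real code:

# Sketch of the commits query made by GitHub.commits(), using requests directly.
import requests

url = '/'.join(['https://api.github.com', 'repos', 'clinton-hall', 'nzbToMedia', 'commits'])
response = requests.get(url, params={'per_page': 100, 'sha': 'master'}, timeout=30)
commits = response.json()  # deserialized JSON, one dict per commit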
@ -14,9 +14,7 @@ log.addHandler(logging.NullHandler())
def db_filename(filename='nzbtomedia.db', suffix=None):
"""
Return the correct location of the database file.

@param filename: The sqlite database filename to use. If not specified,
will be made to be nzbtomedia.db
@param filename: The sqlite database filename to use. If not specified, will be made to be nzbtomedia.db
@param suffix: The suffix to append to the filename. A '.' will be added
automatically, i.e. suffix='v0' will make dbfile.db.v0
@return: the correct location of the database file.
@ -28,7 +26,6 @@ def db_filename(filename='nzbtomedia.db', suffix=None):

class DBConnection:
def __init__(self, filename='nzbtomedia.db', suffix=None, row_type=None):

self.filename = filename
self.connection = sqlite3.connect(db_filename(filename), 20)
self.connection.row_factory = sqlite3.Row
@ -40,7 +37,6 @@ class DBConnection:
except sqlite3.OperationalError as error:
if 'no such table: db_version' in error.args[0]:
return 0

if result:
return int(result[0]['db_version'])
return 0
@ -48,10 +44,8 @@ class DBConnection:
def fetch(self, query, args=None):
if query is None:
return

sql_result = None
attempt = 0

while attempt < 5:
try:
if args is None:
@ -64,14 +58,10 @@ class DBConnection:
cursor = self.connection.cursor()
cursor.execute(query, args)
sql_result = cursor.fetchone()[0]

# get out of the connection attempt loop since we were successful
break
except sqlite3.OperationalError as error:
if (
'unable to open database file' in error.args[0]
or 'database is locked' in error.args[0]
):
if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]:
log.warning(f'DB error: {error}')
attempt += 1
time.sleep(1)
@ -81,16 +71,13 @@ class DBConnection:
except sqlite3.DatabaseError as error:
log.error(f'Fatal error executing query: {error}')
raise

return sql_result

def mass_action(self, querylist, log_transaction=False):
if querylist is None:
return

sql_result = []
attempt = 0

while attempt < 5:
try:
for query in querylist:
@ -101,9 +88,7 @@ class DBConnection:
elif len(query) > 1:
if log_transaction:
log.debug(f'{query[0]} with args {query[1]}')
sql_result.append(
self.connection.execute(query[0], query[1]),
)
sql_result.append(self.connection.execute(query[0], query[1]))
self.connection.commit()
log.debug(f'Transaction with {len(querylist)} query\'s executed')
return sql_result
@ -111,10 +96,7 @@ class DBConnection:
sql_result = []
if self.connection:
self.connection.rollback()
if (
'unable to open database file' in error.args[0]
or 'database is locked' in error.args[0]
):
if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]:
log.warning(f'DB error: {error}')
attempt += 1
time.sleep(1)
@ -126,16 +108,13 @@ class DBConnection:
self.connection.rollback()
log.error(f'Fatal error executing query: {error}')
raise

return sql_result

def action(self, query, args=None):
if query is None:
return

sql_result = None
attempt = 0

while attempt < 5:
try:
if args is None:
@ -148,10 +127,7 @@ class DBConnection:
# get out of the connection attempt loop since we were successful
break
except sqlite3.OperationalError as error:
if (
'unable to open database file' in error.args[0]
or 'database is locked' in error.args[0]
):
if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]:
log.warning(f'DB error: {error}')
attempt += 1
time.sleep(1)
@ -161,16 +137,12 @@ class DBConnection:
except sqlite3.DatabaseError as error:
log.error(f'Fatal error executing query: {error}')
raise

return sql_result

def select(self, query, args=None):

sql_results = self.action(query, args).fetchall()

if sql_results is None:
return []

return sql_results

def upsert(self, table_name, value_dict, key_dict):
@ -179,27 +151,9 @@ class DBConnection:

changes_before = self.connection.total_changes
items = list(value_dict.values()) + list(key_dict.values())
self.action(
'UPDATE {table} '
'SET {params} '
'WHERE {conditions}'.format(
table=table_name,
params=', '.join(gen_params(value_dict)),
conditions=' AND '.join(gen_params(key_dict)),
),
items,
)

self.action('UPDATE {table} SET {params} WHERE {conditions}'.format(table=table_name, params=', '.join(gen_params(value_dict)), conditions=' AND '.join(gen_params(key_dict))), items)
if self.connection.total_changes == changes_before:
self.action(
'INSERT OR IGNORE INTO {table} ({columns}) '
'VALUES ({values})'.format(
table=table_name,
columns=', '.join(map(str, value_dict.keys())),
values=', '.join(['?'] * len(value_dict.values())),
),
list(value_dict.values()),
)
self.action('INSERT OR IGNORE INTO {table} ({columns}) VALUES ({values})'.format(table=table_name, columns=', '.join(map(str, value_dict.keys())), values=', '.join(['?'] * len(value_dict.values()))), list(value_dict.values()))

def table_info(self, table_name):
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
@ -222,17 +176,13 @@ class DBSanityCheck:
# ===============
# = Upgrade API =
# ===============


def upgrade_database(connection, schema):
log.info('Checking database structure...')
_process_upgrade(connection, schema)


def pretty_name(class_name):
return ' '.join(
[x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)],
)
return ' '.join([x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)])


def _process_upgrade(connection, upgrade_class):
@ -243,14 +193,11 @@ def _process_upgrade(connection, upgrade_class):
try:
instance.execute()
except sqlite3.DatabaseError as error:
print(
f'Error in {upgrade_class.__name__}: {error}',
)
print(f'Error in {upgrade_class.__name__}: {error}')
raise
log.debug(f'{upgrade_class.__name__} upgrade completed')
else:
log.debug(f'{upgrade_class.__name__} upgrade not required')

for upgrade_sub_class in upgrade_class.__subclasses__():
_process_upgrade(connection, upgrade_sub_class)

@ -261,15 +208,7 @@ class SchemaUpgrade:
self.connection = connection

def has_table(self, table_name):
return (
len(
self.connection.action(
'SELECT 1 FROM sqlite_master WHERE name = ?;',
(table_name,),
).fetchall(),
)
> 0
)
return len(self.connection.action('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)).fetchall()) > 0

def has_column(self, table_name, column):
return column in self.connection.table_info(table_name)
@ -286,7 +225,5 @@ class SchemaUpgrade:

def inc_db_version(self):
new_version = self.check_db_version() + 1
self.connection.action(
'UPDATE db_version SET db_version = ?', [new_version],
)
self.connection.action('UPDATE db_version SET db_version = ?', [new_version])
return new_version
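DBConnection.upsert() above relies on sqlite3's total_changes counter: try an UPDATE first, and only INSERT when the counter shows nothing changed. A standalone sketch of that update-then-insert idiom; the table and columns are illustrative, not the real schema:

# Sketch of the update-then-insert upsert idiom used by DBConnection.upsert().
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE downloads (input_name TEXT PRIMARY KEY, status INTEGER)')


def upsert(name, status):
    before = conn.total_changes
    conn.execute('UPDATE downloads SET status = ? WHERE input_name = ?', (status, name))
    if conn.total_changes == before:  # UPDATE touched no rows: insert instead
        conn.execute('INSERT OR IGNORE INTO downloads (input_name, status) VALUES (?, ?)', (name, status))


upsert('example', 0)
upsert('example', 1)
print(conn.execute('SELECT * FROM downloads').fetchall())  # [('example', 1)]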
@ -19,12 +19,7 @@ class PyMedusa(SickBeard):
|
||||
@property
|
||||
def url(self):
|
||||
route = f'{self.sb_init.web_root}/home/postprocess/processEpisode'
|
||||
return nzb2media.utils.common.create_url(
|
||||
self.sb_init.protocol,
|
||||
self.sb_init.host,
|
||||
self.sb_init.port,
|
||||
route,
|
||||
)
|
||||
return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route)

class PyMedusaApiV1(SickBeard):
@ -33,54 +28,25 @@ class PyMedusaApiV1(SickBeard):
@property
def url(self) -> str:
route = f'{self.sb_init.web_root}/api/{self.sb_init.apikey}/'
return nzb2media.utils.common.create_url(
self.sb_init.protocol,
self.sb_init.host,
self.sb_init.port,
route,
)
return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route)

def api_call(self) -> ProcessResult:
self._process_fork_prarams()
log.debug(f'Opening URL: {self.url} with params: {self.sb_init.fork_params}')
try:
response = self.session.get(
self.url,
auth=(self.sb_init.username, self.sb_init.password),
params=self.sb_init.fork_params,
stream=True,
verify=False,
timeout=(30, 1800),
)
response = self.session.get(self.url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
log.error(f'Unable to open URL: {self.url}')
return ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Unable to '
f'connect to {self.sb_init.section}',
)

successful_status_codes = [
requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
]
return ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Unable to connect to {self.sb_init.section}')
successful_status_codes = [requests.codes.ok, requests.codes.created, requests.codes.accepted]
if response.status_code not in successful_status_codes:
log.error(f'Server returned status {response.status_code}')
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Server '
f'returned status {response.status_code}',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Server returned status {response.status_code}')
elif response.json()['result'] == 'success':
result = ProcessResult.success(
f'{self.sb_init.section}: Successfully post-processed '
f'{self.input_name}',
)
result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}')
else:
# We did not receive Success confirmation.
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Returned '
f'log from {self.sb_init.section} was not as expected.',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.')
return result
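ProcessResult.success and ProcessResult.failure read as classmethod constructors pairing a message with a status code. A rough sketch of the interface these call sites assume (the real class lives in nzb2media.auto_process.common):

from dataclasses import dataclass

@dataclass
class ProcessResult:
    message: str = ''
    status_code: int = 0

    @classmethod
    def success(cls, message=''):
        return cls(message=message, status_code=0)

    @classmethod
    def failure(cls, message=''):
        return cls(message=message, status_code=1)

print(ProcessResult.failure('SickBeard: Failed to post-process'))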

@ -89,25 +55,16 @@ class PyMedusaApiV2(SickBeard):

def __init__(self, sb_init):
super().__init__(sb_init)

# Check for an apikey
# This is required with using fork = medusa-apiv2
if not sb_init.apikey:
log.error(
'For the section SickBeard `fork = medusa-apiv2` you also '
'need to configure an `apikey`',
)
log.error('For the section SickBeard `fork = medusa-apiv2` you also ' 'need to configure an `apikey`')
raise ValueError('Missing apikey for fork: medusa-apiv2')

@property
def url(self):
route = f'{self.sb_init.web_root}/api/v2/postprocess'
return nzb2media.utils.common.create_url(
self.sb_init.protocol,
self.sb_init.host,
self.sb_init.port,
route,
)
return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route)

def _get_identifier_status(self, url):
# Loop through requesting medusa for the status on the queueitem.
@ -116,12 +73,10 @@ class PyMedusaApiV2(SickBeard):
except requests.ConnectionError:
log.error('Unable to get postprocess identifier status')
return False

try:
jdata = response.json()
except ValueError:
return False

return jdata

def api_call(self) -> ProcessResult:
@ -130,29 +85,15 @@ class PyMedusaApiV2(SickBeard):
payload = self.sb_init.fork_params
payload['resource'] = self.sb_init.fork_params['nzbName']
del payload['nzbName']

# Update the session with the x-api-key
headers = {
'x-api-key': self.sb_init.apikey,
'Content-type': 'application/json',
}
headers = {'x-api-key': self.sb_init.apikey, 'Content-type': 'application/json'}
self.session.headers.update(headers)
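Updating the session headers once means every later request on that session carries the x-api-key. A minimal requests sketch with placeholder values:

import requests

session = requests.Session()
# Placeholder key and URL; the real values come from the SickBeard config section.
session.headers.update({'x-api-key': 'REPLACE_ME', 'Content-type': 'application/json'})
payload = {'proc_dir': '/downloads/complete/tv', 'resource': 'Some.Show.S01E01'}
# session.post('http://localhost:8081/api/v2/postprocess', json=payload, verify=False, timeout=(30, 1800))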

# Send postprocess request
try:
response = self.session.post(
self.url,
json=payload,
verify=False,
timeout=(30, 1800),
)
response = self.session.post(self.url, json=payload, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
log.error('Unable to send postprocess request')
return ProcessResult.failure(
f'{self.sb_init.section}: Unable to send postprocess request '
f'to PyMedusa',
)

return ProcessResult.failure(f'{self.sb_init.section}: Unable to send postprocess request to PyMedusa')
# Get UUID
if response:
try:
@ -162,15 +103,12 @@ class PyMedusaApiV2(SickBeard):
return ProcessResult.failure('No data returned from provider')
else:
jdata = {}

status = jdata.get('status', None)
if status != 'success':
return ProcessResult.failure()

wait_for = int(self.sb_init.config.get('wait_for', 2))
num = 0
response = {}

queue_item_identifier = jdata['queueItem']['identifier']
url = f'{self.url}/{queue_item_identifier}'
while num < 12:  # set up wait_for minutes to see if command completes..
@ -181,24 +119,16 @@ class PyMedusaApiV2(SickBeard):
if 'error' in response:
break
num += 1

# Log Medusa's PP logs here.
if response.get('output'):
for line in response['output']:
log.debug(line)

# For now this will most likely always be True.
# In the future we could return an exit state for when the PP in
# medusa didn't yield an expected result.
if response.get('success'):
result = ProcessResult.success(
f'{self.sb_init.section}: Successfully post-processed '
f'{self.input_name}',
)
result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}')
else:
# We did not receive Success confirmation.
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Returned '
f'log from {self.sb_init.section} was not as expected.',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.')
return result
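The loop above polls the queue-item URL up to 12 times; the sleep between attempts sits in the elided hunk, so this generic sketch of the same pattern is an assumption about that part:

import time

def poll_status(fetch, wait_for_minutes=2, max_tries=12):
    # Call `fetch` until it reports success or an error, or we run out of tries.
    response = {}
    for _ in range(max_tries):
        response = fetch() or {}
        if response.get('success') or 'error' in response:
            break
        time.sleep(wait_for_minutes * 60)
    return response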

@ -17,7 +17,6 @@ log.addHandler(logging.NullHandler())

class InitSickBeard:
"""SickBeard init class.

Used to determine which SickBeard fork object to initialize.
"""

@ -26,7 +25,6 @@ class InitSickBeard:
self.config = cfg
self.section = section
self.input_category = input_category

self.host = cfg['host']
self.port = cfg['port']
self.ssl = int(cfg.get('ssl', 0))
@ -38,19 +36,10 @@ class InitSickBeard:
self.api_version = int(cfg.get('api_version', 2))
self.sso_username = cfg.get('sso_username', '')
self.sso_password = cfg.get('sso_password', '')

self.fork = ''
self.fork_params = None
self.fork_obj = None

replace = {
'medusa': 'Medusa',
'medusa-api': 'Medusa-api',
'sickbeard-api': 'SickBeard-api',
'sickgear': 'SickGear',
'sickchill': 'SickChill',
'stheno': 'Stheno',
}
replace = {'medusa': 'Medusa', 'medusa-api': 'Medusa-api', 'sickbeard-api': 'SickBeard-api', 'sickgear': 'SickGear', 'sickchill': 'SickChill', 'stheno': 'Stheno'}
_val = cfg.get('fork', 'auto')
fork_name = replace.get(_val, _val)
try:
@ -64,24 +53,10 @@ class InitSickBeard:
# config settings
if nzb2media.FORK_SET:
# keep using determined fork for multiple (manual) post-processing
log.info(
f'{self.section}:{self.input_category} fork already set to '
f'{nzb2media.FORK_SET[0]}',
)
log.info(f'{self.section}:{self.input_category} fork already set to {nzb2media.FORK_SET[0]}')
return nzb2media.FORK_SET[0], nzb2media.FORK_SET[1]

cfg = dict(nzb2media.CFG[self.section][self.input_category])

replace = {
'medusa': 'Medusa',
'medusa-api': 'Medusa-api',
'medusa-apiv1': 'Medusa-api',
'medusa-apiv2': 'Medusa-apiv2',
'sickbeard-api': 'SickBeard-api',
'sickgear': 'SickGear',
'sickchill': 'SickChill',
'stheno': 'Stheno',
}
replace = {'medusa': 'Medusa', 'medusa-api': 'Medusa-api', 'medusa-apiv1': 'Medusa-api', 'medusa-apiv2': 'Medusa-apiv2', 'sickbeard-api': 'SickBeard-api', 'sickgear': 'SickGear', 'sickchill': 'SickChill', 'stheno': 'Stheno'}
_val = cfg.get('fork', 'auto')
fork_name = replace.get(_val.lower(), _val)
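The replace map canonicalises the configured fork name; this second variant also lowercases before the lookup, so mixed-case config values still match:

replace = {'medusa': 'Medusa', 'medusa-apiv2': 'Medusa-apiv2', 'sickgear': 'SickGear'}
_val = 'Medusa-APIv2'  # example config value with mixed case
print(replace.get(_val.lower(), _val))  # -> Medusa-apiv2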
try:
@ -89,118 +64,42 @@ class InitSickBeard:
except KeyError:
self.fork = 'auto'
protocol = 'https://' if self.ssl else 'http://'

if self.section == 'NzbDrone':
log.info(f'Attempting to verify {self.input_category} fork')
url = nzb2media.utils.common.create_url(
scheme=protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/api/rootfolder',
)
url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/rootfolder')
headers = {'X-Api-Key': self.apikey}
try:
response = requests.get(
url,
headers=headers,
stream=True,
verify=False,
)
response = requests.get(url, headers=headers, stream=True, verify=False)
except requests.ConnectionError:
log.warning(
f'Could not connect to {self.section}:'
f'{self.input_category} to verify fork!',
)

log.warning(f'Could not connect to {self.section}:{self.input_category} to verify fork!')
if not response.ok:
log.warning(
f'Connection to {self.section}:{self.input_category} '
f'failed! Check your configuration',
)

log.warning(f'Connection to {self.section}:{self.input_category} failed! Check your configuration')
self.fork = ['default', {}]

elif self.section == 'SiCKRAGE':
log.info(f'Attempting to verify {self.input_category} fork')

if self.api_version >= 2:
url = nzb2media.utils.common.create_url(
scheme=protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/api/v{self.api_version}/ping',
)
url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/v{self.api_version}/ping')
api_params = {}
else:
api_version = f'v{self.api_version}'
url = nzb2media.utils.common.create_url(
scheme=protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/api/{api_version}/{self.apikey}/',
)
url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/{api_version}/{self.apikey}/')
api_params = {'cmd': 'postprocess', 'help': '1'}

try:
if (
self.api_version >= 2
and self.sso_username
and self.sso_password
):
oauth = OAuth2Session(
client=LegacyApplicationClient(
client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID,
),
)
oauth_token = oauth.fetch_token(
client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID,
token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL,
username=self.sso_username,
password=self.sso_password,
)
if self.api_version >= 2 and self.sso_username and self.sso_password:
oauth = OAuth2Session(client=LegacyApplicationClient(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID))
oauth_token = oauth.fetch_token(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, username=self.sso_username, password=self.sso_password)
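The SSO branch is oauthlib's legacy resource-owner password grant. A standalone sketch with placeholder client id and token endpoint (the real ones are nzb2media.SICKRAGE_OAUTH_CLIENT_ID and SICKRAGE_OAUTH_TOKEN_URL):

from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session

CLIENT_ID = 'sickrage'  # placeholder
TOKEN_URL = 'https://auth.example.org/token'  # placeholder

oauth = OAuth2Session(client=LegacyApplicationClient(client_id=CLIENT_ID))
# token = oauth.fetch_token(client_id=CLIENT_ID, token_url=TOKEN_URL, username='user', password='pass')
# The bearer token then goes into the Authorization header, as the next hunk shows.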
token = oauth_token['access_token']
response = requests.get(
url,
headers={'Authorization': f'Bearer {token}'},
stream=True,
verify=False,
)
response = requests.get(url, headers={'Authorization': f'Bearer {token}'}, stream=True, verify=False)
else:
response = requests.get(
url,
params=api_params,
stream=True,
verify=False,
)

response = requests.get(url, params=api_params, stream=True, verify=False)
if not response.ok:
log.warning(
f'Connection to {self.section}:{self.input_category} '
f'failed! Check your configuration',
)
log.warning(f'Connection to {self.section}:{self.input_category} failed! Check your configuration')
except requests.ConnectionError:
log.warning(
f'Could not connect to {self.section}:'
f'{self.input_category} to verify API version!',
)

params = {
'path': None,
'failed': None,
'process_method': None,
'force_replace': None,
'return_data': None,
'type': None,
'delete': None,
'force_next': None,
'is_priority': None,
}

log.warning(f'Could not connect to {self.section}:{self.input_category} to verify API version!')
params = {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'is_priority': None}
self.fork = ['default', params]

elif self.fork == 'auto':
self.detect_fork()

log.info(f'{self.section}:{self.input_category} fork set to {self.fork[0]}')
nzb2media.FORK_SET = self.fork
self.fork, self.fork_params = self.fork[0], self.fork[1]
@ -216,7 +115,6 @@ class InitSickBeard:
log.error('Failed to get JSON data from response')
log.debug('Response received')
raise

try:
json_data = json_data['data']
except KeyError:
@ -227,13 +125,12 @@ class InitSickBeard:
if isinstance(json_data, str):
return rem_params, False
json_data = json_data.get('data', json_data)

try:
optional_parameters = json_data['optionalParameters'].keys()
# Find excess parameters
excess_parameters = set(params).difference(optional_parameters)
excess_parameters.remove('cmd')  # Don't remove cmd from api params
log.debug(f'Removing excess parameters: ' f'{sorted(excess_parameters)}')
log.debug(f'Removing excess parameters: {sorted(excess_parameters)}')
rem_params.extend(excess_parameters)
return rem_params, True
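The excess-parameter check is a plain set difference between the params we would send and the optional parameters the fork's API advertises; 'cmd' is pulled back out because it names the API command itself rather than a post-process option:

params = {'cmd': 'postprocess', 'path': None, 'failed': None, 'force_next': None}
optional_parameters = ['path', 'failed']  # what the API's help output lists

excess = set(params).difference(optional_parameters)
excess.remove('cmd')  # never treat the command selector as excess
print(sorted(excess))  # -> ['force_next']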
except Exception:
@ -249,56 +146,26 @@ class InitSickBeard:
# Define the order to test.
# Default must be first since default fork doesn't reject parameters.
# Then in order of most unique parameters.

if self.apikey:
url = nzb2media.utils.common.create_url(
scheme=self.protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/api/{self.apikey}/',
)
url = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/{self.apikey}/')
api_params = {'cmd': 'sg.postprocess', 'help': '1'}
else:
url = nzb2media.utils.common.create_url(
scheme=self.protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/home/postprocess',
)
url = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/home/postprocess')
api_params = {}

# attempting to auto-detect fork
try:
session = requests.Session()

if not self.apikey and self.username and self.password:
login = nzb2media.utils.common.create_url(
scheme=self.protocol,
host=self.host,
port=self.port,
path=f'{self.web_root}/login',
)
login_params = {
'username': self.username,
'password': self.password,
}
login = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/login')
login_params = {'username': self.username, 'password': self.password}
response = session.get(login, verify=False, timeout=(30, 60))
if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
login_params['_xsrf'] = response.cookies.get('_xsrf')
session.post(login, data=login_params, stream=True, verify=False)
response = session.get(
url,
auth=(self.username, self.password),
params=api_params,
verify=False,
)
response = session.get(url, auth=(self.username, self.password), params=api_params, verify=False)
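The login dance: GET the login page first, and if the server answers 401/403 with an _xsrf cookie (Tornado-style CSRF protection), echo the token back in the POSTed form. Condensed, with a placeholder host:

import requests

session = requests.Session()
login = 'http://localhost:8081/login'  # placeholder URL
login_params = {'username': 'user', 'password': 'pass'}
# response = session.get(login, verify=False, timeout=(30, 60))
# if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
#     login_params['_xsrf'] = response.cookies.get('_xsrf')
# session.post(login, data=login_params, verify=False)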
except requests.ConnectionError:
log.info(
f'Could not connect to {self.section}:{self.input_category} '
f'to perform auto-fork detection!',
)
log.info(f'Could not connect to {self.section}:{self.input_category} to perform auto-fork detection!')
response = []

if response and response.ok:
if self.apikey:
rem_params, found = self._api_check(response, params, rem_params)
@ -308,78 +175,44 @@ class InitSickBeard:
api_params = {'cmd': 'help', 'subject': 'postprocess'}
try:
if not self.apikey and self.username and self.password:
response = session.get(
url,
auth=(self.username, self.password),
params=api_params,
verify=False,
)
response = session.get(url, auth=(self.username, self.password), params=api_params, verify=False)
else:
response = session.get(url, params=api_params, verify=False)
except requests.ConnectionError:
log.info(
f'Could not connect to {self.section}:'
f'{self.input_category} to perform auto-fork '
f'detection!',
)
log.info(f'Could not connect to {self.section}:{self.input_category} to perform auto-fork detection!')
rem_params, found = self._api_check(response, params, rem_params)
params['cmd'] = 'postprocess'
else:
# Find excess parameters
rem_params.extend(
param
for param in params
if f'name="{param}"' not in response.text
)

rem_params.extend(param for param in params if f'name="{param}"' not in response.text)
# Remove excess params
for param in rem_params:
params.pop(param)

for fork in sorted(nzb2media.FORKS, reverse=False):
if params == fork[1]:
detected = True
break

if detected:
self.fork = fork
log.info(
f'{self.section}:{self.input_category} fork auto-detection '
f'successful ...',
)
log.info(f'{self.section}:{self.input_category} fork auto-detection successful ...')
elif rem_params:
log.info(
f'{self.section}:{self.input_category} fork auto-detection '
f'found custom params {params}',
)
log.info(f'{self.section}:{self.input_category} fork auto-detection found custom params {params}')
self.fork = ['custom', params]
else:
log.info(
f'{self.section}:{self.input_category} fork auto-detection '
f'failed',
)
self.fork = list(nzb2media.FORKS.items())[
list(nzb2media.FORKS.keys()).index(nzb2media.FORK_DEFAULT)
]
log.info(f'{self.section}:{self.input_category} fork auto-detection failed')
self.fork = list(nzb2media.FORKS.items())[list(nzb2media.FORKS.keys()).index(nzb2media.FORK_DEFAULT)]
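Indexing items() by the position of FORK_DEFAULT in keys() just pulls one (name, params) pair out of the mapping; for example:

FORKS = {'default': {'dir': None}, 'medusa': {'proc_dir': None, 'type': None}}
FORK_DEFAULT = 'default'

fork = list(FORKS.items())[list(FORKS.keys()).index(FORK_DEFAULT)]
print(fork)  # -> ('default', {'dir': None})
# Equivalent and simpler: (FORK_DEFAULT, FORKS[FORK_DEFAULT])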

def _init_fork(self):
# These need to be imported here, to prevent a circular import.
from .pymedusa import PyMedusa, PyMedusaApiV1, PyMedusaApiV2

mapped_forks = {
'Medusa': PyMedusa,
'Medusa-api': PyMedusaApiV1,
'Medusa-apiv2': PyMedusaApiV2,
}
mapped_forks = {'Medusa': PyMedusa, 'Medusa-api': PyMedusaApiV1, 'Medusa-apiv2': PyMedusaApiV2}
log.debug(f'Create object for fork {self.fork}')
if self.fork and mapped_forks.get(self.fork):
# Create the fork object and pass self (SickBeardInit) to it for all the data, like Config.
self.fork_obj = mapped_forks[self.fork](self)
else:
log.info(
f'{self.section}:{self.input_category} Could not create a '
f'fork object for {self.fork}. Probably class not added yet.',
)
log.info(f'{self.section}:{self.input_category} Could not create a fork object for {self.fork}. Probably class not added yet.')
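Dispatching through a dict of classes avoids an if/elif ladder, and unknown fork names fall through to the log branch. A stripped-down illustration with a stand-in class:

class PyMedusa:
    # Stand-in for the real fork class; takes the init object like the code above.
    def __init__(self, init):
        self.init = init

mapped_forks = {'Medusa': PyMedusa}

def make_fork(name, init):
    cls = mapped_forks.get(name)
    return cls(init) if cls else None  # None mirrors the 'not added yet' branch

print(type(make_fork('Medusa', object())).__name__)  # -> PyMedusa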


class SickBeard:
@ -391,17 +224,12 @@ class SickBeard:
"""SB constructor."""
self.sb_init = sb_init
self.session = requests.Session()

self.failed = None
self.status = None
self.input_name = None
self.dir_name = None

self.delete_failed = int(self.sb_init.config.get('delete_failed', 0))
self.nzb_extraction_by = self.sb_init.config.get(
'nzbExtractionBy',
'Downloader',
)
self.nzb_extraction_by = self.sb_init.config.get('nzbExtractionBy', 'Downloader')
self.process_method = self.sb_init.config.get('process_method')
self.remote_path = int(self.sb_init.config.get('remote_path', 0))
self.wait_for = int(self.sb_init.config.get('wait_for', 2))
@ -409,22 +237,13 @@ class SickBeard:
self.delete_on = int(self.sb_init.config.get('delete_on', 0))
self.ignore_subs = int(self.sb_init.config.get('ignore_subs', 0))
self.is_priority = int(self.sb_init.config.get('is_priority', 0))

# get importmode, default to 'Move' for consistency with legacy
self.import_mode = self.sb_init.config.get('importMode', 'Move')

# Keep track of result state
self.success = False

def initialize(
self,
dir_name,
input_name=None,
failed=False,
client_agent='manual',
):
def initialize(self, dir_name, input_name=None, failed=False, client_agent='manual'):
"""We need to call this explicitly because we need some variables.

We can't pass these directly through the constructor.
"""
self.dir_name = dir_name
@ -435,10 +254,7 @@ class SickBeard:
self.extract = 0
else:
self.extract = int(self.sb_init.config.get('extract', 0))
if (
client_agent == nzb2media.TORRENT_CLIENT_AGENT
and nzb2media.USE_LINK == 'move-sym'
):
if client_agent == nzb2media.TORRENT_CLIENT_AGENT and nzb2media.USE_LINK == 'move-sym':
self.process_method = 'symlink'

@property
@ -447,12 +263,7 @@ class SickBeard:
route = f'{self.sb_init.web_root}/api/{self.sb_init.apikey}/'
else:
route = f'{self.sb_init.web_root}/home/postprocess/processEpisode'
return nzb2media.utils.common.create_url(
scheme=self.sb_init.protocol,
host=self.sb_init.host,
port=self.sb_init.port,
path=route,
)
return nzb2media.utils.common.create_url(scheme=self.sb_init.protocol, host=self.sb_init.host, port=self.sb_init.port, path=route)

def _process_fork_prarams(self):
# configure SB params to pass
@ -461,7 +272,6 @@ class SickBeard:
fork_params['proc_type'] = 'manual'
if self.input_name is not None:
fork_params['nzbName'] = self.input_name

for param in copy.copy(fork_params):
if param == 'failed':
if self.failed > 1:
@ -471,66 +281,50 @@ class SickBeard:
del fork_params['proc_type']
if 'type' in fork_params:
del fork_params['type']

if param == 'return_data':
fork_params[param] = 0
if 'quiet' in fork_params:
del fork_params['quiet']

if param == 'type':
if 'type' in fork_params:
# Set if we haven't already deleted for 'failed' above.
fork_params[param] = 'manual'
if 'proc_type' in fork_params:
del fork_params['proc_type']

if param in [
'dir_name',
'dir',
'proc_dir',
'process_directory',
'path',
]:
if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']:
fork_params[param] = self.dir_name
if self.remote_path:
fork_params[param] = remote_dir(self.dir_name)
# SickChill allows multiple path types. Only return 'path'
if param == 'proc_dir' and 'path' in fork_params:
del fork_params['proc_dir']

if param == 'process_method':
if self.process_method:
fork_params[param] = self.process_method
else:
del fork_params[param]

if param in ['force', 'force_replace']:
if self.force:
fork_params[param] = self.force
else:
del fork_params[param]

if param in ['delete_on', 'delete']:
if self.delete_on:
fork_params[param] = self.delete_on
else:
del fork_params[param]

if param == 'ignore_subs':
if self.ignore_subs:
fork_params[param] = self.ignore_subs
else:
del fork_params[param]

if param == 'is_priority':
if self.is_priority:
fork_params[param] = self.is_priority
else:
del fork_params[param]

if param == 'force_next':
fork_params[param] = 1

# delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in list(fork_params.items()) if v is None]
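That closing list comprehension runs purely for its side effect of dropping None-valued keys; a dict comprehension does the same job without the throwaway list:

fork_params = {'path': '/downloads/tv', 'failed': 0, 'delete': None, 'is_priority': None}
fork_params = {k: v for k, v in fork_params.items() if v is not None}
print(fork_params)  # -> {'path': '/downloads/tv', 'failed': 0}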

@ -539,66 +333,30 @@ class SickBeard:
self._process_fork_prarams()
log.debug(f'Opening URL: {self.url} with params: {self.sb_init.fork_params}')
try:
if (
not self.sb_init.apikey
and self.sb_init.username
and self.sb_init.password
):
if not self.sb_init.apikey and self.sb_init.username and self.sb_init.password:
# If not using the api, we need to login using user/pass first.
route = f'{self.sb_init.web_root}/login'
login = nzb2media.utils.common.create_url(
self.sb_init.protocol,
self.sb_init.host,
self.sb_init.port,
route,
)
login_params = {
'username': self.sb_init.username,
'password': self.sb_init.password,
}
login = nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route)
login_params = {'username': self.sb_init.username, 'password': self.sb_init.password}
response = self.session.get(login, verify=False, timeout=(30, 60))
if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
login_params['_xsrf'] = response.cookies.get('_xsrf')
self.session.post(
login,
data=login_params,
stream=True,
verify=False,
timeout=(30, 60),
)
response = self.session.get(
self.url,
auth=(self.sb_init.username, self.sb_init.password),
params=self.sb_init.fork_params,
stream=True,
verify=False,
timeout=(30, 1800),
)
self.session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
response = self.session.get(self.url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
log.error(f'Unable to open URL: {self.url}')
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Unable to '
f'connect to {self.sb_init.section}',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Unable to connect to {self.sb_init.section}')
else:
successful_statuses = [
requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
]
successful_statuses = [requests.codes.ok, requests.codes.created, requests.codes.accepted]
if response.status_code not in successful_statuses:
log.error(f'Server returned status {response.status_code}')
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Server '
f'returned status {response.status_code}',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Server returned status {response.status_code}')
else:
result = self.process_response(response)
return result

def process_response(self, response: requests.Response) -> ProcessResult:
"""Iterate over the lines returned, and log.

:param response: Streamed Requests response object.
This method will need to be overwritten in the forks, for alternative response handling.
"""
@ -610,23 +368,12 @@ class SickBeard:
# input_name = os.path.split(line)[1]
# if 'added to the queue' in line:
#     queued = True
# For the refactoring I'm only considering vanilla sickbeard,
# as for the base class.
if (
'Processing succeeded' in line
or 'Successfully processed' in line
):
# For the refactoring I'm only considering vanilla sickbeard, as for the base class.
if 'Processing succeeded' in line or 'Successfully processed' in line:
self.success = True

if self.success:
result = ProcessResult.success(
f'{self.sb_init.section}: Successfully post-processed '
f'{self.input_name}',
)
result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}')
else:
# We did not receive Success confirmation.
result = ProcessResult.failure(
f'{self.sb_init.section}: Failed to post-process - Returned '
f'log from {self.sb_init.section} was not as expected.',
)
result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.')
return result
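process_response scans the streamed body line by line for the success markers; the iteration itself sits in the elided hunk, so this sketch assumes the usual requests idiom for it:

def scan_for_success(response):
    # iter_lines streams the reply without buffering the whole body.
    for line in response.iter_lines(decode_unicode=True):
        if line and ('Processing succeeded' in line or 'Successfully processed' in line):
            return True
    return False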

@ -8,13 +8,11 @@ def configure_nzbs(config):
nzb2media.NZB_CLIENT_AGENT = nzb_config['clientAgent']  # sabnzbd
nzb2media.NZB_DEFAULT_DIRECTORY = nzb_config['default_downloadDirectory']
nzb2media.NZB_NO_MANUAL = int(nzb_config['no_manual'], 0)

configure_sabnzbd(nzb_config)


def configure_sabnzbd(config):
nzb2media.SABNZBD_HOST = config['sabnzbd_host']
nzb2media.SABNZBD_PORT = int(
config['sabnzbd_port'] or 8080,
)  # defaults to accommodate NzbGet
# defaults to accommodate NzbGet
nzb2media.SABNZBD_PORT = int(config['sabnzbd_port'] or 8080)
nzb2media.SABNZBD_APIKEY = config['sabnzbd_apikey']
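config['sabnzbd_port'] or 8080 leans on the empty string being falsy, so a blank config entry falls back to the default before int() runs:

def port_from(config_value, default=8080):
    return int(config_value or default)

print(port_from(''))      # -> 8080
print(port_from('9090'))  # -> 9090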

@ -16,27 +16,17 @@ def configure_plex(config):
nzb2media.PLEX_PORT = config['Plex']['plex_port']
nzb2media.PLEX_TOKEN = config['Plex']['plex_token']
plex_section = config['Plex']['plex_sections'] or []

if plex_section:
if isinstance(plex_section, list):
plex_section = ','.join(
plex_section,
)  # fix in case this was imported as a list.
plex_section = [
tuple(item.split(',')) for item in plex_section.split('|')
]

plex_section = ','.join(plex_section)  # fix in case this was imported as a list.
plex_section = [tuple(item.split(',')) for item in plex_section.split('|')]
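plex_sections is stored as category,section-id pairs separated by '|'; the parse turns that into (category, id) tuples:

plex_section = 'movie,1|tv,2'  # example config value
parsed = [tuple(item.split(',')) for item in plex_section.split('|')]
print(parsed)  # -> [('movie', '1'), ('tv', '2')]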
nzb2media.PLEX_SECTION = plex_section


def plex_update(category):
if nzb2media.FAILED:
return
url = '{scheme}://{host}:{port}/library/sections/'.format(
scheme='https' if nzb2media.PLEX_SSL else 'http',
host=nzb2media.PLEX_HOST,
port=nzb2media.PLEX_PORT,
)
url = '{scheme}://{host}:{port}/library/sections/'.format(scheme='https' if nzb2media.PLEX_SSL else 'http', host=nzb2media.PLEX_HOST, port=nzb2media.PLEX_PORT)
section = None
if not nzb2media.PLEX_SECTION:
return
@ -44,7 +34,6 @@ def plex_update(category):
for item in nzb2media.PLEX_SECTION:
if item[0] == category:
section = item[1]

if section:
url = f'{url}{section}/refresh?X-Plex-Token={nzb2media.PLEX_TOKEN}'
requests.get(url, timeout=(60, 120), verify=False)
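The refresh itself is one GET against Plex's sections endpoint with the token as a query parameter; with placeholder values the final URL looks like:

PLEX_HOST, PLEX_PORT, PLEX_TOKEN = 'localhost', 32400, 'xxxx'  # placeholders
section = '2'  # library section id matched for the category
url = f'http://{PLEX_HOST}:{PLEX_PORT}/library/sections/{section}/refresh?X-Plex-Token={PLEX_TOKEN}'
print(url)
# requests.get(url, timeout=(60, 120), verify=False)  # fire-and-forget refresh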

@ -7,41 +7,36 @@ import re
import subliminal
from babelfish import Language

import nzb2media
from nzb2media import GETSUBS
from nzb2media import SLANGUAGES
from nzb2media.utils.files import list_media_files

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())


def import_subs(filename):
if not nzb2media.GETSUBS:
if not GETSUBS:
return
try:
subliminal.region.configure(
'dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'},
)
subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'})
except Exception:
pass

languages = set()
for item in nzb2media.SLANGUAGES:
for item in SLANGUAGES:
try:
languages.add(Language(item))
except Exception:
pass
if not languages:
return

log.info(f'Attempting to download subtitles for {filename}')
try:
video = subliminal.scan_video(filename)
subtitles = subliminal.download_best_subtitles({video}, languages)
subliminal.save_subtitles(video, subtitles[video])

for subtitle in subtitles[video]:
subtitle_path = subliminal.subtitle.get_subtitle_path(
video.name, subtitle.language,
)
subtitle_path = subliminal.subtitle.get_subtitle_path(video.name, subtitle.language)
os.chmod(subtitle_path, 0o644)
except Exception as error:
log.error(f'Failed to download subtitles for {filename} due to: {error}')
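SLANGUAGES entries are expected to be babelfish alpha-3 codes; anything invalid is silently skipped by the try/except. For instance:

from babelfish import Language

for code in ['eng', 'nld', 'not-a-code']:
    try:
        print(code, '->', Language(code))
    except Exception:
        print(code, '-> skipped')  # mirrors the silent `except Exception: pass`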
@ -50,31 +45,22 @@ def import_subs(filename):
def rename_subs(path):
filepaths = []
sub_ext = ['.srt', '.sub', '.idx']
vidfiles = nzb2media.list_media_files(
path, media=True, audio=False, meta=False, archives=False,
)
if (
not vidfiles or len(vidfiles) > 1
):  # If there is more than 1 video file, or no video files, we can't rename subs.
vidfiles = list_media_files(path, media=True, audio=False, meta=False, archives=False)
if not vidfiles or len(vidfiles) > 1:  # If there is more than 1 video file, or no video files, we can't rename subs.
return
name = os.path.splitext(os.path.split(vidfiles[0])[1])[0]
for directory, _, filenames in os.walk(path):
for filename in filenames:
filepaths.extend([os.path.join(directory, filename)])
subfiles = [
item for item in filepaths if os.path.splitext(item)[1] in sub_ext
]
subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext]
subfiles.sort()  # This should sort subtitle names by language (alpha) and Number (where multiple)
renamed = []
for sub in subfiles:
subname, ext = os.path.splitext(os.path.basename(sub))
if (
name in subname
):  # The sub file name already includes the video name.
continue
words = re.findall(
'[a-zA-Z]+', str(subname),
)  # find whole words in string
if name in subname:
continue  # The sub file name already includes the video name.
# find whole words in string
words = re.findall('[a-zA-Z]+', str(subname))
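The findall pulls the alphabetic runs out of the subtitle stem, so the following loop can match them against language descriptors:

import re

subname = 'Some.Movie.2021.en.2'  # example subtitle stem
print(re.findall('[a-zA-Z]+', subname))  # -> ['Some', 'Movie', 'en']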
# parse the words for language descriptors.
lan = None
for word in words:
@ -95,12 +81,10 @@ def rename_subs(path):
new_sub_name = name
else:
new_sub_name = f'{name}.{str(lan)}'
new_sub = os.path.join(
directory, new_sub_name,
)  # full path and name less ext
if (
f'{new_sub}{ext}' in renamed
):  # If duplicate names, add unique number before ext.
# full path and name less ext
new_sub = os.path.join(directory, new_sub_name)
if f'{new_sub}{ext}' in renamed:
# If duplicate names, add unique number before ext.
for i in range(1, len(renamed) + 1):
if f'{new_sub}.{i}{ext}' in renamed:
continue

@ -16,13 +16,8 @@ log.addHandler(logging.NullHandler())
def process():
# Perform Manual Post-Processing
log.warning('Invalid number of arguments received from client, Switching to manual run mode ...')

# Post-Processing Result
result = ProcessResult(
message='',
status_code=0,
)

result = ProcessResult(message='', status_code=0)
for section, subsections in nzb2media.SECTIONS.items():
for subsection in subsections:
if not nzb2media.CFG[section][subsection].isenabled():
@ -30,38 +25,19 @@ def process():
for dir_name in get_dirs(section, subsection, link='move'):
log.info(f'Starting manual run for {section}:{subsection} - Folder: {dir_name}')
log.info(f'Checking database for download info for {os.path.basename(dir_name)} ...')

nzb2media.DOWNLOAD_INFO = get_download_info(
os.path.basename(dir_name),
0,
)
nzb2media.DOWNLOAD_INFO = get_download_info(os.path.basename(dir_name), 0)
if nzb2media.DOWNLOAD_INFO:
log.info(f'Found download info for {os.path.basename(dir_name)}, setting variables now ...')
client_agent = (
nzb2media.DOWNLOAD_INFO[0]['client_agent'] or 'manual'
)
client_agent = nzb2media.DOWNLOAD_INFO[0]['client_agent'] or 'manual'
download_id = nzb2media.DOWNLOAD_INFO[0]['input_id'] or ''
else:
log.info(f'Unable to locate download info for {os.path.basename(dir_name)}, continuing to try and process this release ...')
client_agent = 'manual'
download_id = ''

if (
client_agent
and client_agent.lower() not in nzb2media.NZB_CLIENTS
):
if client_agent and client_agent.lower() not in nzb2media.NZB_CLIENTS:
continue

input_name = os.path.basename(dir_name)

results = nzb.process(
dir_name,
input_name,
0,
client_agent=client_agent,
download_id=download_id or None,
input_category=subsection,
)
results = nzb.process(dir_name, input_name, 0, client_agent=client_agent, download_id=download_id or None, input_category=subsection)
if results.status_code != 0:
log.error(f'A problem was reported when trying to perform a manual run for {section}:{subsection}.')
result = results

@ -25,22 +25,10 @@ log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())


def process(
input_directory,
input_name=None,
status=0,
client_agent='manual',
download_id=None,
input_category=None,
failure_link=None,
):
def process(input_directory, input_name=None, status=0, client_agent='manual', download_id=None, input_category=None, failure_link=None):
if nzb2media.SAFE_MODE and input_directory == nzb2media.NZB_DEFAULT_DIRECTORY:
log.error(f'The input directory:[{input_directory}] is the Default Download Directory. Please configure category directories to prevent processing of other media.')
return ProcessResult(
message='',
status_code=-1,
)

return ProcessResult(message='', status_code=-1)
if not download_id and client_agent == 'sabnzbd':
download_id = get_nzoid(input_name)
if client_agent != 'manual' and not nzb2media.DOWNLOAD_INFO:
@ -54,14 +42,7 @@ def process(
except Exception:
pass
control_value_dict = {'input_directory': input_directory1}
new_value_dict = {
'input_name': input_name1,
'input_hash': download_id,
'input_id': download_id,
'client_agent': client_agent,
'status': 0,
'last_update': datetime.date.today().toordinal(),
}
new_value_dict = {'input_name': input_name1, 'input_hash': download_id, 'input_id': download_id, 'client_agent': client_agent, 'status': 0, 'last_update': datetime.date.today().toordinal()}
my_db.upsert('downloads', new_value_dict, control_value_dict)
# auto-detect section
if input_category is None:
@ -72,35 +53,23 @@ def process(
section = nzb2media.CFG.findsection('ALL').isenabled()
if section is None:
log.error(f'Category:[{input_category}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.')
return ProcessResult(
message='',
status_code=-1,
)
return ProcessResult(message='', status_code=-1)
usercat = 'ALL'
if len(section) > 1:
log.error(f'Category:[{input_category}] is not unique, {section.keys()} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.')
return ProcessResult(
message='',
status_code=-1,
)
return ProcessResult(message='', status_code=-1)
if section:
section_name = section.keys()[0]
log.info(f'Auto-detected SECTION:{section_name}')
else:
log.error(f'Unable to locate a section with subsection:{input_category} enabled in your autoProcessMedia.cfg, exiting!')
return ProcessResult(
status_code=-1,
message='',
)
return ProcessResult(status_code=-1, message='')
cfg = dict(nzb2media.CFG[section_name][usercat])
extract = int(cfg.get('extract', 0))
try:
if int(cfg.get('remote_path')) and not nzb2media.REMOTE_PATHS:
log.error(f'Remote Path is enabled for {section_name}:{input_category} but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!')
return ProcessResult(
status_code=-1,
message='',
)
return ProcessResult(status_code=-1, message='')
except Exception:
remote_path = cfg.get('remote_path')
log.error(f'Remote Path {remote_path} is not valid for {section_name}:{input_category} Please set this to either 0 to disable or 1 to enable!')
@ -110,47 +79,17 @@ def process(
extract_files(input_directory)
log.info(f'Calling {section_name}:{input_category} to post-process:{input_name}')
if section_name == 'UserScript':
result = external_script(
input_directory, input_name, input_category, section[usercat],
)
result = external_script(input_directory, input_name, input_category, section[usercat])
else:
process_map = {
'CouchPotato': movies.process,
'Radarr': movies.process,
'Watcher3': movies.process,
'SickBeard': tv.process,
'SiCKRAGE': tv.process,
'NzbDrone': tv.process,
'Sonarr': tv.process,
'LazyLibrarian': books.process,
'HeadPhones': music.process,
'Lidarr': music.process,
'Mylar': comics.process,
'Gamez': games.process,
}
process_map = {'CouchPotato': movies.process, 'Radarr': movies.process, 'Watcher3': movies.process, 'SickBeard': tv.process, 'SiCKRAGE': tv.process, 'NzbDrone': tv.process, 'Sonarr': tv.process, 'LazyLibrarian': books.process, 'HeadPhones': music.process, 'Lidarr': music.process, 'Mylar': comics.process, 'Gamez': games.process}
processor = process_map[section_name]
result = processor(
section=section_name,
dir_name=input_directory,
input_name=input_name,
status=status,
client_agent=client_agent,
download_id=download_id,
input_category=input_category,
failure_link=failure_link,
)
result = processor(section=section_name, dir_name=input_directory, input_name=input_name, status=status, client_agent=client_agent, download_id=download_id, input_category=input_category, failure_link=failure_link)
plex_update(input_category)
if result.status_code == 0:
if client_agent != 'manual':
# update download status in our DB
update_download_info_status(input_name, 1)
if section_name not in [
'UserScript',
'NzbDrone',
'Sonarr',
'Radarr',
'Lidarr',
]:
if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
# cleanup our processing folders of any misc unwanted files and
# empty directories
clean_dir(input_directory, section_name, input_category)

@ -13,13 +13,7 @@ log.addHandler(logging.NullHandler())

def parse_download_id():
"""Parse nzbget download_id from environment."""
download_id_keys = [
'NZBPR_COUCHPOTATO',
'NZBPR_DRONE',
'NZBPR_SONARR',
'NZBPR_RADARR',
'NZBPR_LIDARR',
]
download_id_keys = ['NZBPR_COUCHPOTATO', 'NZBPR_DRONE', 'NZBPR_SONARR', 'NZBPR_RADARR', 'NZBPR_LIDARR']
for download_id_key in download_id_keys:
try:
return os.environ[download_id_key]
@ -102,12 +96,4 @@ def process():
status = parse_status()
download_id = parse_download_id()
failure_link = parse_failure_link()
return nzb.process(
input_directory=os.environ['NZBPP_DIRECTORY'],
input_name=os.environ['NZBPP_NZBNAME'],
status=status,
client_agent='nzbget',
download_id=download_id,
input_category=os.environ['NZBPP_CATEGORY'],
failure_link=failure_link,
)
return nzb.process(input_directory=os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status, client_agent='nzbget', download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], failure_link=failure_link)

@ -7,27 +7,17 @@ from nzb2media.processor import nzb

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

MINIMUM_ARGUMENTS = 8


def process_script():
version = os.environ['SAB_VERSION']
log.info(f'Script triggered from SABnzbd {version}.')
return nzb.process(
input_directory=os.environ['SAB_COMPLETE_DIR'],
input_name=os.environ['SAB_FINAL_NAME'],
status=int(os.environ['SAB_PP_STATUS']),
client_agent='sabnzbd',
download_id=os.environ['SAB_NZO_ID'],
input_category=os.environ['SAB_CAT'],
failure_link=os.environ['SAB_FAILURE_URL'],
)
return nzb.process(input_directory=os.environ['SAB_COMPLETE_DIR'], input_name=os.environ['SAB_FINAL_NAME'], status=int(os.environ['SAB_PP_STATUS']), client_agent='sabnzbd', download_id=os.environ['SAB_NZO_ID'], input_category=os.environ['SAB_CAT'], failure_link=os.environ['SAB_FAILURE_URL'])


def process(args):
"""Process job from SABnzbd.

SABnzbd arguments:
1. The final directory of the job (full path)
2. The original name of the NZB file
@ -44,12 +34,4 @@ def process(args):
"""
version = '0.7.17+' if len(args) > MINIMUM_ARGUMENTS else ''
log.info(f'Script triggered from SABnzbd {version}')
return nzb.process(
input_directory=args[1],
input_name=args[2],
status=int(args[7]),
input_category=args[5],
client_agent='sabnzbd',
download_id='',
failure_link=''.join(args[8:]),
)
return nzb.process(input_directory=args[1], input_name=args[2], status=int(args[7]), input_category=args[5], client_agent='sabnzbd', download_id='', failure_link=''.join(args[8:]))

@ -12,66 +12,14 @@ from nzb2media.utils.files import list_media_files

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())


reverse_list = [
r'\.\d{2}e\d{2}s\.',
r'\.[pi]0801\.',
r'\.p027\.',
r'\.[pi]675\.',
r'\.[pi]084\.',
r'\.p063\.',
r'\b[45]62[xh]\.',
r'\.yarulb\.',
r'\.vtd[hp]\.',
r'\.ld[.-]?bew\.',
r'\.pir.?(dov|dvd|bew|db|rb)\.',
r'\brdvd\.',
r'\.vts\.',
r'\.reneercs\.',
r'\.dcv\.',
r'\b(pir|mac)dh\b',
r'\.reporp\.',
r'\.kcaper\.',
r'\.lanretni\.',
r'\b3ca\b',
r'\.cstn\.',
]
reverse_list = [r'\.\d{2}e\d{2}s\.', r'\.[pi]0801\.', r'\.p027\.', r'\.[pi]675\.', r'\.[pi]084\.', r'\.p063\.', r'\b[45]62[xh]\.', r'\.yarulb\.', r'\.vtd[hp]\.', r'\.ld[.-]?bew\.', r'\.pir.?(dov|dvd|bew|db|rb)\.', r'\brdvd\.', r'\.vts\.', r'\.reneercs\.', r'\.dcv\.', r'\b(pir|mac)dh\b', r'\.reporp\.', r'\.kcaper\.', r'\.lanretni\.', r'\b3ca\b', r'\.cstn\.']
reverse_pattern = re.compile('|'.join(reverse_list), flags=re.IGNORECASE)
season_pattern = re.compile(r'(.*\.\d{2}e\d{2}s\.)(.*)', flags=re.IGNORECASE)
word_pattern = re.compile(r'([^A-Z0-9]*[A-Z0-9]+)')
media_list = [
r'\.s\d{2}e\d{2}\.',
r'\.1080[pi]\.',
r'\.720p\.',
r'\.576[pi]',
r'\.480[pi]\.',
r'\.360p\.',
r'\.[xh]26[45]\b',
r'\.bluray\.',
r'\.[hp]dtv\.',
r'\.web[.-]?dl\.',
r'\.(vod|dvd|web|bd|br).?rip\.',
r'\.dvdr\b',
r'\.stv\.',
r'\.screener\.',
r'\.vcd\.',
r'\bhd(cam|rip)\b',
r'\.proper\.',
r'\.repack\.',
r'\.internal\.',
r'\bac3\b',
r'\.ntsc\.',
r'\.pal\.',
r'\.secam\.',
r'\bdivx\b',
r'\bxvid\b',
]
media_list = [r'\.s\d{2}e\d{2}\.', r'\.1080[pi]\.', r'\.720p\.', r'\.576[pi]', r'\.480[pi]\.', r'\.360p\.', r'\.[xh]26[45]\b', r'\.bluray\.', r'\.[hp]dtv\.', r'\.web[.-]?dl\.', r'\.(vod|dvd|web|bd|br).?rip\.', r'\.dvdr\b', r'\.stv\.', r'\.screener\.', r'\.vcd\.', r'\bhd(cam|rip)\b', r'\.proper\.', r'\.repack\.', r'\.internal\.', r'\bac3\b', r'\.ntsc\.', r'\.pal\.', r'\.secam\.', r'\bdivx\b', r'\bxvid\b']
media_pattern = re.compile('|'.join(media_list), flags=re.IGNORECASE)
garbage_name = re.compile(r'^[a-zA-Z0-9]*$')
char_replace = [
[r'(\w)1\.(\w)', r'\1i\2'],
]
char_replace = [[r'(\w)1\.(\w)', r'\1i\2']]
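The reverse_list tokens are common release tags spelled backwards ('.yarulb.' is '.bluray.', and the first entry is a reversed sNNeNN episode tag), so reverse_pattern flags names that were reversed wholesale:

import re

# Two entries from reverse_list above:
reverse_pattern = re.compile(r'\.\d{2}e\d{2}s\.|\.yarulb\.', flags=re.IGNORECASE)

print(bool(reverse_pattern.search('eivom.tset.10e10s.yarulb.vkm')))  # -> True
print(bool(reverse_pattern.search('test.movie.s01e10.bluray.mkv')))  # -> False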


def process_all_exceptions(name, dirname):
@ -112,11 +60,7 @@ def strip_groups(filename):

def rename_file(filename, newfile_path):
if os.path.isfile(newfile_path):
newfile_path = (
os.path.splitext(newfile_path)[0]
+ '.NTM'
+ os.path.splitext(newfile_path)[1]
)
newfile_path = os.path.splitext(newfile_path)[0] + '.NTM' + os.path.splitext(newfile_path)[1]
log.error(f'Replacing file name {filename} with download name {newfile_path}')
try:
os.rename(filename, newfile_path)
@ -126,10 +70,7 @@ def rename_file(filename, newfile_path):

def replace_filename(filename, dirname, name):
head, file_extension = os.path.splitext(os.path.basename(filename))
if (
media_pattern.search(os.path.basename(dirname).replace(' ', '.'))
is not None
):
if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None:
newname = os.path.basename(dirname).replace(' ', '.')
log.debug(f'Replacing file name {head} with directory name {newname}')
elif media_pattern.search(name.replace(' ', '.').lower()) is not None:
@ -178,10 +119,7 @@ def rename_script(dirname):
break
if rename_file:
with open(rename_file) as fin:
rename_lines = [
line.strip()
for line in fin
]
rename_lines = [line.strip() for line in fin]
for line in rename_lines:
if re.search('^(mv|Move)', line, re.IGNORECASE):
cmd = shlex.split(line)[1:]
@ -189,9 +127,7 @@ def rename_script(dirname):
continue
if len(cmd) == 2 and os.path.isfile(os.path.join(dirname, cmd[0])):
orig = os.path.join(dirname, cmd[0])
dest = os.path.join(
dirname, cmd[1].split('\\')[-1].split('/')[-1],
)
dest = os.path.join(dirname, cmd[1].split('\\')[-1].split('/')[-1])
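Each line in the rename script looks like mv old.name.mkv new/name.mkv; shlex honours quoting, and the chained splits keep only the destination basename:

import shlex

line = 'mv "old release.mkv" subdir/new.name.mkv'
cmd = shlex.split(line)[1:]
print(cmd)  # -> ['old release.mkv', 'subdir/new.name.mkv']
dest_name = cmd[1].split('\\')[-1].split('/')[-1]
print(dest_name)  # -> new.name.mkv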
if os.path.isfile(dest):
continue
log.debug(f'Renaming file {orig} to {dest}')

@ -6,17 +6,10 @@ from nzb2media.utils.torrent import create_torrent_class

def configure_torrents(config):
torrent_config = config['Torrent']
nzb2media.TORRENT_CLIENT_AGENT = torrent_config[
'clientAgent'
]  # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent | synods | other
nzb2media.OUTPUT_DIRECTORY = torrent_config[
'outputDirectory'
]  # /abs/path/to/complete/
nzb2media.TORRENT_DEFAULT_DIRECTORY = torrent_config[
'default_downloadDirectory'
]
nzb2media.TORRENT_CLIENT_AGENT = torrent_config['clientAgent']  # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent | synods | other
nzb2media.OUTPUT_DIRECTORY = torrent_config['outputDirectory']  # /abs/path/to/complete/
nzb2media.TORRENT_DEFAULT_DIRECTORY = torrent_config['default_downloadDirectory']
nzb2media.TORRENT_NO_MANUAL = int(torrent_config['no_manual'], 0)

configure_torrent_linking(torrent_config)
configure_flattening(torrent_config)
configure_torrent_deletion(torrent_config)
@ -41,9 +34,7 @@ def configure_flattening(config):


def configure_torrent_categories(config):
nzb2media.CATEGORIES = config[
'categories'
]  # music,music_videos,pictures,software
nzb2media.CATEGORIES = config['categories']  # music,music_videos,pictures,software
if isinstance(nzb2media.CATEGORIES, str):
nzb2media.CATEGORIES = nzb2media.CATEGORIES.split(',')

@ -62,9 +53,7 @@ def configure_torrent_deletion(config):


def configure_utorrent(config):
nzb2media.UTORRENT_WEB_UI = config[
'uTorrentWEBui'
]  # http://localhost:8090/gui/
nzb2media.UTORRENT_WEB_UI = config['uTorrentWEBui']  # http://localhost:8090/gui/
nzb2media.UTORRENT_USER = config['uTorrentUSR']  # mysecretusr
nzb2media.UTORRENT_PASSWORD = config['uTorrentPWD']  # mysecretpwr

@ -16,7 +16,6 @@ def configure_client():
port = nzb2media.DELUGE_PORT
user = nzb2media.DELUGE_USER
password = nzb2media.DELUGE_PASSWORD

log.debug(f'Connecting to {agent}: http://{host}:{port}')
client = DelugeRPCClient(host, port, user, password)
try:

@ -16,7 +16,6 @@ def configure_client():
port = nzb2media.QBITTORRENT_PORT
user = nzb2media.QBITTORRENT_USER
password = nzb2media.QBITTORRENT_PASSWORD

log.debug(f'Connecting to {agent}: http://{host}:{port}')
client = qBittorrentClient(f'http://{host}:{port}/')
try:

@ -15,7 +15,6 @@ def configure_client():
port = nzb2media.SYNO_PORT
user = nzb2media.SYNO_USER
password = nzb2media.SYNO_PASSWORD

log.debug(f'Connecting to {agent}: http://{host}:{port}')
try:
client = DownloadStation(host, port, user, password)

@ -16,7 +16,6 @@ def configure_client():
port = nzb2media.TRANSMISSION_PORT
user = nzb2media.TRANSMISSION_USER
password = nzb2media.TRANSMISSION_PASSWORD

log.debug(f'Connecting to {agent}: http://{host}:{port}')
try:
client = TransmissionClient(host, port, user, password)

@ -15,7 +15,6 @@ def configure_client():
web_ui = nzb2media.UTORRENT_WEB_UI
user = nzb2media.UTORRENT_USER
password = nzb2media.UTORRENT_PASSWORD

log.debug(f'Connecting to {agent}: {web_ui}')
try:
client = UTorrentClient(web_ui, user, password)

File diff suppressed because it is too large

@ -18,27 +18,17 @@ log.addHandler(logging.NullHandler())
def external_script(output_destination, torrent_name, torrent_label, settings):
final_result = 0  # start at 0.
num_files = 0
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = settings.get(
'user_script_mediaExtensions', '',
)
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = settings.get('user_script_mediaExtensions', '')
try:
if isinstance(nzb2media.USER_SCRIPT_MEDIAEXTENSIONS, str):
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = (
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS.lower().split(',')
)
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = nzb2media.USER_SCRIPT_MEDIAEXTENSIONS.lower().split(',')
except Exception:
log.error('user_script_mediaExtensions could not be set')
nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = []

nzb2media.USER_SCRIPT = settings.get('user_script_path', '')

if not nzb2media.USER_SCRIPT or nzb2media.USER_SCRIPT == 'None':
# do nothing and return success. This allows the user an option to Link files only and not run a script.
return ProcessResult(
status_code=0,
message='No user script defined',
)

return ProcessResult(status_code=0, message='No user script defined')
nzb2media.USER_SCRIPT_PARAM = settings.get('user_script_param', '')
try:
if isinstance(nzb2media.USER_SCRIPT_PARAM, str):
@ -46,49 +36,30 @@ def external_script(output_destination, torrent_name, torrent_label, settings):
except Exception:
log.error('user_script_params could not be set')
nzb2media.USER_SCRIPT_PARAM = []

nzb2media.USER_SCRIPT_SUCCESSCODES = settings.get('user_script_successCodes', 0)
try:
if isinstance(nzb2media.USER_SCRIPT_SUCCESSCODES, str):
nzb2media.USER_SCRIPT_SUCCESSCODES = (
nzb2media.USER_SCRIPT_SUCCESSCODES.split(',')
)
nzb2media.USER_SCRIPT_SUCCESSCODES = nzb2media.USER_SCRIPT_SUCCESSCODES.split(',')
except Exception:
log.error('user_script_successCodes could not be set')
nzb2media.USER_SCRIPT_SUCCESSCODES = 0

nzb2media.USER_SCRIPT_CLEAN = int(settings.get('user_script_clean', 1))
nzb2media.USER_SCRIPT_RUNONCE = int(settings.get('user_script_runOnce', 1))

if nzb2media.CHECK_MEDIA:
for video in list_media_files(
output_destination,
media=True,
audio=False,
meta=False,
archives=False,
):
for video in list_media_files(output_destination, media=True, audio=False, meta=False, archives=False):
if transcoder.is_video_good(video, 0):
import_subs(video)
else:
log.info(f'Corrupt video file found {video}. Deleting.')
os.unlink(video)

for dirpath, _, filenames in os.walk(output_destination):
for file in filenames:

file_path = nzb2media.os.path.join(dirpath, file)
file_name, file_extension = os.path.splitext(file)
log.debug(f'Checking file {file} to see if this should be processed.')

if (
file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS
or 'all' in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS
):
if file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS or 'all' in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS:
num_files += 1
if (
nzb2media.USER_SCRIPT_RUNONCE == 1 and num_files > 1
):  # we have already run once, so just continue to get number of files.
if nzb2media.USER_SCRIPT_RUNONCE == 1 and num_files > 1:  # we have already run once, so just continue to get number of files.
continue
command = [nzb2media.USER_SCRIPT]
for param in nzb2media.USER_SCRIPT_PARAM:
|
||||
@ -133,28 +104,15 @@ def external_script(output_destination, torrent_name, torrent_label, settings):
|
||||
log.info(f'If the UserScript completed successfully you should add {res} to the user_script_successCodes')
|
||||
result = 1
|
||||
final_result += result
|
||||
|
||||
num_files_new = 0
|
||||
for _, _, filenames in os.walk(output_destination):
|
||||
for file in filenames:
|
||||
file_name, file_extension = os.path.splitext(file)
|
||||
|
||||
if (
|
||||
file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS
|
||||
or nzb2media.USER_SCRIPT_MEDIAEXTENSIONS == 'ALL'
|
||||
):
|
||||
if file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS or nzb2media.USER_SCRIPT_MEDIAEXTENSIONS == 'ALL':
|
||||
num_files_new += 1
|
||||
|
||||
if (
|
||||
nzb2media.USER_SCRIPT_CLEAN == int(1)
|
||||
and num_files_new == 0
|
||||
and final_result == 0
|
||||
):
|
||||
if nzb2media.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0:
|
||||
log.info(f'All files have been processed. Cleaning outputDirectory {output_destination}')
|
||||
remove_dir(output_destination)
|
||||
elif nzb2media.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
|
||||
log.info(f'{num_files} files were processed, but {num_files_new} still remain. outputDirectory will not be cleaned.')
|
||||
return ProcessResult(
|
||||
status_code=final_result,
|
||||
message='User Script Completed',
|
||||
)
|
||||
return ProcessResult(status_code=final_result, message='User Script Completed')
|
||||
|
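Note: as context for this hunk's single-line rewrites, a minimal standalone sketch of the filter/run-once flow in external_script; the directory, extensions, and script path below are illustrative assumptions, not values from the repo.

import os
import subprocess

media_extensions = '.mkv,.mp4'.lower().split(',')  # as parsed from user_script_mediaExtensions
run_once = True
num_files = 0

for dirpath, _, filenames in os.walk('/tmp/output'):
    for name in filenames:
        extension = os.path.splitext(name)[1]
        if extension not in media_extensions and 'all' not in media_extensions:
            continue
        num_files += 1
        if run_once and num_files > 1:
            continue  # already ran once; keep counting matching files only
        result = subprocess.call(['/usr/local/bin/demo_script', os.path.join(dirpath, name)])
        print(f'script exited with {result}')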
@ -15,9 +15,7 @@ log.addHandler(logging.NullHandler())


def flatten(output_destination):
return flatten_dir(
output_destination, list_media_files(output_destination),
)
return flatten_dir(output_destination, list_media_files(output_destination))


def clean_dir(path, section, subsection):
@ -25,9 +23,7 @@ def clean_dir(path, section, subsection):
min_size = int(cfg.get('minSize', 0))
delete_ignored = int(cfg.get('delete_ignored', 0))
try:
files = list_media_files(
path, min_size=min_size, delete_ignored=delete_ignored,
)
files = list_media_files(path, min_size=min_size, delete_ignored=delete_ignored)
except Exception:
files = []
return clean_directory(path, files)
@ -35,72 +31,45 @@ def clean_dir(path, section, subsection):

def process_dir(path, link):
folders = []

log.info(f'Searching {path} for mediafiles to post-process ...')
dir_contents = os.listdir(path)

# search for single files and move them into their own folder for post-processing

# Generate list of sync files
sync_files = (
item
for item in dir_contents
if os.path.splitext(item)[1] in ['.!sync', '.bts']
)

sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts'])
# Generate a list of file paths
filepaths = (
os.path.join(path, item)
for item in dir_contents
if item not in ['Thumbs.db', 'thumbs.db']
)

filepaths = (os.path.join(path, item) for item in dir_contents if item not in ['Thumbs.db', 'thumbs.db'])
# Generate a list of media files
mediafiles = (item for item in filepaths if os.path.isfile(item))

if not any(sync_files):
for mediafile in mediafiles:
try:
move_file(mediafile, path, link)
except Exception as error:
log.error(f'Failed to move {os.path.split(mediafile)[1]} to its own directory: {error}')

# removeEmptyFolders(path, removeRoot=False)

# Generate all path contents
path_contents = (os.path.join(path, item) for item in os.listdir(path))

# Generate all directories from path contents
directories = (path for path in path_contents if os.path.isdir(path))

for directory in directories:
dir_contents = os.listdir(directory)
sync_files = (
item
for item in dir_contents
if os.path.splitext(item)[1] in ['.!sync', '.bts']
)
sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts'])
if not any(dir_contents) or any(sync_files):
continue
folders.append(directory)

return folders


def get_dirs(section, subsection, link='hard'):
to_return = []

watch_directory = nzb2media.CFG[section][subsection]['watch_dir']
directory = os.path.join(watch_directory, subsection)

if not os.path.exists(directory):
directory = watch_directory

try:
to_return.extend(process_dir(directory, link))
except Exception as error:
log.error(f'Failed to add directories from {watch_directory} for post-processing: {error}')

if nzb2media.USE_LINK == 'move':
try:
output_directory = os.path.join(nzb2media.OUTPUT_DIRECTORY, subsection)
@ -108,20 +77,12 @@ def get_dirs(section, subsection, link='hard'):
to_return.extend(process_dir(output_directory, link))
except Exception as error:
log.error(f'Failed to add directories from {nzb2media.OUTPUT_DIRECTORY} for post-processing: {error}')

if not to_return:
log.debug(f'No directories identified in {section}:{subsection} for post-processing')

return list(set(to_return))


def create_url(
scheme: str,
host: str,
port: int | None = None,
path: str = '',
query: str = '',
) -> str:
def create_url(scheme: str, host: str, port: int | None = None, path: str = '', query: str = '') -> str:
"""Create a url from its component parts."""
netloc = host if port is None else f'{host}:{port}'
fragments = ''
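Note: only part of create_url's body appears in this hunk. Assuming the unshown remainder joins the parts with urllib.parse.urlunsplit (a guess consistent with the netloc and fragments variables above), a runnable sketch:

from urllib.parse import urlunsplit

def create_url(scheme: str, host: str, port: int | None = None, path: str = '', query: str = '') -> str:
    """Create a url from its component parts (sketch; the full body is not shown in this hunk)."""
    netloc = host if port is None else f'{host}:{port}'
    fragments = ''
    return urlunsplit((scheme, netloc, path, query, fragments))

print(create_url('https', 'localhost', 8080, '/api'))  # https://localhost:8080/api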
@ -7,7 +7,6 @@ from nzb2media import main_db

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

database = main_db.DBConnection()
@ -38,9 +38,7 @@ def char_replace(name_in):
break
else:
# Detect UTF-8
if ((character == 0xC2) | (character == 0xC3)) & (
(next_character >= 0xA0) & (next_character <= 0xFF)
):
if ((character == 0xC2) | (character == 0xC3)) & ((next_character >= 0xA0) & (next_character <= 0xFF)):
encoding = 'utf-8'
break
# Detect CP850
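Note: a small illustration of the byte pattern this condition tests. Latin-1 supplement characters (U+00A0..U+00FF) encode in UTF-8 as a 0xC2 or 0xC3 lead byte followed by a continuation byte at or above 0xA0:

# 'é' (U+00E9) encodes to the two bytes 0xC3 0xA9 in UTF-8.
text = 'é'
encoded = text.encode('utf-8')
print([hex(b) for b in encoded])  # ['0xc3', '0xa9']

character, next_character = encoded[0], encoded[1]
is_utf8_pair = character in (0xC2, 0xC3) and 0xA0 <= next_character <= 0xFF
print(is_utf8_pair)  # True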
@ -60,19 +58,13 @@ def char_replace(name_in):


def convert_to_ascii(input_name, dir_name):

ascii_convert = int(nzb2media.CFG['ASCII']['convert'])
if (
ascii_convert == 0 or os.name == 'nt'
): # just return if we don't want to convert or on windows os and '\' is replaced!.
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and '\' is replaced!.
return input_name, dir_name

encoded, input_name = char_replace(input_name)

directory, base = os.path.split(dir_name)
if not base: # ended with '/'
directory, base = os.path.split(directory)

encoded, base2 = char_replace(base)
if encoded:
dir_name = os.path.join(directory, base2)
@ -80,25 +72,16 @@ def convert_to_ascii(input_name, dir_name):
os.rename(os.path.join(directory, base), dir_name)
if 'NZBOP_SCRIPTDIR' in os.environ:
print(f'[NZB] DIRECTORY={dir_name}')

for dirname, dirnames, _ in os.walk(dir_name, topdown=False):
for subdirname in dirnames:
encoded, subdirname2 = char_replace(subdirname)
if encoded:
log.info(f'Renaming directory to: {subdirname2}.')
os.rename(
os.path.join(dirname, subdirname),
os.path.join(dirname, subdirname2),
)

os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2))
for dirname, _, filenames in os.walk(dir_name):
for filename in filenames:
encoded, filename2 = char_replace(filename)
if encoded:
log.info(f'Renaming file to: {filename2}.')
os.rename(
os.path.join(dirname, filename),
os.path.join(dirname, filename2),
)

os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2))
return input_name, dir_name
@ -29,65 +29,46 @@ def move_file(filename, path, link):
try:
if file_ext in nzb2media.AUDIO_CONTAINER:
guess = mediafile.MediaFile(filename)

# get artist and album info
artist = guess.artist
album = guess.album

# create new path
new_path = os.path.join(
path, f'{sanitize_name(artist)} - {sanitize_name(album)}',
)
new_path = os.path.join(path, f'{sanitize_name(artist)} - {sanitize_name(album)}')
elif file_ext in nzb2media.MEDIA_CONTAINER:
guess = guessit.guessit(filename)

# get title
title = guess.get('series') or guess.get('title')

if not title:
title = os.path.splitext(os.path.basename(filename))[0]

new_path = os.path.join(path, sanitize_name(title))
except Exception as error:
log.error(f'Exception parsing name for media file: {os.path.split(filename)[1]}: {error}')

if not new_path:
title = os.path.splitext(os.path.basename(filename))[0]
new_path = os.path.join(path, sanitize_name(title))

# # Removed as encoding of directory no-longer required
# try:
# new_path = new_path.encode(nzb2media.SYS_ENCODING)
# except Exception:
# pass

# Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe).
if os.path.isfile(new_path):
new_path2 = os.path.join(
os.path.join(os.path.split(new_path)[0], 'new'),
os.path.split(new_path)[1],
)
new_path2 = os.path.join(os.path.join(os.path.split(new_path)[0], 'new'), os.path.split(new_path)[1])
new_path = new_path2

# create new path if it does not exist
if not os.path.exists(new_path):
make_dir(new_path)

newfile = os.path.join(
new_path, sanitize_name(os.path.split(filename)[1]),
)
newfile = os.path.join(new_path, sanitize_name(os.path.split(filename)[1]))
try:
newfile = newfile.encode(nzb2media.SYS_ENCODING)
except Exception:
pass

# link file to its new path
copy_link(filename, newfile, link)
def is_min_size(input_name, min_size):
file_name, file_ext = os.path.splitext(os.path.basename(input_name))

# audio files we need to check directory size not file size
input_size = os.path.getsize(input_name)
if file_ext in nzb2media.AUDIO_CONTAINER:
@ -96,7 +77,6 @@ def is_min_size(input_name, min_size):
except Exception:
log.error(f'Failed to get file size for {input_name}')
return True

# Ignore files under a certain size
if input_size > min_size * 1048576:
return True
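Note: for reference, the comparison above treats min_size as MiB (1 MiB = 1048576 bytes):

min_size = 5  # MiB, illustrative value
input_size = 3_000_000  # bytes
print(input_size > min_size * 1048576)  # False: 3,000,000 bytes < 5 MiB (5,242,880 bytes)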
@ -110,59 +90,27 @@ def is_archive_file(filename):
return False


def is_media_file(
mediafile,
media=True,
audio=True,
meta=True,
archives=True,
other=False,
otherext=None,
):
def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=None):
if otherext is None:
otherext = []

file_name, file_ext = os.path.splitext(mediafile)

try:
# ignore MAC OS's 'resource fork' files
if file_name.startswith('._'):
return False
except Exception:
pass

return any(
[
(media and file_ext.lower() in nzb2media.MEDIA_CONTAINER),
(audio and file_ext.lower() in nzb2media.AUDIO_CONTAINER),
(meta and file_ext.lower() in nzb2media.META_CONTAINER),
(archives and is_archive_file(mediafile)),
(other and (file_ext.lower() in otherext or 'all' in otherext)),
],
)
return any([(media and file_ext.lower() in nzb2media.MEDIA_CONTAINER), (audio and file_ext.lower() in nzb2media.AUDIO_CONTAINER), (meta and file_ext.lower() in nzb2media.META_CONTAINER), (archives and is_archive_file(mediafile)), (other and (file_ext.lower() in otherext or 'all' in otherext))])
def list_media_files(
path,
min_size=0,
delete_ignored=0,
media=True,
audio=True,
meta=True,
archives=True,
other=False,
otherext=None,
):
def list_media_files(path, min_size=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=None):
if otherext is None:
otherext = []

files = []
if not os.path.isdir(path):
if os.path.isfile(path): # Single file downloads.
cur_file = os.path.split(path)[1]
if is_media_file(
cur_file, media, audio, meta, archives, other, otherext,
):
if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files
if is_sample(path) or not is_min_size(path, min_size):
if delete_ignored == 1:
@ -173,33 +121,15 @@ def list_media_files(
pass
else:
files.append(path)

return files

for cur_file in os.listdir(path):
full_cur_file = os.path.join(path, cur_file)

# if it's a folder do it recursively
if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
files += list_media_files(
full_cur_file,
min_size,
delete_ignored,
media,
audio,
meta,
archives,
other,
otherext,
)

elif is_media_file(
cur_file, media, audio, meta, archives, other, otherext,
):
files += list_media_files(full_cur_file, min_size, delete_ignored, media, audio, meta, archives, other, otherext)
elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files
if is_sample(full_cur_file) or not is_min_size(
full_cur_file, min_size,
):
if is_sample(full_cur_file) or not is_min_size(full_cur_file, min_size):
if delete_ignored == 1:
try:
os.unlink(full_cur_file)
@ -207,38 +137,28 @@ def list_media_files(
except Exception:
pass
continue

files.append(full_cur_file)

return sorted(files, key=len)
def extract_files(src, dst=None, keep_archive=None):
extracted_folder = []
extracted_archive = []

for input_file in list_media_files(
src, media=False, audio=False, meta=False, archives=True,
):
for input_file in list_media_files(src, media=False, audio=False, meta=False, archives=True):
dir_path = os.path.dirname(input_file)
full_file_name = os.path.basename(input_file)
archive_name = os.path.splitext(full_file_name)[0]
archive_name = re.sub(r'part[0-9]+', '', archive_name)

if dir_path in extracted_folder and archive_name in extracted_archive:
continue # no need to extract this, but keep going to look for other archives and sub directories.

try:
if extractor.extract(input_file, dst or dir_path):
extracted_folder.append(dir_path)
extracted_archive.append(archive_name)
except Exception:
log.error(f'Extraction failed for: {full_file_name}')

for folder in extracted_folder:
for input_file in list_media_files(
folder, media=False, audio=False, meta=False, archives=True,
):
for input_file in list_media_files(folder, media=False, audio=False, meta=False, archives=True):
full_file_name = os.path.basename(input_file)
archive_name = os.path.splitext(full_file_name)[0]
archive_name = re.sub(r'part[0-9]+', '', archive_name)
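Note: the part-number substitution above is what lets all volumes of a multi-part archive collapse to one key, so each archive set is extracted only once. Illustration:

import os
import re

for name in ('show.part1.rar', 'show.part2.rar', 'movie.rar'):
    archive_name = os.path.splitext(name)[0]
    archive_name = re.sub(r'part[0-9]+', '', archive_name)
    print(name, '->', archive_name)
# show.part1.rar -> show.
# show.part2.rar -> show.
# movie.rar -> movie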
@ -16,7 +16,6 @@ log.addHandler(logging.NullHandler())
def find_imdbid(dir_name, input_name, omdb_api_key):
imdbid = None
log.info(f'Attemping imdbID lookup for {input_name}')

# find imdbid in dirName
log.info('Searching folder and file names for imdbID ...')
match = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name)
@ -34,9 +33,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key):
if 'NZBPR__DNZB_MOREINFO' in os.environ:
dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '')
if dnzb_more_info != '':
regex = re.compile(
r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE,
)
regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE)
match = regex.match(dnzb_more_info)
if match:
imdbid = match.group(1)
@ -52,81 +49,57 @@ def find_imdbid(dir_name, input_name, omdb_api_key):
title = None
if 'title' in guess:
title = guess['title']

# Movie Year
year = None
if 'year' in guess:
year = guess['year']

url = 'http://www.omdbapi.com'

if not omdb_api_key:
log.info('Unable to determine imdbID: No api key provided for omdbapi.com.')
return

log.debug(f'Opening URL: {url}')

try:
response = requests.get(
url,
params={'apikey': omdb_api_key, 'y': year, 't': title},
verify=False,
timeout=(60, 300),
)
response = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title}, verify=False, timeout=(60, 300))
except requests.ConnectionError:
log.error(f'Unable to open URL {url}')
return

try:
results = response.json()
except Exception:
log.error('No json data returned from omdbapi.com')

try:
imdbid = results['imdbID']
except Exception:
log.error('No imdbID returned from omdbapi.com')

if imdbid:
log.info(f'Found imdbID [{imdbid}]')
return imdbid

log.warning(f'Unable to find a imdbID for {input_name}')
return imdbid
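Note: the regex above matches the 7- or 8-digit IMDb tt ids embedded in release names:

import re

sample = '/downloads/movies/A.Movie.2020.tt1234567.1080p'
match = re.search(r'\b(tt\d{7,8})\b', sample)
print(match.group(1) if match else None)  # tt1234567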
def category_search(
input_directory, input_name, input_category, root, categories,
):
def category_search(input_directory, input_name, input_category, root, categories):
tordir = False

if input_directory is None: # =Nothing to process here.
return input_directory, input_name, input_category, root

pathlist = os.path.normpath(input_directory).split(os.sep)

if input_category and input_category in pathlist:
log.debug(f'SEARCH: Found the Category: {input_category} in directory structure')
elif input_category:
log.debug(f'SEARCH: Could not find the category: {input_category} in the directory structure')
else:
try:
input_category = list(set(pathlist) & set(categories))[
-1
] # assume last match is most relevant category.
input_category = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category.
log.debug(f'SEARCH: Found Category: {input_category} in directory structure')
except IndexError:
input_category = ''
log.debug('SEARCH: Could not find a category in the directory structure')
if not os.path.isdir(input_directory) and os.path.isfile(
input_directory,
): # If the input directory is a file
if not os.path.isdir(input_directory) and os.path.isfile(input_directory):
# If the input directory is a file
if not input_name:
input_name = os.path.split(os.path.normpath(input_directory))[1]
return input_directory, input_name, input_category, root
if input_category and os.path.isdir(
os.path.join(input_directory, input_category),
):
if input_category and os.path.isdir(os.path.join(input_directory, input_category)):
log.info(f'SEARCH: Found category directory {input_category} in input directory directory {input_directory}')
input_directory = os.path.join(input_directory, input_category)
log.info(f'SEARCH: Setting input_directory to {input_directory}')
@ -135,53 +108,36 @@ def category_search(
input_directory = os.path.join(input_directory, input_name)
log.info(f'SEARCH: Setting input_directory to {input_directory}')
tordir = True
elif input_name and os.path.isdir(
os.path.join(input_directory, sanitize_name(input_name)),
):
elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))):
log.info(f'SEARCH: Found torrent directory {sanitize_name(input_name)} in input directory directory {input_directory}')
input_directory = os.path.join(
input_directory, sanitize_name(input_name),
)
input_directory = os.path.join(input_directory, sanitize_name(input_name))
log.info(f'SEARCH: Setting input_directory to {input_directory}')
tordir = True
elif input_name and os.path.isfile(
os.path.join(input_directory, input_name),
):
elif input_name and os.path.isfile(os.path.join(input_directory, input_name)):
log.info(f'SEARCH: Found torrent file {input_name} in input directory directory {input_directory}')
input_directory = os.path.join(input_directory, input_name)
log.info(f'SEARCH: Setting input_directory to {input_directory}')
tordir = True
elif input_name and os.path.isfile(
os.path.join(input_directory, sanitize_name(input_name)),
):
elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))):
log.info(f'SEARCH: Found torrent file {sanitize_name(input_name)} in input directory directory {input_directory}')
input_directory = os.path.join(
input_directory, sanitize_name(input_name),
)
input_directory = os.path.join(input_directory, sanitize_name(input_name))
log.info(f'SEARCH: Setting input_directory to {input_directory}')
tordir = True
elif input_name and os.path.isdir(input_directory):
for file in os.listdir(input_directory):
if os.path.splitext(file)[0] in [
input_name,
sanitize_name(input_name),
]:
if os.path.splitext(file)[0] in [input_name, sanitize_name(input_name)]:
log.info(f'SEARCH: Found torrent file {file} in input directory directory {input_directory}')
input_directory = os.path.join(input_directory, file)
log.info(f'SEARCH: Setting input_directory to {input_directory}')
input_name = file
tordir = True
break

imdbid = [
item for item in pathlist if '.cp(tt' in item
] # This looks for the .cp(tt imdb id in the path.
# This looks for the .cp(tt imdb id in the path.
imdbid = [item for item in pathlist if '.cp(tt' in item]
if imdbid and '.cp(tt' not in input_name:
input_name = imdbid[
0
] # This ensures the imdb id is preserved and passed to CP
input_name = imdbid[0]
# This ensures the imdb id is preserved and passed to CP
tordir = True

if input_category and not tordir:
try:
index = pathlist.index(input_category)
@ -192,7 +148,6 @@ def category_search(
input_name = pathlist[index + 1]
except ValueError:
pass

if input_name and not tordir:
if input_name in pathlist or sanitize_name(input_name) in pathlist:
log.info(f'SEARCH: Found torrent directory {input_name} in the directory structure')
@ -201,9 +156,7 @@ def category_search(
root = 1
if not tordir:
root = 2

if root > 0:
log.info('SEARCH: Could not find a unique directory for this download. Assume a common directory.')
log.info('SEARCH: We will try and determine which files to process, individually')

return input_directory, input_name, input_category, root
@ -10,7 +10,6 @@ from nzb2media.utils.paths import make_dir

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

try:
from jaraco.windows.filesystem import islink, readlink
except ImportError:
@ -25,7 +24,6 @@ def copy_link(src, target_link, use_link):
log.info(f'MEDIAFILE: [{os.path.basename(target_link)}]')
log.info(f'SOURCE FOLDER: [{os.path.dirname(src)}]')
log.info(f'TARGET FOLDER: [{os.path.dirname(target_link)}]')

if src != target_link and os.path.exists(target_link):
log.info('MEDIAFILE already exists in the TARGET folder, skipping ...')
return True
@ -35,7 +33,6 @@ def copy_link(src, target_link, use_link):
if src == os.path.dirname(target_link):
log.info('SOURCE AND TARGET folders are the same, skipping ...')
return True

make_dir(os.path.dirname(target_link))
try:
if use_link == 'dir':
@ -65,28 +62,22 @@ def copy_link(src, target_link, use_link):
return True
except Exception as error:
log.warning(f'Error: {error}, copying instead ... ')

log.info('Copying SOURCE MEDIAFILE -> TARGET FOLDER')
shutil.copy(src, target_link)

return True


def replace_links(link, max_depth=10):
link_depth = 0
target = link

for attempt in range(0, max_depth):
if not islink(target):
break
target = readlink(target)
link_depth = attempt

if not link_depth:
log.debug(f'{link} is not a link')
elif link_depth > max_depth or (
link_depth == max_depth and islink(target)
):
elif link_depth > max_depth or (link_depth == max_depth and islink(target)):
log.warning(f'Exceeded maximum depth {max_depth} while following link {link}')
else:
log.info(f'Changing sym-link: {link} to point directly to file: {target}')
@ -6,7 +6,6 @@ import re
def sanitize_name(name):
"""
Remove bad chars from the filename.

>>> sanitize_name('a/b/c')
'a-b-c'
>>> sanitize_name('abc')
@ -18,24 +17,20 @@ def sanitize_name(name):
"""
name = re.sub(r'[\\/*]', '-', name)
name = re.sub(r'[:\'<>|?]', '', name)

# remove leading/trailing periods and spaces
name = name.strip(' .')

return name


def clean_file_name(filename):
"""
Clean up nzb name by removing any . and _ characters and trailing hyphens.

Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
"""
filename = re.sub(r'(\D)\.(?!\s)(\D)', r'\1 \2', filename)
filename = re.sub(
r'(\d)\.(\d{4})', r'\1 \2', filename,
) # if it ends in a year then don't keep the dot
# if it ends in a year then don't keep the dot
filename = re.sub(r'(\d)\.(\d{4})', r'\1 \2', filename)
filename = re.sub(r'(\D)\.(?!\s)', r'\1 ', filename)
filename = re.sub(r'\.(?!\s)(\D)', r' \1', filename)
filename = filename.replace('_', ' ')
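Note: a worked example of how these substitutions interact; the input name is illustrative:

import re

filename = 'Some_Show.S01E01.Title.2021.720p'
filename = re.sub(r'(\D)\.(?!\s)(\D)', r'\1 \2', filename)  # dot between two non-digits -> space
filename = re.sub(r'(\d)\.(\d{4})', r'\1 \2', filename)     # digit-dot-year -> space
filename = re.sub(r'(\D)\.(?!\s)', r'\1 ', filename)        # non-digit then dot -> space
filename = re.sub(r'\.(?!\s)(\D)', r' \1', filename)        # dot then non-digit -> space
filename = filename.replace('_', ' ')
print(filename)  # Some Show S01E01 Title 2021.720p -- the dot between digits survives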
@ -26,12 +26,10 @@ def wake_on_lan(ethernet_address):
"""Send a WakeOnLan request."""
# Create the WoL magic packet
magic_packet = make_wake_on_lan_packet(ethernet_address)

# ...and send it to the broadcast address using UDP
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as connection:
connection.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
connection.sendto(magic_packet, ('<broadcast>', 9))

log.info(f'WakeOnLan sent for mac: {ethernet_address}')
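Note: make_wake_on_lan_packet is not shown in this diff; the conventional magic-packet layout is six 0xFF bytes followed by the MAC address repeated sixteen times. A sketch under that assumption:

def make_wake_on_lan_packet(ethernet_address: str) -> bytes:
    # Assumed implementation of the helper, not the repo's actual code.
    mac = bytes.fromhex(ethernet_address.replace(':', '').replace('-', ''))
    if len(mac) != 6:
        raise ValueError('MAC address must be 6 bytes')
    return b'\xff' * 6 + mac * 16

packet = make_wake_on_lan_packet('00:11:22:33:44:55')
print(len(packet))  # 102 bytes: 6 + 16 * 6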
@ -52,9 +50,7 @@ def wake_up():
port = int(wol['port'])
mac = wol['mac']
max_attempts = 4

log.info('Trying to wake On lan.')

for attempt in range(0, max_attempts):
log.info(f'Attempt {attempt + 1} of {max_attempts}')
if test_connection(host, port) == 'Up':
@ -66,7 +62,6 @@ def wake_up():
if test_connection(host, port) == 'Down': # final check.
msg = 'System with mac: {0} has not woken after {1} attempts.'
log.warning(msg.format(mac, max_attempts))

log.info('Continuing with the rest of the script.')

@ -108,20 +103,12 @@ def find_download(client_agent, download_id):
else:
base_url = f'http://{nzb2media.SABNZBD_HOST}:{nzb2media.SABNZBD_PORT}/api'
url = base_url
params = {
'apikey': nzb2media.SABNZBD_APIKEY,
'mode': 'get_files',
'output': 'json',
'value': download_id,
}
params = {'apikey': nzb2media.SABNZBD_APIKEY, 'mode': 'get_files', 'output': 'json', 'value': download_id}
try:
response = requests.get(
url, params=params, verify=False, timeout=(30, 120),
)
response = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.ConnectionError:
log.error('Unable to open URL')
return False # failure

result = response.json()
if result['files']:
return True
@ -20,11 +20,7 @@ def get_nzoid(input_name):
else:
base_url = f'http://{nzb2media.SABNZBD_HOST}:{nzb2media.SABNZBD_PORT}/api'
url = base_url
params = {
'apikey': nzb2media.SABNZBD_APIKEY,
'mode': 'queue',
'output': 'json',
}
params = {'apikey': nzb2media.SABNZBD_APIKEY, 'mode': 'queue', 'output': 'json'}
try:
response = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.ConnectionError:
@ -33,12 +29,7 @@ def get_nzoid(input_name):
try:
result = response.json()
clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend(
[
(slot['nzo_id'], slot['filename'])
for slot in result['queue']['slots']
],
)
slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']])
except Exception:
log.warning('Data from SABnzbd queue could not be parsed')
params['mode'] = 'history'
@ -50,12 +41,7 @@ def get_nzoid(input_name):
try:
result = response.json()
clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend(
[
(slot['nzo_id'], slot['name'])
for slot in result['history']['slots']
],
)
slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']])
except Exception:
log.warning('Data from SABnzbd history could not be parsed')
try:
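Note: a hedged sketch of the lookup flow this hunk compresses: query the SABnzbd api for queue slots, collect (nzo_id, filename) pairs, and match the cleaned input name. The host and api key below are placeholders, not repo values:

import requests

url = 'http://localhost:8080/api'
params = {'apikey': 'REPLACE_ME', 'mode': 'queue', 'output': 'json'}
response = requests.get(url, params=params, verify=False, timeout=(30, 120))
slots = [(slot['nzo_id'], slot['filename']) for slot in response.json()['queue']['slots']]
clean_name = 'Some.Release.Name'  # illustrative
nzoid = next((nzo_id for nzo_id, name in slots if name == clean_name), None)
print(nzoid)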
@ -14,8 +14,7 @@ def parse_other(args):


def parse_rtorrent(args):
# rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia,
# 'execute={/path/to/nzbToMedia/TorrentToMedia.py,\'$d.get_base_path=\',\'$d.get_name=\',\'$d.get_custom1=\',\'$d.get_hash=\'}'
# rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia, # 'execute={/path/to/nzbToMedia/TorrentToMedia.py,\'$d.get_base_path=\',\'$d.get_name=\',\'$d.get_custom1=\',\'$d.get_hash=\'}'
input_directory = os.path.normpath(args[1])
try:
input_name = args[2]
@ -33,7 +32,6 @@ def parse_rtorrent(args):
input_id = args[4]
except Exception:
input_id = ''

return input_directory, input_name, input_category, input_hash, input_id


@ -53,7 +51,6 @@ def parse_utorrent(args):
input_id = args[4]
except Exception:
input_id = ''

return input_directory, input_name, input_category, input_hash, input_id


@ -64,11 +61,7 @@ def parse_deluge(args):
input_hash = args[1]
input_id = args[1]
try:
input_category = (
nzb2media.TORRENT_CLASS.core.get_torrent_status(input_id, ['label'])
.get(b'label')
.decode()
)
input_category = nzb2media.TORRENT_CLASS.core.get_torrent_status(input_id, ['label']).get(b'label').decode()
except Exception:
input_category = ''
return input_directory, input_name, input_category, input_hash, input_id
@ -92,13 +85,7 @@ def parse_synods(args):
input_name = os.getenv('TR_TORRENT_NAME')
input_hash = os.getenv('TR_TORRENT_HASH')
if not input_name: # No info passed. Assume manual download.
return (
input_directory,
input_name,
input_category,
input_hash,
input_id,
)
return input_directory, input_name, input_category, input_hash, input_id
torrent_id = os.getenv('TR_TORRENT_ID')
input_id = f'dbid_{torrent_id}'
# res = nzb2media.TORRENT_CLASS.tasks_list(additional_param='detail')
@ -152,7 +139,6 @@ def parse_vuze(args):
input_name = cur_input[5]
except Exception:
pass

return input_directory, input_name, input_category, input_hash, input_id


@ -186,22 +172,11 @@ def parse_qbittorrent(args):
input_id = cur_input[3].replace('\'', '')
except Exception:
input_id = ''

return input_directory, input_name, input_category, input_hash, input_id


def parse_args(client_agent, args):
clients = {
'other': parse_other,
'rtorrent': parse_rtorrent,
'utorrent': parse_utorrent,
'deluge': parse_deluge,
'transmission': parse_transmission,
'qbittorrent': parse_qbittorrent,
'vuze': parse_vuze,
'synods': parse_synods,
}

clients = {'other': parse_other, 'rtorrent': parse_rtorrent, 'utorrent': parse_utorrent, 'deluge': parse_deluge, 'transmission': parse_transmission, 'qbittorrent': parse_qbittorrent, 'vuze': parse_vuze, 'synods': parse_synods}
try:
return clients[client_agent](args)
except Exception:
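Note: the dispatch-table pattern used by parse_args, reduced to a runnable toy; only 'other' is wired up here, while the real table maps all eight clients:

def parse_other(args):
    # Illustrative stand-in: the real parsers unpack per-client argument layouts.
    return tuple(args[1:6])

clients = {'other': parse_other}

def parse_args(client_agent, args):
    try:
        return clients[client_agent](args)
    except Exception:
        return None, None, None, None, None  # unknown agent or bad args

print(parse_args('other', ['script', '/dir', 'name', 'cat', 'hash', 'id']))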
@ -16,12 +16,9 @@ log.addHandler(logging.NullHandler())
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.

If the error is due to an access error (read only file)
it attempts to add write permission and then retries.

If the error is for another reason it re-raises the error.

Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
if not os.access(path, os.W_OK):
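Note: the handler body is cut off by this hunk; the conventional implementation matching the docstring (add write permission, retry, otherwise re-raise) looks like this sketch:

import os
import stat

def onerror(func, path, exc_info):
    # Assumed body consistent with the docstring above, not the repo's exact code.
    if not os.access(path, os.W_OK):
        os.chmod(path, stat.S_IWUSR)  # add write permission for read-only entries
        func(path)                    # retry the operation that failed
    else:
        raise  # unrelated failure: re-raise the active exception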
@ -69,17 +66,13 @@ def remote_dir(path):

def get_dir_size(input_path):
prepend = partial(os.path.join, input_path)
return sum(
(os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f))
for f in map(prepend, os.listdir(input_path))
)
return sum((os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) for f in map(prepend, os.listdir(input_path)))


def remove_empty_folders(path, remove_root=True):
"""Remove empty folders."""
if not os.path.isdir(path):
return

# remove empty subfolders
log.debug(f'Checking for empty folders in:{path}')
files = os.listdir(path)
@ -88,7 +81,6 @@ def remove_empty_folders(path, remove_root=True):
fullpath = os.path.join(path, each_file)
if os.path.isdir(fullpath):
remove_empty_folders(fullpath)

# if folder empty, delete it
files = os.listdir(path)
if len(files) == 0 and remove_root:
@ -128,16 +120,13 @@ def clean_directory(path, files):
if not os.path.exists(path):
log.info(f'Directory {path} has been processed and removed ...')
return

if nzb2media.FORCE_CLEAN and not nzb2media.FAILED:
log.info(f'Doing Forceful Clean of {path}')
remove_dir(path)
return

if files:
log.info(f'Directory {path} still contains {len(files)} unprocessed file(s), skipping ...')
return

log.info(f'Directory {path} has been processed, removing ...')
try:
shutil.rmtree(path, onerror=onerror)
@ -150,7 +139,6 @@ def rchmod(path, mod):
os.chmod(path, mod)
if not os.path.isdir(path):
return # Skip files

for root, dirs, files in os.walk(path):
for each_dir in dirs:
os.chmod(os.path.join(root, each_dir), mod)
@ -13,7 +13,6 @@ if os.name == 'nt':
from win32event import CreateMutex
from win32api import CloseHandle, GetLastError
from winerror import ERROR_ALREADY_EXISTS

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

@ -76,7 +75,6 @@ class PosixProcess:
self.lasterror = False
else:
self.lasterror = False

if not self.lasterror:
# Write my pid into pidFile to keep multiple copies of program
# from running
@ -100,18 +98,14 @@ else:

def restart():
install_type = nzb2media.version_check.CheckVersion().install_type

status = 0
popen_list = []

if install_type in ('git', 'source'):
popen_list = [sys.executable, nzb2media.APP_FILENAME]

if popen_list:
popen_list += nzb2media.SYS_ARGV
log.info(f'Restarting nzbToMedia with {popen_list}')
with subprocess.Popen(popen_list, cwd=os.getcwd()) as proc:
proc.wait()
status = proc.returncode

os._exit(status)
@ -12,20 +12,12 @@ from nzb2media.torrent import utorrent

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

torrent_clients = {
'deluge': deluge,
'qbittorrent': qbittorrent,
'transmission': transmission,
'utorrent': utorrent,
'synods': synology,
}
torrent_clients = {'deluge': deluge, 'qbittorrent': qbittorrent, 'transmission': transmission, 'utorrent': utorrent, 'synods': synology}


def create_torrent_class(client_agent):
if nzb2media.APP_NAME != 'TorrentToMedia.py':
return # Skip loading Torrent for NZBs.

try:
agent = torrent_clients[client_agent]
except KeyError:
@ -28,7 +28,6 @@ class CheckVersion:
self.install_type = self.find_install_type()
self.installed_version = None
self.installed_branch = None

if self.install_type == 'git':
self.updater = GitUpdateManager()
elif self.install_type == 'source':
@ -42,7 +41,6 @@ class CheckVersion:
def find_install_type(self):
"""
Determine how this copy of SB was installed.

returns: type of installation. Possible values are:
'win': any compiled windows build
'git': running from source using git
@ -53,27 +51,22 @@ class CheckVersion:
install_type = 'git'
else:
install_type = 'source'

return install_type

def check_for_new_version(self, force=False):
"""
Check the internet for a newer version.

returns: bool, True for new version or False for no new version.

force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
"""
if not nzb2media.VERSION_NOTIFY and not force:
log.info('Version checking is disabled, not checking for the newest version')
return False

log.info(f'Checking if {self.install_type} needs an update')
if not self.updater.need_update():
nzb2media.NEWEST_VERSION_STRING = None
log.info('No update needed')
return False

self.updater.set_newest_text()
return True
@ -100,7 +93,6 @@ class GitUpdateManager(UpdateManager):
self.github_repo_user = self.get_github_repo_user()
self.github_repo = self.get_github_repo()
self.branch = self._find_git_branch()

self._cur_commit_hash = None
self._newest_commit_hash = None
self._num_commits_behind = 0
@ -111,86 +103,56 @@ class GitUpdateManager(UpdateManager):

def _find_working_git(self):
test_cmd = 'version'

if nzb2media.GIT_PATH:
main_git = f'"{nzb2media.GIT_PATH}"'
else:
main_git = 'git'

log.debug(f'Checking if we can use git commands: {main_git} {test_cmd}')
output, err, exit_status = self._run_git(main_git, test_cmd)

if exit_status == 0:
log.debug(f'Using: {main_git}')
return main_git
log.debug(f'Not using: {main_git}')

# trying alternatives

alternative_git = []

# osx people who start SB from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_git.append('/usr/local/git/bin/git')

if platform.system().lower() == 'windows':
if main_git != main_git.lower():
alternative_git.append(main_git.lower())

if alternative_git:
log.debug('Trying known alternative git locations')

for cur_git in alternative_git:
log.debug(f'Checking if we can use git commands: {cur_git} {test_cmd}')
output, err, exit_status = self._run_git(cur_git, test_cmd)

if exit_status == 0:
log.debug(f'Using: {cur_git}')
return cur_git
log.debug(f'Not using: {cur_git}')

# Still haven't found a working git
log.debug(
'Unable to find your git executable - '
'Set git_path in your autoProcessMedia.cfg OR '
'delete your .git folder and run from source to enable updates.',
)

log.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.')
return None

def _run_git(self, git_path, args):

result = None
proc_err = None

if not git_path:
log.debug('No git specified, can\'t use git commands')
proc_status = 1
return result, proc_err, proc_status

cmd = f'{git_path} {args}'

try:
log.debug(f'Executing {cmd} with your shell in {nzb2media.APP_ROOT}')
with subprocess.Popen(
cmd,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
shell=True,
cwd=nzb2media.APP_ROOT,
) as proc:
with subprocess.Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=True, cwd=nzb2media.APP_ROOT) as proc:
proc_out, proc_err = proc.communicate()
proc_status = proc.returncode

if nzb2media.LOG_GIT:
msg = proc_out.decode('utf-8').strip()
log.debug(f'git output: {msg}')

except OSError:
log.error(f'Command {cmd} didn\'t work')
proc_status = 1

proc_status = 128 if ('fatal:' in result) or proc_err else proc_status
if proc_status == 0:
log.debug(f'{cmd} : returned successful')
@ -201,21 +163,15 @@ class GitUpdateManager(UpdateManager):
if nzb2media.LOG_GIT:
log.debug(f'{cmd} returned : {result}, treat as error for now')
proc_status = 1

return result, proc_err, proc_status

def _find_installed_version(self):
"""
Attempt to find the currently installed version of Sick Beard.

Uses git show to get commit version.

Returns: True for success or False for failure
"""
output, err, exit_status = self._run_git(
self._git_path, 'rev-parse HEAD',
) # @UnusedVariable

output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD')
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
@ -229,9 +185,7 @@ class GitUpdateManager(UpdateManager):

def _find_git_branch(self):
nzb2media.NZBTOMEDIA_BRANCH = self.get_github_branch()
branch_info, err, exit_status = self._run_git(
self._git_path, 'symbolic-ref -q HEAD',
) # @UnusedVariable
branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD')
if exit_status == 0 and branch_info:
branch = branch_info.strip().replace('refs/heads/', '', 1)
if branch:
@ -242,7 +196,6 @@ class GitUpdateManager(UpdateManager):
def _check_github_for_update(self):
"""
Check Github for a new version.

Uses git commands to check if there is a newer version than
the provided commit hash. If there is a newer version it
sets _num_commits_behind.
@ -250,24 +203,15 @@ class GitUpdateManager(UpdateManager):
self._newest_commit_hash = None
self._num_commits_behind = 0
self._num_commits_ahead = 0

# get all new info from github
output, err, exit_status = self._run_git(
self._git_path, 'fetch origin',
)

output, err, exit_status = self._run_git(self._git_path, 'fetch origin')
if not exit_status == 0:
log.error('Unable to contact github, can\'t check for update')
return

# get latest commit_hash from remote
output, err, exit_status = self._run_git(
self._git_path, 'rev-parse --verify --quiet \'@{upstream}\'',
)

output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet \'@{upstream}\'')
if exit_status == 0 and output:
cur_commit_hash = output.strip()

if not re.match('^[a-z0-9]+$', cur_commit_hash):
log.debug('Output doesn\'t look like a hash, not using it')
return
@ -275,22 +219,15 @@ class GitUpdateManager(UpdateManager):
else:
log.debug('git didn\'t return newest commit hash')
return

# get number of commits behind and ahead (option --count not supported git < 1.7.2)
output, err, exit_status = self._run_git(
self._git_path, 'rev-list --left-right \'@{upstream}\'...HEAD',
)

output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right \'@{upstream}\'...HEAD')
if exit_status == 0 and output:

try:
self._num_commits_behind = int(output.count('<'))
self._num_commits_ahead = int(output.count('>'))

except Exception:
log.debug('git didn\'t return numbers for behind and ahead, not using it')
return

log.debug(f'cur_commit = {self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}, num_commits_ahead = {self._num_commits_ahead}')
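Note: why counting '<' and '>' works: rev-list --left-right prefixes commits reachable only from the upstream side with '<' and commits only on HEAD with '>':

output = '<abc123\n<def456\n>fed987\n'  # illustrative git output, not real hashes
num_commits_behind = output.count('<')
num_commits_ahead = output.count('>')
print(num_commits_behind, num_commits_ahead)  # 2 1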
def set_newest_text(self):
@ -305,35 +242,26 @@ class GitUpdateManager(UpdateManager):
if not self._find_installed_version():
log.error('Unable to determine installed version via git, please check your logs!')
return False

if not self._cur_commit_hash:
return True

try:
self._check_github_for_update()
except Exception as error:
log.error(f'Unable to contact github, can\'t check for update: {error!r}')
return False

if self._num_commits_behind > 0:
return True

return False

def update(self):
"""
Check git for a new version.

Calls git pull origin <branch> in order to update Sick Beard.
Returns a bool depending on the call's success.
"""
output, err, exit_status = self._run_git(
self._git_path, f'pull origin {self.branch}',
) # @UnusedVariable

output, err, exit_status = self._run_git(self._git_path, f'pull origin {self.branch}')
if exit_status == 0:
return True

return False


@ -342,99 +270,71 @@ class SourceUpdateManager(UpdateManager):
self.github_repo_user = self.get_github_repo_user()
self.github_repo = self.get_github_repo()
self.branch = self.get_github_branch()

self._cur_commit_hash = None
self._newest_commit_hash = None
self._num_commits_behind = 0

def _find_installed_version(self):

version_file = os.path.join(nzb2media.APP_ROOT, 'version.txt')

if not os.path.isfile(version_file):
self._cur_commit_hash = None
return

try:
with open(version_file) as fin:
self._cur_commit_hash = fin.read().strip(' \n\r')
except OSError as error:
log.debug(f'Unable to open \'version.txt\': {error}')

if not self._cur_commit_hash:
self._cur_commit_hash = None
else:
nzb2media.NZBTOMEDIA_VERSION = self._cur_commit_hash

def need_update(self):

self._find_installed_version()

try:
self._check_github_for_update()
except Exception as error:
log.error(f'Unable to contact github, can\'t check for update: {error!r}')
return False

if not self._cur_commit_hash or self._num_commits_behind > 0:
return True

return False

def _check_github_for_update(self):
"""
Check Github for a new version.

Uses pygithub to ask github if there is a newer version than
the provided commit hash. If there is a newer version it sets
Sick Beard's version text.

commit_hash: hash that we're checking against
"""
self._num_commits_behind = 0
self._newest_commit_hash = None

repository = github.GitHub(
self.github_repo_user, self.github_repo, self.branch,
)

repository = github.GitHub(self.github_repo_user, self.github_repo, self.branch)
# try to get newest commit hash and commits behind directly by
# comparing branch and current commit
if self._cur_commit_hash:
branch_compared = repository.compare(
base=self.branch, head=self._cur_commit_hash,
)

branch_compared = repository.compare(base=self.branch, head=self._cur_commit_hash)
if 'base_commit' in branch_compared:
self._newest_commit_hash = branch_compared['base_commit'][
'sha'
]

self._newest_commit_hash = branch_compared['base_commit']['sha']
if 'behind_by' in branch_compared:
self._num_commits_behind = int(branch_compared['behind_by'])

# fall back and iterate over last 100 (items per page in gh_api) commits
if not self._newest_commit_hash:

for cur_commit in repository.commits():
if not self._newest_commit_hash:
self._newest_commit_hash = cur_commit['sha']
if not self._cur_commit_hash:
break

if cur_commit['sha'] == self._cur_commit_hash:
break

# when _cur_commit_hash doesn't match anything _num_commits_behind == 100
self._num_commits_behind += 1

log.debug(f'cur_commit = {self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}')

def set_newest_text(self):

# if we're up to date then don't set this
nzb2media.NEWEST_VERSION_STRING = None

if not self._cur_commit_hash:
log.error('Unknown current version number, don\'t know if we should update or not')
elif self._num_commits_behind > 0:
@ -444,67 +344,46 @@ class SourceUpdateManager(UpdateManager):

def update(self):
"""Download and install latest source tarball from github."""
tar_download_url = (
f'https://github.com/{self.github_repo_user}/{self.github_repo}/tarball/{self.branch}'
)
tar_download_url = f'https://github.com/{self.github_repo_user}/{self.github_repo}/tarball/{self.branch}'
version_path = os.path.join(nzb2media.APP_ROOT, 'version.txt')

try:
# prepare the update dir
sb_update_dir = os.path.join(nzb2media.APP_ROOT, 'sb-update')

if os.path.isdir(sb_update_dir):
log.info(f'Clearing out update folder {sb_update_dir} before extracting')
shutil.rmtree(sb_update_dir)

log.info(f'Creating update folder {sb_update_dir} before extracting')
os.makedirs(sb_update_dir)

# retrieve file
log.info(f'Downloading update from {tar_download_url!r}')
tar_download_path = os.path.join(
sb_update_dir, 'nzbtomedia-update.tar',
)
tar_download_path = os.path.join(sb_update_dir, 'nzbtomedia-update.tar')
urlretrieve(tar_download_url, tar_download_path)

if not os.path.isfile(tar_download_path):
log.error(f'Unable to retrieve new version from {tar_download_url}, can\'t update')
return False

if not tarfile.is_tarfile(tar_download_path):
log.error(f'Retrieved version from {tar_download_url} is corrupt, can\'t update')
return False

# extract to sb-update dir
log.info(f'Extracting file {tar_download_path}')
with tarfile.open(tar_download_path) as tar:
tar.extractall(sb_update_dir)

# delete .tar.gz
log.info(f'Deleting file {tar_download_path}')
os.remove(tar_download_path)

# find update dir name
update_dir_contents = [
x
for x in os.listdir(sb_update_dir)
if os.path.isdir(os.path.join(sb_update_dir, x))
]
update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))]
if len(update_dir_contents) != 1:
log.error(f'Invalid update data, update failed: {update_dir_contents}')
return False
content_dir = os.path.join(sb_update_dir, update_dir_contents[0])

# walk temp folder and move files to main folder
log.info(f'Moving files from {content_dir} to {nzb2media.APP_ROOT}')
for dirname, _, filenames in os.walk(
content_dir,
): # @UnusedVariable
dirname = dirname[len(content_dir) + 1:]
for dirname, _, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir) + 1 :]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(nzb2media.APP_ROOT, dirname, curfile)

# Avoid DLL access problem on WIN32/64
# These files needing to be updated manually
# or find a way to kill the access from memory
@ -518,11 +397,9 @@ class SourceUpdateManager(UpdateManager):
# Trash the updated file without moving in new path
os.remove(old_path)
continue

if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)

# update version.txt with commit hash
try:
with open(version_path, 'w') as ver_file:
@ -530,10 +407,8 @@ class SourceUpdateManager(UpdateManager):
except OSError as error:
log.error(f'Unable to write version file, update not complete: {error}')
return False

except Exception as error:
log.error(f'Error while trying to update: {error}')
log.debug(f'Traceback: {traceback.format_exc()}')
return False

return True