diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 2bade6621..4ef7a4798 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,6 +5,19 @@ on: types: - published workflow_dispatch: + inputs: + pr_number: + description: 'PR number (for PR builds)' + required: false + type: string + pr_ref: + description: 'PR branch ref (for PR builds)' + required: false + type: string + pr_sha: + description: 'PR SHA (for PR builds)' + required: false + type: string env: REGISTRY: ghcr.io @@ -20,6 +33,42 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + ref: ${{ inputs.pr_sha || github.sha }} + fetch-depth: 0 + + - name: Fetch PR if building for PR + if: inputs.pr_number + run: | + echo "Building for PR #${{ inputs.pr_number }}" + echo "PR SHA: ${{ inputs.pr_sha }}" + echo "PR Ref: ${{ inputs.pr_ref }}" + + # Try to fetch the PR head directly (works for both forks and same-repo PRs) + if git fetch origin pull/${{ inputs.pr_number }}/head:pr-${{ inputs.pr_number }}; then + echo "✅ Successfully fetched PR via pull/${{ inputs.pr_number }}/head" + git checkout pr-${{ inputs.pr_number }} + else + echo "⚠️ Failed to fetch via pull/ ref, trying to checkout SHA directly" + # Fallback: try to checkout the SHA if it exists + if git checkout ${{ inputs.pr_sha }}; then + echo "✅ Successfully checked out SHA ${{ inputs.pr_sha }}" + else + echo "❌ Failed to checkout PR. Using current ref." + exit 1 + fi + fi + + # Verify we're on the right commit + CURRENT_SHA=$(git rev-parse HEAD) + echo "Current SHA: $CURRENT_SHA" + echo "Expected SHA: ${{ inputs.pr_sha }}" + + if [ "$CURRENT_SHA" = "${{ inputs.pr_sha }}" ]; then + echo "✅ Successfully checked out correct commit" + else + echo "⚠️ Warning: Current SHA doesn't match expected SHA" + fi - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -44,6 +93,9 @@ jobs: if [ "${{ github.event_name }}" == "release" ]; then RELEASE_VERSION="${{ github.event.release.tag_name }}" echo "VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV + elif [ "${{ github.event_name }}" == "workflow_dispatch" ] && [ -n "${{ inputs.pr_number }}" ]; then + PR_NUMBER="${{ inputs.pr_number }}" + echo "VERSION=pr-${PR_NUMBER}" >> $GITHUB_ENV elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then BRANCH_NAME="${{ github.ref_name }}" echo "VERSION=${BRANCH_NAME}" >> $GITHUB_ENV @@ -57,17 +109,15 @@ jobs: tags: | type=ref,event=branch type=ref,event=tag - type=raw,value=latest,enable={{is_default_branch}} + type=raw,value=latest,enable=${{ github.event_name == 'release' }} - name: Build and push Docker image uses: docker/build-push-action@v5 with: context: . - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64 push: true - tags: | - ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.VERSION }} - ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:latest + tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max @@ -76,10 +126,14 @@ jobs: run: | echo "✅ Docker images built and pushed successfully!" 
echo "🐋 Images:" - echo " - ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.VERSION }}" - echo " - ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:latest" + echo "${{ steps.meta.outputs.tags }}" | sed 's/^/ - /' + echo "📝 Event: ${{ github.event_name }}" if [ "${{ github.event_name }}" == "release" ]; then echo "📝 Triggered by release: ${{ github.event.release.tag_name }}" + elif [ -n "${{ inputs.pr_number }}" ]; then + echo "📝 Triggered by comment on PR #${{ inputs.pr_number }}" + echo "📝 PR branch: ${{ inputs.pr_ref }}" + echo "📝 PR SHA: ${{ inputs.pr_sha }}" else echo "📝 Triggered by manual workflow dispatch on branch: ${{ github.ref_name }}" fi diff --git a/.github/workflows/pr-build-comment.yml b/.github/workflows/pr-build-comment.yml new file mode 100644 index 000000000..1d429e1f6 --- /dev/null +++ b/.github/workflows/pr-build-comment.yml @@ -0,0 +1,116 @@ +name: PR Build Comment Trigger + +on: + issue_comment: + types: [created] + +jobs: + check-comment: + if: github.event.issue.pull_request && github.event.comment.user.login == 'Audionut' && contains(github.event.comment.body, '/build') + runs-on: ubuntu-latest + outputs: + should-build: ${{ steps.check.outputs.should-build }} + pr-number: ${{ steps.pr-info.outputs.pr-number }} + pr-ref: ${{ steps.pr-info.outputs.pr-ref }} + pr-sha: ${{ steps.pr-info.outputs.pr-sha }} + pr-repo: ${{ steps.pr-info.outputs.pr-repo }} + + steps: + - name: Check comment and permissions + id: check + run: | + echo "Comment by: ${{ github.event.comment.user.login }}" + echo "Comment body: ${{ github.event.comment.body }}" + echo "should-build=true" >> $GITHUB_OUTPUT + + - name: Get PR information + id: pr-info + uses: actions/github-script@v7 + with: + script: | + const pr = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }); + + core.setOutput('pr-number', context.issue.number); + core.setOutput('pr-ref', pr.data.head.ref); + core.setOutput('pr-sha', pr.data.head.sha); + core.setOutput('pr-repo', pr.data.head.repo.full_name); + + console.log(`PR #${context.issue.number}: ${pr.data.title}`); + console.log(`Branch: ${pr.data.head.ref}`); + console.log(`SHA: ${pr.data.head.sha}`); + console.log(`Repo: ${pr.data.head.repo.full_name}`); + + - name: React to comment + uses: actions/github-script@v7 + with: + script: | + github.rest.reactions.createForIssueComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: context.payload.comment.id, + content: 'rocket' + }); + + trigger-build: + needs: check-comment + if: needs.check-comment.outputs.should-build == 'true' + runs-on: ubuntu-latest + + steps: + - name: Trigger Docker build workflow + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + // Check if PR is from a fork + const prRepo = '${{ needs.check-comment.outputs.pr-repo }}'; + const baseRepo = context.repo.owner + '/' + context.repo.repo; + const isFromFork = prRepo !== baseRepo; + + let ref; + if (isFromFork) { + // For forks, we can't dispatch to the fork's branch, so we'll trigger on main/master + // but pass the PR info so the workflow can checkout the right commit + console.log(`PR is from fork (${prRepo}), dispatching on default branch`); + + // Get the default branch + const repo = await github.rest.repos.get({ + owner: context.repo.owner, + repo: context.repo.repo + }); + ref = repo.data.default_branch; + console.log(`Using default branch: ${ref}`); + } else { + // For same-repo PRs, use 
the actual branch + ref = '${{ needs.check-comment.outputs.pr-ref }}'; + console.log(`PR is from same repo, using branch: ${ref}`); + } + + const response = await github.rest.actions.createWorkflowDispatch({ + owner: context.repo.owner, + repo: context.repo.repo, + workflow_id: 'docker-image.yml', + ref: ref, + inputs: { + pr_number: '${{ needs.check-comment.outputs.pr-number }}', + pr_ref: '${{ needs.check-comment.outputs.pr-ref }}', + pr_sha: '${{ needs.check-comment.outputs.pr-sha }}' + } + }); + + console.log(`Triggered workflow dispatch for PR #${{ needs.check-comment.outputs.pr-number }} on ref: ${ref}`); + + - name: Comment on PR + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: ${{ needs.check-comment.outputs.pr-number }}, + body: '🚀 Docker build triggered for PR #${{ needs.check-comment.outputs.pr-number }}!\n\nCheck the [Actions tab](https://github.com/${{ github.repository }}/actions) for build progress.' + }); \ No newline at end of file diff --git a/.github/workflows/push_release.yaml b/.github/workflows/push_release.yaml index c79769461..3bb050621 100644 --- a/.github/workflows/push_release.yaml +++ b/.github/workflows/push_release.yaml @@ -58,6 +58,62 @@ jobs: fi echo "📝 Previous tag: ${PREVIOUS_TAG:-'none (first release)'}" + - name: Fetch changelog from local file + run: | + # Path to the local changelog file + CHANGELOG_FILE="data/Upload-Assistant-release_notes.md" + + echo "🔍 Attempting to read changelog from: $CHANGELOG_FILE" + + # Check if the file exists and read it + if [ -f "$CHANGELOG_FILE" ]; then + # Read the file content + GIST_CONTENT=$(cat "$CHANGELOG_FILE") + + # Check if content is not empty + if [ -n "$GIST_CONTENT" ] && [ ${#GIST_CONTENT} -gt 10 ]; then + echo "✅ Successfully read content from local file" + + # Get the first line and check if it matches the version + FIRST_LINE=$(echo "$GIST_CONTENT" | head -n 1) + echo "🔍 First line of file: '$FIRST_LINE'" + echo "🔍 Expected version: '${{ env.VERSION }}'" + + # Check if first line matches the version (with or without markdown formatting) + if [[ "$FIRST_LINE" == "${{ env.VERSION }}" ]] || [[ "$FIRST_LINE" == "# ${{ env.VERSION }}" ]] || [[ "$FIRST_LINE" == "## ${{ env.VERSION }}" ]]; then + echo "✅ First line matches version, removing it and using rest of content" + # Remove the first line and use the rest + PROCESSED_CONTENT=$(echo "$GIST_CONTENT" | tail -n +2) + else + echo "⚠️ First line doesn't match version '${{ env.VERSION }}', skipping file content" + PROCESSED_CONTENT="" + fi + + # Only set the changelog if we have processed content + if [ -n "$PROCESSED_CONTENT" ] && [ ${#PROCESSED_CONTENT} -gt 5 ]; then + echo "✅ Using local file changelog content" + # Save to environment variable + { + echo "GIST_CHANGELOG<> $GITHUB_ENV + else + echo "⚠️ No valid content after processing, skipping file" + echo "GIST_CHANGELOG=" >> $GITHUB_ENV + fi + else + echo "⚠️ File content appears to be empty or too short, skipping" + echo "GIST_CHANGELOG=" >> $GITHUB_ENV + fi + else + echo "⚠️ Changelog file '$CHANGELOG_FILE' not found, continuing without it" + echo "GIST_CHANGELOG=" >> $GITHUB_ENV + fi + - name: Generate changelog from merged PRs and commits run: | if [ "${{ env.PREVIOUS_TAG }}" = "initial" ]; then @@ -71,6 +127,12 @@ jobs: # Create changelog { echo "CHANGELOG<> "$VERSION_FILE" fi - # Prepend new release info + # Update version and prepend new release info TEMP_FILE=$(mktemp) + + # Start with 
the NEW version (maintaining 'v' prefix format) echo "__version__ = \"${{ env.VERSION }}\"" > "$TEMP_FILE" echo "" >> "$TEMP_FILE" echo "\"\"\"" >> "$TEMP_FILE" echo "Release Notes for version ${{ env.VERSION }} ($TIMESTAMP):" >> "$TEMP_FILE" echo "" >> "$TEMP_FILE" - echo "${{ env.CHANGELOG }}" | sed 's/^/# /' >> "$TEMP_FILE" + cat <> "$TEMP_FILE" + ${{ env.CHANGELOG }} + EOF echo "\"\"\"" >> "$TEMP_FILE" echo "" >> "$TEMP_FILE" - # Skip the first line of existing version.py (old __version__) + # Add the rest of the existing file (skip first line since we're replacing it) if [ -f "$VERSION_FILE" ]; then tail -n +2 "$VERSION_FILE" >> "$TEMP_FILE" fi diff --git a/.gitignore b/.gitignore index 59cedbafd..4cb225bfa 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,14 @@ data/cookies/*.txt data/cookies/*.pkl data/cookies/*.pickle data/banned/*.* +data/web_ui/*.* +data/web_ui/templates/*.html +data/web_ui/static/js/*.js +data/web_ui/*.py +requirements.txt +docker-compose.yml +Dockerfile +bin/mkbrr/* *.mkv .vscode/ __pycache__/ @@ -14,3 +22,7 @@ tmp/* .DS_Store user-args.json /.vs +data/nfos/* +data/*.json +.venv/* +venv/* diff --git a/Dockerfile b/Dockerfile index cd87dfd37..25f76de33 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,11 +4,11 @@ FROM python:3.12 RUN apt-get update && \ apt-get install -y --no-install-recommends \ ffmpeg \ - mediainfo=23.04-1 \ git \ g++ \ cargo \ mktorrent \ + mediainfo \ rustc \ mono-complete \ nano && \ @@ -18,12 +18,19 @@ RUN apt-get update && \ RUN python -m venv /venv ENV PATH="/venv/bin:$PATH" -# Install wheel and other Python dependencies -RUN pip install --upgrade pip wheel +# Install wheel, requests (for DVD MediaInfo download), and other Python dependencies +RUN pip install --upgrade pip wheel requests -# Set the working directory in the container +# Install Web UI dependencies (in venv) +RUN pip install --no-cache-dir flask flask-cors + +# Set the working directory FIRST WORKDIR /Upload-Assistant +# Copy DVD MediaInfo download script and run it +COPY bin/get_dvd_mediainfo_docker.py bin/ +RUN python3 bin/get_dvd_mediainfo_docker.py + # Copy the Python requirements file and install Python dependencies COPY requirements.txt . RUN pip install -r requirements.txt @@ -35,7 +42,7 @@ RUN chmod +x bin/download_mkbrr_for_docker.py # Download only the required mkbrr binary RUN python3 bin/download_mkbrr_for_docker.py -# Copy the rest of the application +# Copy the rest of the application (including web_ui) COPY . . # Ensure mkbrr is executable @@ -45,5 +52,12 @@ RUN find bin/mkbrr -type f -name "mkbrr" -exec chmod +x {} \; RUN mkdir -p /Upload-Assistant/tmp && chmod 777 /Upload-Assistant/tmp ENV TMPDIR=/Upload-Assistant/tmp +# Add environment variable to enable/disable Web UI +ENV ENABLE_WEB_UI=false + +# Make entrypoint script executable +RUN chmod +x docker-entrypoint.sh + # Set the entry point for the container -ENTRYPOINT ["python", "/Upload-Assistant/upload.py"] \ No newline at end of file +ENTRYPOINT ["/Upload-Assistant/docker-entrypoint.sh"] +CMD ["python", "upload.py"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..92926e88a --- /dev/null +++ b/LICENSE @@ -0,0 +1,116 @@ +# Upload Assistant Public License (UAPL) v1.0 +Copyright © Audionut & wastaken7 + +Permission is hereby granted to any person obtaining a copy of this software and +associated documentation files (the “Software”) to use, copy, modify, merge, +publish, and distribute the Software, subject to the following conditions: + +--- + +## 1. 
Branding and Attribution Protection + +**1.1.** +The names **“Upload Assistant”**, **“Audionut”**, and any confusingly similar +variants **may not** be used in derivative works in a manner that suggests the +derivative is an official version, endorsed version, approved continuation, or +authorized build of the original project. + +**1.2.** +Derivative works **must not** replace, modify, rename, or repurpose: +- Any identifiers, markers, strings, signatures, metadata blocks, timestamps, + or other automated output branding generated by the Software, **except to remove such identifiers entirely**; +- **Any license headers or attribution notices in source files must be preserved**. + + +**1.3.** +Derivative works **may not** introduce new identifiers, branding, promotional +markings, or tool-generated labels or HTTP/S links that imply the derivative is: +- an **upload tool**, +- a replacement or continuation of **Upload Assistant**, +- or any other tool intended to performing the same function or role, +by **reusing or adapting the original branding mechanisms, output structures, +or auto-generated identifiers** provided by the Software. + +**1.4.** +The Software and derivative works **must not** misrepresent authorship, +affiliation, origin, identity, project lineage, or repository linkage. + +--- + +## 2. Fork Disclosure Requirement + +Any distributed derivative work **must disclose** that it is a fork or +modification of the original “Upload Assistant” project and must include a clear +notice that it is **not** the official version. + +--- + +## 3. Credential and Key Restrictions + +**3.1.** +The embedded, obfuscated key included with the Software is licensed +**solely for use in the official “Upload Assistant” project**. + +**3.2.** +You may **not**: +- extract, reuse, modify, deobfuscate, or redistribute the key; +- use the key in forks, derivative works, modified builds, or alternative tools; +- circumvent or attempt to circumvent any obfuscation or protective measures. + +**3.3.** +Any use of the key outside the official project is strictly prohibited and +terminates all rights granted by this license. + +--- + +## 4. Modification and Distribution + +You may modify the Software and distribute such modifications, provided that: + +**4.1.** +You comply with all branding and credential restrictions in Sections 1 and 3. + +**4.2.** +You retain this license text in full. + +**4.3.** +You include a notice stating: +*“This is a modified version of the Upload Assistant project and is not +affiliated with or endorsed by Audionut.”* + +**4.4.** +You do not imply that the upstream project or author endorses your modifications. + +--- + +## 5. No Warranty + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. + +THE AUTHOR SHALL NOT BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY +ARISING FROM THE SOFTWARE OR ITS USE. + +--- + +## 6. Termination + +Any breach of Sections **1**, **2**, or **3** results in **automatic and immediate +termination** of all rights granted under this license. + +Upon termination, you must cease all distribution and delete all copies of the +Software and derivative works in your possession. + +--- + +## 7. Compatibility + +This license is **not** an OSI-approved license. 
+Software licensed under this license may not be combined with code under +licenses that require unrestricted modification or removal of branding or +identification clauses. + +--- + +**END OF LICENSE** diff --git a/README.md b/README.md index 0d0fd8af3..c2adb8d00 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,14 @@ -[![Create and publish a Docker image](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml/badge.svg?branch=master)](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml) [![Test run (Master Branch)](https://img.shields.io/github/actions/workflow/status/Audionut/Upload-Assistant/test-run.yaml?branch=master&label=Test%20run%20(Master%20Branch%202025-07-04%2006:06%20UTC))](https://github.com/Audionut/Upload-Assistant/actions/workflows/test-run.yaml?query=branch%3Amaster) [![Test run (5.1.5.2)](https://img.shields.io/github/actions/workflow/status/Audionut/Upload-Assistant/test-run.yaml?branch=5.1.5.2&label=Test%20run%20(5.1.5.2%202025-07-19%2014:24%20UTC))](https://github.com/Audionut/Upload-Assistant/actions/workflows/test-run.yaml?query=branch%3A5.1.5.2) +[![Create and publish a Docker image](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml/badge.svg?branch=master)](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml) [![Test run (Master Branch)](https://img.shields.io/github/actions/workflow/status/Audionut/Upload-Assistant/test-run.yaml?branch=master&label=Test%20run%20(Master%20Branch%202025-12-21%2000:45%20UTC))](https://github.com/Audionut/Upload-Assistant/actions/workflows/test-run.yaml?query=branch%3Amaster) Discord support https://discord.gg/QHHAZu7e2A -# Audionut's Upload Assistant +# Upload Assistant A simple tool to take the work out of uploading. This project is a fork of the original work of L4G https://github.com/L4GSP1KE/Upload-Assistant Immense thanks to him for establishing this project. Without his (and supporters) time and effort, this fork would not be a thing. -What started as simply pushing some pull requests to keep the main repo inline, as L4G seemed busy with IRL, has since snowballed into full time development, bugs and all. - -Many other forks exist, most are simply a rebranding of this fork without any credit whatsoever. -Better just to be on this fork and bug me about my bugs, rather than bugging someone who can ctrl+c/ctrl+v, but likely can't fix the bugs. +Many thanks to all who have contributed. ## What It Can Do: - Generates and Parses MediaInfo/BDInfo. @@ -24,6 +21,7 @@ Better just to be on this fork and bug me about my bugs, rather than bugging som - Generates custom .torrents without useless top level folders/nfos. - Can re-use existing torrents instead of hashing new. - Can automagically search qBitTorrent version 5+ clients for matching existing torrent. + - Includes support for [qui](https://github.com/autobrr/qui) - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules. - Checks for existing releases already on site. - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder). @@ -31,40 +29,42 @@ Better just to be on this fork and bug me about my bugs, rather than bugging som - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs. ## Supported Sites: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-  <tr><th>Name</th><th>Acronym</th><th>Name</th><th>Acronym</th></tr>
-  <tr><td>Aither</td><td>AITHER</td><td>Alpharatio</td><td>AR</td></tr>
-  <tr><td>Amigos Share Club</td><td>ASC</td><td>AnimeLovers</td><td>AL</td></tr>
-  <tr><td>Anthelion</td><td>ANT</td><td>AsianCinema</td><td>ACM</td></tr>
-  <tr><td>Beyond-HD</td><td>BHD</td><td>BitHDTV</td><td>BHDTV</td></tr>
-  <tr><td>Blutopia</td><td>BLU</td><td>BrasilTracker</td><td>BT</td></tr>
-  <tr><td>CapybaraBR</td><td>CBR</td><td>Cinematik</td><td>TIK</td></tr>
-  <tr><td>DarkPeers</td><td>DP</td><td>DigitalCore</td><td>DC</td></tr>
-  <tr><td>FearNoPeer</td><td>FNP</td><td>FileList</td><td>FL</td></tr>
-  <tr><td>Friki</td><td>FRIKI</td><td>hawke-uno</td><td>HUNO</td></tr>
-  <tr><td>HDBits</td><td>HDB</td><td>HD-Space</td><td>HDS</td></tr>
-  <tr><td>HD-Torrents</td><td>HDT</td><td>HomieHelpDesk</td><td>HHD</td></tr>
-  <tr><td>ItaTorrents</td><td>ITT</td><td>Last Digital Underground</td><td>LDU</td></tr>
-  <tr><td>Lat-Team</td><td>LT</td><td>Locadora</td><td>LCD</td></tr>
-  <tr><td>LST</td><td>LST</td><td>MoreThanTV</td><td>MTV</td></tr>
-  <tr><td>Nebulance</td><td>NBL</td><td>OldToonsWorld</td><td>OTW</td></tr>
-  <tr><td>OnlyEncodes+</td><td>OE</td><td>PassThePopcorn</td><td>PTP</td></tr>
-  <tr><td>Polish Torrent</td><td>PTT</td><td>Portugas</td><td>PT</td></tr>
-  <tr><td>PrivateSilverScreen</td><td>PSS</td><td>PTerClub</td><td>PTER</td></tr>
-  <tr><td>Racing4Everyone</td><td>R4E</td><td>Rastastugan</td><td>RAS</td></tr>
-  <tr><td>ReelFLiX</td><td>RF</td><td>RetroFlix</td><td>RTF</td></tr>
-  <tr><td>Samaritano</td><td>SAM</td><td>seedpool</td><td>SP</td></tr>
-  <tr><td>Shareisland</td><td>SHRI</td><td>SkipTheCommericals</td><td>STC</td></tr>
-  <tr><td>SpeedApp</td><td>SPD</td><td>Swarmazon</td><td>SN</td></tr>
-  <tr><td>Toca Share</td><td>TOCA</td><td>TorrentHR</td><td>THR</td></tr>
-  <tr><td>TorrentLeech</td><td>TL</td><td>ToTheGlory</td><td>TTG</td></tr>
-  <tr><td>TVChaosUK</td><td>TVC</td><td>UHDShare</td><td>UHD</td></tr>
-  <tr><td>ULCX</td><td>ULCX</td><td>UTOPIA</td><td>UTP</td></tr>
-  <tr><td>YOiNKED</td><td>YOINK</td><td>YUSCENE</td><td>YUS</td></tr>
+ +|Name|Acronym|Name|Acronym| +|-|:-:|-|:-:| +|Aither|AITHER|Alpharatio|AR| +|AmigosShareClub|ASC|AnimeLovers|AL| +|Anthelion|ANT|AsianCinema|ACM| +|AvistaZ|AZ|Beyond-HD|BHD| +|BitHDTV|BHDTV|Blutopia|BLU| +|BrasilJapão-Share|BJS|BrasilTracker|BT| +|CapybaraBR|CBR|Cinematik|TIK| +|CinemaZ|CZ|DarkPeers|DP| +|DigitalCore|DC|Emuwarez|EMUW| +|FearNoPeer|FNP|FileList|FL| +|Friki|FRIKI|FunFile|FF| +|GreatPosterWall|GPW|hawke-uno|HUNO| +|HDBits|HDB|HD-Space|HDS| +|HD-Torrents|HDT|HomieHelpDesk|HHD| +|ImmortalSeed|IS|InfinityHD|IHD| +|ItaTorrents|ITT|LastDigitalUnderground|LDU| +|Lat-Team|LT|Locadora|LCD| +|LST|LST|MoreThanTV|MTV| +|Nebulance|NBL|OldToonsWorld|OTW| +|OnlyEncodes+|OE|PassThePopcorn|PTP| +|PolishTorrent|PTT|Portugas|PT| +|PTerClub|PTER|PrivateHD|PHD| +|PTSKIT|PTS|Racing4Everyone|R4E| +|Rastastugan|RAS|ReelFLiX|RF| +|RetroFlix|RTF|Samaritano|SAM| +|seedpool|SP|ShareIsland|SHRI| +|SkipTheCommerials|STC|SpeedApp|SPD| +|Swarmazon|SN|TorrentHR|THR| +|Torrenteros|TTR|TorrentLeech|TL| +|The Leach Zone|TLZ|ToTheGlory|TTG| +|TVChaosUK|TVC|ULCX|ULCX| +|UTOPIA|UTP|YOiNKED|YOINK| +|YUSCENE|YUS||| ## **Setup:** - **REQUIRES AT LEAST PYTHON 3.9 AND PIP3** @@ -91,9 +91,9 @@ Better just to be on this fork and bug me about my bugs, rather than bugging som - Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)) - tmdb_api key can be obtained from https://www.themoviedb.org/settings/api - image host api keys can be obtained from their respective sites - + **Additional Resources are found in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)** - + Feel free to contact me if you need help, I'm not that hard to find. ## **Updating:** @@ -105,16 +105,16 @@ Better just to be on this fork and bug me about my bugs, rather than bugging som - Run `python3 config-generator.py` and select to grab new UA config options. ## **CLI Usage:** - + `python3 upload.py "/path/to/content" --args` - + Args are OPTIONAL and ALWAYS follow path, for a list of acceptable args, pass `--help`. Path works best in quotes. 
## **Docker Usage:** Visit our wonderful [docker usage wiki page](https://github.com/Audionut/Upload-Assistant/wiki/Docker) - Also see this excellent video put together by a community memeber https://videos.badkitty.zone/ua + Also see this excellent video put together by a community member https://videos.badkitty.zone/ua ## **Attributions:** diff --git a/bin/MI/get_linux_mi.py b/bin/MI/get_linux_mi.py new file mode 100644 index 000000000..b7ca47ff8 --- /dev/null +++ b/bin/MI/get_linux_mi.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import os +import platform +import requests +import shutil +import zipfile +from pathlib import Path +from tempfile import TemporaryDirectory +from src.console import console + +MEDIAINFO_VERSION = "23.04" +MEDIAINFO_CLI_BASE_URL = "https://mediaarea.net/download/binary/mediainfo" +MEDIAINFO_LIB_BASE_URL = "https://mediaarea.net/download/binary/libmediainfo0" + + +def get_filename(system: str, arch: str, library_type: str = "cli") -> str: + if system == "linux": + if library_type == "cli": + # MediaInfo CLI uses Lambda (pre-compiled) version + return f"MediaInfo_CLI_{MEDIAINFO_VERSION}_Lambda_{arch}.zip" + elif library_type == "lib": + # MediaInfo library uses DLL version + return f"MediaInfo_DLL_{MEDIAINFO_VERSION}_Lambda_{arch}.zip" + else: + raise ValueError(f"Unknown library_type: {library_type}") + else: + return + + +def get_url(/service/system: str, arch: str, library_type: str = "cli") -> str: + filename = get_filename(system, arch, library_type) + if library_type == "cli": + return f"{MEDIAINFO_CLI_BASE_URL}/{MEDIAINFO_VERSION}/{filename}" + elif library_type == "lib": + return f"{MEDIAINFO_LIB_BASE_URL}/{MEDIAINFO_VERSION}/{filename}" + else: + raise ValueError(f"Unknown library_type: {library_type}") + + +def download_file(url: str, output_path: Path) -> None: + response = requests.get(url, stream=True, timeout=30) + response.raise_for_status() + + with open(output_path, "wb") as f: + for chunk in response.iter_content(chunk_size=8192): + f.write(chunk) + + +def extract_linux(cli_archive: Path, lib_archive: Path, output_dir: Path) -> None: + # Extract MediaInfo CLI from zip file + with zipfile.ZipFile(cli_archive, 'r') as zip_ref: + file_list = zip_ref.namelist() + mediainfo_file = output_dir / "mediainfo" + + # Look for the mediainfo binary in the archive + for member in file_list: + if member.endswith('/mediainfo') or member == 'mediainfo': + zip_ref.extract(member, output_dir.parent) + extracted_path = output_dir.parent / member + shutil.move(str(extracted_path), str(mediainfo_file)) + break + + # Extract MediaInfo library + with zipfile.ZipFile(lib_archive, 'r') as zip_ref: + file_list = zip_ref.namelist() + lib_file = output_dir / "libmediainfo.so.0" + + # Look for the library file in the archive + if "lib/libmediainfo.so.0.0.0" in file_list: + zip_ref.extract("lib/libmediainfo.so.0.0.0", output_dir.parent) + extracted_path = output_dir.parent / "lib/libmediainfo.so.0.0.0" + shutil.move(str(extracted_path), str(lib_file)) + + # Clean up empty lib directory if it exists + lib_dir = output_dir.parent / "lib" + if lib_dir.exists() and not any(lib_dir.iterdir()): + lib_dir.rmdir() + + +def download_dvd_mediainfo(base_dir, debug=False): + system = platform.system().lower() + machine = platform.machine().lower() + + if debug: + console.print(f"[blue]System: {system}, arch: {machine}[/blue]") + + if system not in ["linux"]: + return + + if system == "linux" and 
machine not in ["x86_64", "arm64"]: + return + + if machine == "amd64": + machine = "x86_64" + + platform_dir = "linux" + output_dir = Path(base_dir) / "bin" / "MI" / platform_dir + output_dir.mkdir(parents=True, exist_ok=True) + + if debug: + console.print(f"[blue]Output: {output_dir}[/blue]") + + cli_file = output_dir / "mediainfo" + lib_file = output_dir / "libmediainfo.so.0" + version_file = output_dir / f"version_{MEDIAINFO_VERSION}" + + if cli_file.exists() and lib_file.exists() and version_file.exists(): + if debug: + console.print(f"[blue]MediaInfo CLI and Library {MEDIAINFO_VERSION} exist[/blue]") + return str(cli_file) + console.print(f"[yellow]Downloading specific MediaInfo CLI and Library for DVD processing: {MEDIAINFO_VERSION}...[/yellow]") + # Download MediaInfo CLI + cli_url = get_url(https://github.com/system,%20machine,%20%22cli") + cli_filename = get_filename(system, machine, "cli") + + # Download MediaInfo Library + lib_url = get_url(https://github.com/system,%20machine,%20%22lib") + lib_filename = get_filename(system, machine, "lib") + + if debug: + console.print(f"[blue]MediaInfo CLI URL: {cli_url}[/blue]") + console.print(f"[blue]MediaInfo CLI filename: {cli_filename}[/blue]") + console.print(f"[blue]MediaInfo Library URL: {lib_url}[/blue]") + console.print(f"[blue]MediaInfo Library filename: {lib_filename}[/blue]") + + with TemporaryDirectory() as tmp_dir: + cli_archive = Path(tmp_dir) / cli_filename + lib_archive = Path(tmp_dir) / lib_filename + + # Download both archives + download_file(cli_url, cli_archive) + if debug: + console.print(f"[green]Downloaded {cli_filename}[/green]") + + download_file(lib_url, lib_archive) + if debug: + console.print(f"[green]Downloaded {lib_filename}[/green]") + + extract_linux(cli_archive, lib_archive, output_dir) + + if debug: + console.print("[green]Extracted library[/green]") + + with open(version_file, 'w') as f: + f.write(f"MediaInfo {MEDIAINFO_VERSION}") + + # Make CLI binary executable + if cli_file.exists(): + os.chmod(cli_file, 0o755) + + if not cli_file.exists(): + raise Exception(f"Failed to extract CLI binary to {cli_file}") + if not lib_file.exists(): + raise Exception(f"Failed to extract library to {lib_file}") + + return str(cli_file) diff --git a/bin/download_mkbrr_for_docker.py b/bin/download_mkbrr_for_docker.py index 157caa5e2..5a4436f8e 100644 --- a/bin/download_mkbrr_for_docker.py +++ b/bin/download_mkbrr_for_docker.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import platform import requests import tarfile @@ -7,7 +8,7 @@ from pathlib import Path -def download_mkbrr_for_docker(base_dir=".", version="v1.8.1"): +def download_mkbrr_for_docker(base_dir=".", version="v1.14.0"): """Download mkbrr binary for Docker - synchronous version""" system = platform.system().lower() diff --git a/bin/get_dvd_mediainfo_docker.py b/bin/get_dvd_mediainfo_docker.py new file mode 100644 index 000000000..98b762a95 --- /dev/null +++ b/bin/get_dvd_mediainfo_docker.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +""" +Docker-specific script to download DVD-capable MediaInfo binaries for Linux. +This script downloads specialized MediaInfo CLI and library binaries that +support DVD IFO/VOB file parsing with language information. 
+""" +import os +import platform +import requests +import shutil +import zipfile +from pathlib import Path +from tempfile import TemporaryDirectory + +MEDIAINFO_VERSION = "23.04" +MEDIAINFO_CLI_BASE_URL = "https://mediaarea.net/download/binary/mediainfo" +MEDIAINFO_LIB_BASE_URL = "https://mediaarea.net/download/binary/libmediainfo0" + + +def get_filename(system: str, arch: str, library_type: str = "cli") -> str: + """Get the appropriate filename for MediaInfo download based on system and architecture.""" + if system == "linux": + if library_type == "cli": + # MediaInfo CLI uses Lambda (pre-compiled) version for better DVD support + return f"MediaInfo_CLI_{MEDIAINFO_VERSION}_Lambda_{arch}.zip" + elif library_type == "lib": + # MediaInfo library uses DLL version for better compatibility + return f"MediaInfo_DLL_{MEDIAINFO_VERSION}_Lambda_{arch}.zip" + else: + raise ValueError(f"Unknown library_type: {library_type}") + else: + raise ValueError(f"Unsupported system: {system}") + + +def get_url(/service/system: str, arch: str, library_type: str = "cli") -> str: + """Construct download URL for MediaInfo components.""" + filename = get_filename(system, arch, library_type) + if library_type == "cli": + return f"{MEDIAINFO_CLI_BASE_URL}/{MEDIAINFO_VERSION}/{filename}" + elif library_type == "lib": + return f"{MEDIAINFO_LIB_BASE_URL}/{MEDIAINFO_VERSION}/{filename}" + else: + raise ValueError(f"Unknown library_type: {library_type}") + + +def download_file(url: str, output_path: Path) -> None: + """Download a file from URL to specified path.""" + print(f"Downloading: {url}") + response = requests.get(url, stream=True, timeout=60) + response.raise_for_status() + + with open(output_path, "wb") as f: + for chunk in response.iter_content(chunk_size=8192): + f.write(chunk) + print(f"Downloaded: {output_path.name}") + + +def extract_linux_binaries(cli_archive: Path, lib_archive: Path, output_dir: Path) -> None: + """Extract MediaInfo CLI and library from downloaded archives.""" + print("Extracting MediaInfo binaries...") + + # Extract MediaInfo CLI from zip file + with zipfile.ZipFile(cli_archive, 'r') as zip_ref: + file_list = zip_ref.namelist() + mediainfo_file = output_dir / "mediainfo" + + print(f"CLI archive contents: {file_list}") + + # Look for the mediainfo binary in the archive + for member in file_list: + if member.endswith('/mediainfo') or member == 'mediainfo': + zip_ref.extract(member, output_dir.parent) + extracted_path = output_dir.parent / member + shutil.move(str(extracted_path), str(mediainfo_file)) + print(f"Extracted CLI binary: {mediainfo_file}") + break + else: + raise Exception("MediaInfo CLI binary not found in archive") + + # Extract MediaInfo library + with zipfile.ZipFile(lib_archive, 'r') as zip_ref: + file_list = zip_ref.namelist() + lib_file = output_dir / "libmediainfo.so.0" + + print(f"Library archive contents: {file_list}") + + # Look for the library file in the archive + lib_candidates = [ + "lib/libmediainfo.so.0.0.0", + "libmediainfo.so.0.0.0", + "libmediainfo.so.0", + "MediaInfo/libmediainfo.so.0.0.0", + "MediaInfo/lib/libmediainfo.so.0.0.0" + ] + + for candidate in lib_candidates: + if candidate in file_list: + zip_ref.extract(candidate, output_dir.parent) + extracted_path = output_dir.parent / candidate + # Move to final location + shutil.move(str(extracted_path), str(lib_file)) + # Set appropriate permissions for library file (readable by all) + os.chmod(lib_file, 0o644) + print(f"Extracted library: {lib_file}") + break + else: + raise 
Exception("MediaInfo library not found in archive") + + # Clean up empty lib directory if it exists + lib_dir = output_dir.parent / "lib" + if lib_dir.exists() and not any(lib_dir.iterdir()): + lib_dir.rmdir() + + +def download_dvd_mediainfo_docker(): + """Download DVD-specific MediaInfo binaries for Docker container.""" + system = platform.system().lower() + machine = platform.machine().lower() + + print(f"System: {system}, Architecture: {machine}") + + if system != "linux": + raise Exception(f"This script is only for Linux containers, got: {system}") + + # Normalize architecture names + if machine in ["amd64", "x86_64"]: + arch = "x86_64" + elif machine in ["arm64", "aarch64"]: + arch = "arm64" + else: + raise Exception(f"Unsupported architecture: {machine}") + + # Set up output directory in the container + base_dir = Path("/Upload-Assistant") + output_dir = base_dir / "bin" / "MI" / "linux" + output_dir.mkdir(parents=True, exist_ok=True) + + print(f"Installing DVD MediaInfo to: {output_dir}") + + cli_file = output_dir / "mediainfo" + lib_file = output_dir / "libmediainfo.so.0" + version_file = output_dir / f"version_{MEDIAINFO_VERSION}" + + # Check if already installed + if cli_file.exists() and lib_file.exists() and version_file.exists(): + print(f"DVD MediaInfo {MEDIAINFO_VERSION} already installed") + return str(cli_file) + + print(f"Downloading DVD-specific MediaInfo CLI and Library: {MEDIAINFO_VERSION}") + + # Get download URLs + cli_url = get_url(https://github.com/system,%20arch,%20%22cli") + lib_url = get_url(https://github.com/system,%20arch,%20%22lib") + + cli_filename = get_filename(system, arch, "cli") + lib_filename = get_filename(system, arch, "lib") + + print(f"CLI URL: {cli_url}") + print(f"Library URL: {lib_url}") + + # Download and extract in temporary directory + with TemporaryDirectory() as tmp_dir: + cli_archive = Path(tmp_dir) / cli_filename + lib_archive = Path(tmp_dir) / lib_filename + + # Download both archives + download_file(cli_url, cli_archive) + download_file(lib_url, lib_archive) + + # Extract binaries + extract_linux_binaries(cli_archive, lib_archive, output_dir) + + # Create version marker + with open(version_file, 'w') as f: + f.write(f"MediaInfo {MEDIAINFO_VERSION} - DVD Support") + + # Make CLI binary executable and verify permissions + if cli_file.exists(): + # Set full executable permissions (owner: rwx, group: rx, other: rx) + os.chmod(cli_file, 0o755) + # Verify permissions were set correctly + file_stat = cli_file.stat() + is_executable = bool(file_stat.st_mode & 0o111) # Check if any execute bit is set + if is_executable: + print(f"✓ Set executable permissions on: {cli_file} (mode: {oct(file_stat.st_mode)})") + else: + raise Exception(f"Failed to set executable permissions on: {cli_file}") + else: + raise Exception(f"CLI binary not found for permission setting: {cli_file}") + + # Verify installation and permissions + if not cli_file.exists(): + raise Exception(f"Failed to install CLI binary: {cli_file}") + if not lib_file.exists(): + raise Exception(f"Failed to install library: {lib_file}") + + # Final executable verification + cli_stat = cli_file.stat() + if not (cli_stat.st_mode & 0o111): + raise Exception(f"CLI binary is not executable: {cli_file}") + else: + print(f"✓ CLI binary is executable: {oct(cli_stat.st_mode)}") + + print(f"Successfully installed DVD MediaInfo {MEDIAINFO_VERSION}") + print(f"CLI: {cli_file}") + print(f"Library: {lib_file}") + + return str(cli_file) + + +if __name__ == "__main__": + try: + 
download_dvd_mediainfo_docker() + print("DVD MediaInfo installation completed successfully!") + except Exception as e: + print(f"ERROR: Failed to install DVD MediaInfo: {e}") + exit(1) diff --git a/bin/get_mkbrr.py b/bin/get_mkbrr.py index 23c631367..c4d1f5187 100644 --- a/bin/get_mkbrr.py +++ b/bin/get_mkbrr.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import platform import requests import tarfile diff --git a/cogs/commands.py b/cogs/commands.py index c40d66123..fdaf485d4 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from src.prep import Prep from src.args import Args from src.clients import Clients diff --git a/cogs/redaction.py b/cogs/redaction.py index f50841a5d..d1b504837 100644 --- a/cogs/redaction.py +++ b/cogs/redaction.py @@ -1,18 +1,21 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import re import json SENSITIVE_KEYS = { - "token", "passkey", "password", "auth", "cookie", "csrf", "email", "username", "user", "key", "info_hash", "downloadUrl" + "token", "passkey", "password", "auth", "cookie", "csrf", "email", "username", "user", "key", "info_hash", "AntiCsrfToken", "torrent_pass" } def redact_value(val): """Redact sensitive values, including passkeys in URLs.""" if isinstance(val, str): - # Redact passkeys/hashes in URLs (e.g. .../download/45626.ABCDEFGHIJKLMNOP...) - val = re.sub(r'(\.[a-zA-Z0-9]{10,})(?=[^a-zA-Z0-9]|$)', '.[REDACTED]', val) + # Redact passkeys in announce URLs (e.g. //announce) + val = re.sub(r'(?<=/)[a-zA-Z0-9]{10,}(?=/announce)', '[REDACTED]', val) + # Redact content between /proxy/ and /api (e.g. /proxy//api) + val = re.sub(r'(?<=/proxy/)[^/]+(?=/api)', '[REDACTED]', val) # Redact query params like ?passkey=... or &token=... 
- val = re.sub(r'([?&](passkey|key|token|auth|info_hash)=)[^&]+', r'\1[REDACTED]', val, flags=re.I) + val = re.sub(r'([?&](passkey|key|token|auth|info_hash|torrent_pass)=)[^&]+', r'\1[REDACTED]', val, flags=re.I) # Redact long hex or base64-like strings (common for tokens) val = re.sub(r'\b[a-fA-F0-9]{32,}\b', '[REDACTED]', val) return val diff --git a/config-generator.py b/config-generator.py index 3baf28f4e..d65e3fd85 100644 --- a/config-generator.py +++ b/config-generator.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import re @@ -332,13 +333,18 @@ def configure_default_section(existing_defaults, example_defaults, config_commen skip_settings.update(linked_group["settings"]) else: is_password = key in ["api_key", "passkey", "rss_key", "tvdb_token", "tmdb_api", "tvdb_api", "btn_api"] or "password" in key.lower() or key.endswith("_key") or key.endswith("_api") or key.endswith("_url") - config_defaults[key] = get_user_input( + value = get_user_input( f"Setting '{key}'", default=str(default_value), is_password=is_password, existing_value=existing_defaults.get(key) ) + if default_value is None and (value == "" or value == "None"): + config_defaults[key] = None + else: + config_defaults[key] = value + if key in linked_settings: linked_group = linked_settings[key] if not linked_group["condition"](config_defaults[key]): @@ -363,6 +369,7 @@ def get_img_host(config_defaults, existing_defaults, example_defaults, config_co "dalexni": "dalexni_api", "ziplinestudio": ["zipline_url", "zipline_api_key"], "passtheimage": "passtheima_ge_api", + "seedpool_cdn": "seedpool_cdn_api", "imgbox": None, "pixhost": None } @@ -374,7 +381,7 @@ def get_img_host(config_defaults, existing_defaults, example_defaults, config_co # Get existing image hosts if available existing_hosts = [] for i in range(1, 11): - key = f"image_host_{i}" + key = f"img_host_{i}" if key in existing_defaults and existing_defaults[key]: existing_hosts.append(existing_defaults[key].strip().lower()) @@ -404,7 +411,7 @@ def get_img_host(config_defaults, existing_defaults, example_defaults, config_co if host_input in img_host_api_map: valid_host = True - host_key = f"image_host_{i}" + host_key = f"img_host_{i}" config_defaults[host_key] = host_input # Configure API key(s) for this host, if needed @@ -823,6 +830,9 @@ def write_dict(d, indent_level=1): elif isinstance(value, bool): # Ensure booleans are capitalized file.write(f"{str(value).capitalize()},\n") + elif isinstance(value, type(None)): + # Handle None values + file.write("None,\n") else: # Other values with trailing comma file.write(f"{json.dumps(value, ensure_ascii=False)},\n") @@ -904,7 +914,7 @@ def write_dict(d, indent_level=1): print() # TRACKERS section - update_trackers = input("Do you want to update something in thee TRACKERS section? (y/n): ").lower() == "y" + update_trackers = input("Do you want to update something in the TRACKERS section? (y/n): ").lower() == "y" if update_trackers: existing_trackers = existing_config.get("TRACKERS", {}) example_trackers = example_config.get("TRACKERS", {}) diff --git a/data/Upload-Assistant-release_notes.md b/data/Upload-Assistant-release_notes.md new file mode 100644 index 000000000..a8c424282 --- /dev/null +++ b/data/Upload-Assistant-release_notes.md @@ -0,0 +1,24 @@ +v6.3.0 + +## RELEASE NOTES + - This release was slower than usual. Some notable changes.... + - Added TLZ, IHD, TTR + - Added support for multiple client injecting. 
+ - Improved the image host handling for sites that have banned hosts. + - Improved the year detection for RTF when uploading TV content. + - Fixed a bug with DVD mediainfo handling. + - Fixed audio track handling that could cause missed dual-audio type handling. + - Fixed image compression when using image overlay. + - Fixed some mal id handling. + - Added support for detecting ATMOS height channels. + - Fixed auto torrent searching to only search valid trackers in config. + - Added a webui for docker. + - wastaken7 done a bunch of further refactoring. + +## New config options - see example.py + - Multiple injecion clients can be added. + - Removed the need for passkey from SPD and DC. + - Added modq support for LT. + +## Special shout out +- blueberry, who is banned from UA github, and from a bunch of sites, due to invite begging/trading, and begging for internal status. It takes a special kind of stupid to modify a handful of lines and claim something as your own. \ No newline at end of file diff --git a/data/example-config.py b/data/example-config.py index 041f41c97..eda60b344 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 config = { "DEFAULT": { # will print a notice if an update is available @@ -9,19 +10,11 @@ # visit "https://www.themoviedb.org/settings/api" copy api key and insert below "tmdb_api": "", - # tvdb api key - # visit "https://www.thetvdb.com/dashboard/account/apikey" copy api key and insert below - "tvdb_api": "", - - # visit "https://thetvdb.github.io/v4-api/#/Login/post_login" enter api key, generate token and insert token below - # the pin in the login form is not needed (don't modify), only enter your api key - "tvdb_token": "", - # btn api key used to get details from btn "btn_api": "", # Order of image hosts. 
primary host as first with others as backup - # Available image hosts: imgbb, ptpimg, imgbox, pixhost, lensdump, ptscreens, onlyimage, dalexni, zipline, passtheimage + # Available image hosts: imgbb, ptpimg, imgbox, pixhost, lensdump, ptscreens, onlyimage, dalexni, zipline, passtheimage, seedpool_cdn "img_host_1": "", "img_host_2": "", "img_host_3": "", @@ -39,6 +32,8 @@ # custom zipline url "zipline_url": "", "zipline_api_key": "", + # Seedpool CDN API key + "seedpool_cdn_api": "", # Whether to add a logo for the show/movie from TMDB to the top of the description "add_logo": False, @@ -78,12 +73,27 @@ # Tonemap HDR - DV+HDR screenshots "tone_map": True, - # Tonemap screenshots with the following settings + # Set false to disable libtorrent ffmpeg tonemapping and use ffmpeg only + "use_libplacebo": True, + + # Set true to skip ffmpeg check, useful if you know your ffmpeg is compatible with libplacebo + # Else, when tonemapping is enabled (and used), UA will run a quick check before to decide + "ffmpeg_is_good": False, + + # Set true to skip "warming up" libplacebo + # Some systems are slow to compile libtorrent shaders, which will cause the first screenshot to fail + "ffmpeg_warmup": False, + + # Set ffmpeg compression level for screenshots (0-9) + "ffmpeg_compression": "6", + + # Tonemap screenshots with the following settings (doesn't apply when using libplacebo) # See https://ayosec.github.io/ffmpeg-filters-docs/7.1/Filters/Video/tonemap.html "algorithm": "mobius", "desat": "10.0", # Add this header above screenshots in description when screens have been tonemapped (in bbcode) + # Can be overridden in a per-tracker setting by adding this same config "tonemapped_header": "[center][code] Screenshots have been tonemapped for reference [/code][/center]", # MULTI PROCESSING @@ -94,7 +104,7 @@ "process_limit": "4", # When optimizing images, limit to this many threads spawned by each process above. - # Recommended value is the number of logical processesors on your system. + # Recommended value is the number of logical processors on your system. # This is equivalent to the old shared_seedbox setting, however the existing process # only used a single process. You probably need to limit this to 1 or 2 to avoid hogging resources. "threads": "10", @@ -104,12 +114,12 @@ # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites. # 0 equals old behavior where only the original description and images are added. - # This setting also affects PTP, however PTP requries at least 2 images for each. + # This setting also affects PTP, however PTP requires at least 2 images for each. # PTP will always use a *minimum* of 2, regardless of what is set here. "multiScreens": "2", # The next options for packed content do not effect PTP. PTP has a set standard. - # When uploading packs, you can specifiy a different screenshot thumbnail size, default 300. + # When uploading packs, you can specify a different screenshot thumbnail size, default 300. "pack_thumb_size": "300", # Description character count (including bbcode) cutoff for UNIT3D sites when **season packs only**. @@ -129,23 +139,44 @@ # You might not want to process screens/mediainfo for 40 episodes in a season pack. 
"processLimit": "10", - # Providing the option to add a description header, in bbcode, at the top of the description section - # where supported + # Providing the option to add a description header, in bbcode, at the top of the description section where supported + # Can be overridden in a per-tracker setting by adding this same config "custom_description_header": "", # Providing the option to add a header, in bbcode, above the screenshot section where supported + # Can be overridden in a per-tracker setting by adding this same config "screenshot_header": "", - # Enable lossless PNG Compression (True/False) - "optimize_images": True, + # Applicable only to raw discs (Blu-ray/DVD). + # Providing the option to add a header, in bbcode, above the section featuring screenshots of the Disc menus, where supported + # Can be overridden in a per-tracker setting by adding this same config + "disc_menu_header": "", + + # Allows adding a custom signature, in BBCode, at the bottom of the description section + # Can be overridden in a per-tracker setting by adding this same config + "custom_signature": "", # Which client are you using. "default_torrent_client": "qbittorrent", + # A list of clients to use for injection (aka actually adding the torrent for uploading) + # eg: ['qbittorrent', 'rtorrent'] + "injecting_client_list": [''], + + # A list of clients to search for torrents. + # eg: ['qbittorrent', 'qbittorrent_searching'] + # will fallback to default_torrent_client if empty + "searching_client_list": [''], + + # set true to skip automated client torrent searching + # this will search qbittorrent clients for matching torrents + # and use found torrent id's for existing hash and site searching + 'skip_auto_torrent': False, + # Play the bell sound effect when asking for confirmation "sfx_on_prompt": True, - # How many trackers need to pass successfull checking to continue with the upload process + # How many trackers need to pass successful checking to continue with the upload process # Default = 1. If 1 (or more) tracker/s pass banned_group, content and dupe checking, uploading will continue # If less than the number of trackers pass the checking, exit immediately. "tracker_pass_checks": "1", @@ -182,21 +213,21 @@ # set true to use mkbrr for torrent creation "mkbrr": True, + # Create using a specific number of worker threads for hashing (e.g., 8) with mkbrr + # Experimenting with different values might yield better performance than the default automatic setting. 
+ # Conversely, you can set a lower amount such as 1 to protect system resources (default "0" (auto)) + "mkbrr_threads": "0", + # set true to use argument overrides from data/templates/user-args.json "user_overrides": False, - # set true to skip automated client torrent searching - # this will search qbittorrent clients for matching torrents - # and use found torrent id's for existing hash and site searching - 'skip_auto_torrent': False, - # If there is no region/distributor ids specified, we can use existing torrents to check # This will use data from matching torrents in qBitTorrent/RuTorrent to find matching site ids # and then try and find region/distributor ids from those sites # Requires "skip_auto_torrent" to be set to False "ping_unit3d": False, - # If processing a bluray disc, get bluray information from bluray.com + # If processing a dvd/bluray disc, get related information from bluray.com # This will set region and distribution info # Must have imdb id to work "get_bluray_info": False, @@ -221,6 +252,7 @@ # Video codec/resolution and disc size mismatches have huge penalities # Only useful in unattended mode. If not unattended you will be prompted to confirm release # Final score must be greater than this value to be considered a match + # Only works with blu-ray discs, not dvd "bluray_score": 94.5, # If there is only a single release on bluray.com, you may wish to relax the score a little @@ -236,6 +268,32 @@ # Whether or not to print direct torrent links for the uploaded content "print_tracker_links": True, + # Add a directory for Emby linking. This is the folder where the emby files will be linked to. + # If not set, Emby linking will not be performed. Symlinking only, linux not tested + # path in quotes (double quotes for windows), e.g. "C:\\Emby\\Movies" + # this path for movies + "emby_dir": None, + + # this path for TV shows + "emby_tv_dir": None, + + # Set true to search for matching requests on supported trackers + "search_requests": False, + + # Set true to also try searching predb for scene release + # predb is not consistent, can timeout, but can find some releases not found on SRRDB + "check_predb": False, + + # Set true to prefer torrents with piece size <= 16 MiB when searching for existing torrents in clients + # Does not override MTV preference for small pieces + "prefer_max_16_torrent": False, + + # Set false to disable adding cross-seed suitable torrents found during existing search (dupe) checking + "cross_seeding": True, + # Set true to cross-seed check every valid tracker defined in your config + # regardless of whether the tracker was selected for upload or not (needs cross-seeding above to be True) + "cross_seed_check_everything": False, + }, # these are used for DB links on AR @@ -249,7 +307,7 @@ "TRACKERS": { # Which trackers do you want to upload to? 
- # Available tracker: ACM, AITHER, AL, ANT, AR, ASC, BHD, BHDTV, BLU, BT, CBR, DC, DP, FNP, FRIKI, HDB, HDS, HDT, HHD, HUNO, ITT, LCD, LDU, LST, LT, MTV, NBL, OE, OTW, PSS, PT, PTER, PTP, PTT, R4E, RAS, RF, RTF, SAM, SN, STC, THR, TIK, TL, TOCA, UHD, ULCX, UTP, YOINK, YUS + # Available tracker: ACM, AITHER, AL, ANT, AR, ASC, AZ, BHD, BHDTV, BJS, BLU, BT, CBR, CZ, DC, DP, EMUW, FF, FL, FNP, FRIKI, GPW, HDB, HDS, HDT, HHD, HUNO, IHD, IS, ITT, LCD, LDU, LST, LT, MTV, NBL, OE, OTW, PHD, PT, PTER, PTP, PTS, PTT, R4E, RAS, RF, RTF, SAM, SHRI, SN, SP, SPD, STC, THR, TIK, TL, TLZ, TTG, TTR, TVC, ULCX, UTP, YOINK, YUS # Only add the trackers you want to upload to on a regular basis "default_trackers": "", @@ -266,7 +324,6 @@ # "useAPI": False, Set to True if using this tracker for automatic ID searching or description parsing "useAPI": False, "api_key": "", - "announce_url": "https://aither.cc/announce/customannounceurl", "anon": False, # Send uploads to Aither modq for staff approval "modq": False, @@ -275,7 +332,6 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://animelovers.club/announce/customannounceurl", "anon": False, }, "ANT": { @@ -305,7 +361,18 @@ # anon is not an option when uploading to ASC # for ASC to work you need to export cookies from https://cliente.amigos-share.club/ using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ # cookies need to be in netscape format and need to be in data/cookies/ASC.txt - "announce_url": "https://amigos-share.club/announce.php?passkey=PASSKEY", + "announce_url": "https://amigos-share.club/announce.php?passkey=PASSKEY" + }, + "AZ": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + # for AZ to work you need to export cookies from https://avistaz.to using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ + # cookies need to be in netscape format and need to be in data/cookies/AZ.txt + "announce_url": "https://tracker.avistaz.to/%3CPASSKEY%3E/announce", + "anon": False, + # If True, the script performs a basic rules compliance check (e.g., codecs, region). + # This does not cover all tracker rules. Set to False to disable. + "check_for_rules": True, }, "BHD": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name @@ -328,13 +395,22 @@ "my_announce_url": "https://trackerr.bit-hdtv.com/passkey/announce", "anon": False, }, + "BJS": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + # for BJS to work you need to export cookies from https://bj-share.info using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/. 
+ # cookies need to be in netscape format and need to be in data/cookies/BJS.txt + "announce_url": "https://tracker.bj-share.info:2053/%3CPASSKEY%3E/announce", + "anon": False, + # Set to False if during an anonymous upload you want your release group to be hidden + "show_group_if_anon": True, + }, "BLU": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", # "useAPI": False, Set to True if using this tracker for automatic ID searching or description parsing "useAPI": False, "api_key": "", - "announce_url": "https://blutopia.cc/announce/customannounceurl", "anon": False, }, "BT": { @@ -348,28 +424,56 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://capybarabr.com/announce/customannounceurl", "anon": False, # Send uploads to CBR modq for staff approval "modq": False, }, + "CZ": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + # for CZ to work you need to export cookies from https://cinemaz.to using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ + # cookies need to be in netscape format and need to be in data/cookies/CZ.txt + "announce_url": "https://tracker.cinemaz.to/%3CPASSKEY%3E/announce", + "anon": False, + # If True, the script performs a basic rules compliance check (e.g., codecs, region). + # This does not cover all tracker rules. Set to False to disable. + "check_for_rules": True, + }, "DC": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", - "username": "", - "password": "", - "announce_url": "https://digitalcore.club/tracker.php/%3CPASSKEY%3E/announce", + # You can find your api key at Settings -> Security -> API Key -> Generate API Key + "api_key": "", "anon": False, }, "DP": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://darkpeers.org/announce/customannounceurl", "anon": False, # Send uploads to DP modq for staff approval "modq": False, }, + "EMUW": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + "api_key": "", + "anon": False, + # Use Spanish title instead of English title, if available + "use_spanish_title": False, + }, + "FF": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + "username": "", + "password": "", + # You can find your announce URL by downloading any torrent from FunFile, adding it to your client, and then copying the URL from the 'Trackers' tab. + "announce_url": "https://tracker.funfile.org:2711/%3CPASSKEY%3E/announce", + # Set to True if you want to check whether your upload fulfills corresponding requests. This may slightly slow down the upload process. + "check_requests": False, + # Set to True if you want to include the full MediaInfo in your upload description or False to include only the most relevant parts. 
+ "full_mediainfo": False, + }, "FL": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", @@ -382,14 +486,22 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://fearnopeer.com/announce/customannounceurl", "anon": False, }, "FRIKI": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://frikibar.com/announce/%3CPASSKEY%3E", + }, + "GPW": { + "link_dir_name": "", + # You can find your API key in Profile Settings -> Access Settings -> API Key. If there is no API, click "Reset your api key" and Save Profile. + "api_key": "", + # Optionally, you can export cookies from GPW to improve duplicate searches. + # If you do this, you must export cookies from https://greatposterwall.com using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ + # Cookies must be in Netscape format and must be located in data/cookies/GPW.txt + # You can find your announce URL at https://greatposterwall.com/upload.php + "announce_url": "https://tracker.greatposterwall.com/%3CPASSKEY%3E/announce", }, "HDB": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name @@ -411,24 +523,32 @@ # cookies need to be in netscape format and need to be in data/cookies/HDS.txt "announce_url": "http://hd-space.pw/announce.php?pid=%3CPASSKEY%3E", "anon": False, + # Set to True if you want to include the full MediaInfo in your upload description or False to include only the most relevant parts. + "full_mediainfo": False, }, "HDT": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", - # for HDT to work you need to export cookies from https://hd-torrent.net/ using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/. - # cookies need to be in netscape format and need to be in data/cookies/HDT.txt - "username": "", - "password": "", - "my_announce_url": "https://hdts-announce.ru/announce.php?pid=%3CPASS_KEY/PID%3E", + # For HDT to work, you need to export cookies from the site using: + # https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ + # Cookies must be in Netscape format and saved in: data/cookies/HDT.txt + # You can change the URL if the main site is down or if you encounter upload issues. + # Keep in mind that changing the URL requires exporting the cookies again from the new domain. + # Alternative domains: + # - https://hd-torrents.org/ + # - https://hd-torrents.net/ + # - https://hd-torrents.me/ + # - https://hdts.ru/ + "url": "https://hd-torrents.me/", "anon": False, - # DO NOT EDIT THIS LINE - "announce_url": "https://hdts-announce.ru/announce.php", + "announce_url": "https://hdts-announce.ru/announce.php?pid=%3CPASS_KEY/PID%3E", + # Set to True if you want to include the full MediaInfo in your upload description or False to include only the most relevant parts. 
+ "full_mediainfo": False, }, "HHD": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://homiehelpdesk.net/announce/customannounceurl", "anon": False, }, "HUNO": { @@ -436,28 +556,38 @@ "link_dir_name": "", "useAPI": False, "api_key": "", - "announce_url": "https://hawke.uno/announce/customannounceurl", + "anon": False, + }, + "IHD": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + "api_key": "", + "anon": False, + }, + "IS": { + # for IS to work you need to export cookies from https://immortalseed.me/ using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/. + # cookies need to be in netscape format and need to be in data/cookies/IS.txt + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + "announce_url": "https://immortalseed.me/announce.php?passkey=%3CPASSKEY%3E", "anon": False, }, "ITT": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://itatorrents.xyz/announce/customannounceurl", "anon": False, }, "LCD": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://locadora.cc/announce/customannounceurl", "anon": False, }, "LDU": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://theldu.to/announce/customannounceurl", "anon": False, }, "LST": { @@ -466,7 +596,6 @@ # "useAPI": False, Set to True if using this tracker for automatic ID searching or description parsing "useAPI": False, "api_key": "", - "announce_url": "https://lst.gg/announce/customannounceurl", "anon": False, # Send uploads to LST modq for staff approval "modq": False, @@ -477,8 +606,9 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://lat-team.com/announce/customannounceurl", "anon": False, + # Send uploads to LT modq for staff approval + "modq": False, }, "MTV": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name @@ -507,30 +637,31 @@ # "useAPI": False, Set to True if using this tracker for automatic ID searching or description parsing "useAPI": False, "api_key": "", - "announce_url": "https://onlyencodes.cc/announce/customannounceurl", "anon": False, }, "OTW": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://oldtoons.world/announce/customannounceurl", # Send uploads to OTW modq for staff approval "modq": False, "anon": False, }, - "PT": { + "PHD": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", - "api_key": "", - "announce_url": "https://portugas.org/announce/customannounceurl", + # for PHD to work you need to export cookies from https://privatehd.to/ using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/ + # 
cookies need to be in netscape format and need to be in data/cookies/PHD.txt + "announce_url": "https://tracker.privatehd.to/%3CPASSKEY%3E/announce", "anon": False, + # If True, the script performs a basic rules compliance check (e.g., codecs, region). + # This does not cover all tracker rules. Set to False to disable. + "check_for_rules": True, }, - "PSS": { + "PT": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", "anon": False, }, "PTER": { # Does not appear to be working at all @@ -555,11 +686,17 @@ "password": "", "announce_url": "", }, + "PTS": { + # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name + "link_dir_name": "", + # for PTS to work you need to export cookies from https://www.ptskit.org using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/. + # cookies need to be in netscape format and need to be in data/cookies/PTS.txt + "announce_url": "https://ptskit.kqbhek.com/announce.php?passkey=%3CPASSKEY%3E", + }, "PTT": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://polishtorrent.top/announce/customannounceurl", "anon": False, }, "R4E": { @@ -573,14 +710,12 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://rastastugan.org/announce/customannounceurl", "anon": False, }, "RF": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://reelflix.xyz/announce/customannounceurl", "anon": False, }, "RTF": { @@ -597,15 +732,15 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://samaritano.cc/announce/%3CPASSKEY%3E", "anon": False, }, "SHRI": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://shareisland.org/announce/customannounceurl", "anon": False, + # Use Italian title instead of English title, if available + "use_italian_title": False, }, "SN": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name @@ -617,19 +752,20 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://seedpool.org/announce/%3CPASSKEY%3E", }, "SPD": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", + # You can create an API key here https://speedapp.io/profile/api-tokens. Required Permission: Upload torrents "api_key": "", - "announce_url": "https://ramjet.speedapp.io/%3CPASSKEY%3E/announce", + # Select the upload channel, if you don't know what this is, leave it empty. + # You can also set this manually using the args -ch or --channel, without '@'. Example: @spd -> '-ch spd'. 
+ "channel": "", }, "STC": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl", "anon": False, }, "THR": { @@ -648,7 +784,6 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://cinematik.net/announce/", "anon": False, }, "TL": { @@ -658,13 +793,19 @@ # If you are not going to use the API, you will need to export cookies from https://www.torrentleech.org/ using https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/. # cookies need to be in netscape format and need to be in data/cookies/TL.txt "api_upload": True, - "announce_key": "TL announce key", + # You can find your passkey at your profile (https://www.torrentleech.org/profile/[YourUserName]/view) -> Torrent Passkey + "passkey": "", + "anon": False, + # Rehost images to the TL image host. Does not work with the API upload method. + # Keep in mind that screenshots are only anonymous if you enable the "Anonymous Gallery Uploads" option in your profile settings. + "img_rehost": True, + # Set to True if you want to include the full MediaInfo in your upload description or False to include only the most relevant parts. + "full_mediainfo": False, }, - "TOCA": { + "TLZ": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://tocashare.biz/announce/customannounceurl", "anon": False, }, "TTG": { @@ -678,18 +819,21 @@ "announce_url": "https://totheglory.im/announce/", "anon": False, }, - "TVC": { + "TTR": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://tvchaosuk.com/announce/%3CPASSKEY%3E", "anon": False, + # Send to modq for staff approval + "modq": False, }, - "UHD": { + "TVC": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", + # 2 is listed as max images in rules. 
Please do not change unless you have permission + "image_count": 2, "api_key": "", - "announce_url": "https://uhdshare.com/announce/%3CPASSKEY%3E", + "announce_url": "https://tvchaosuk.com/announce/%3CPASSKEY%3E", "anon": False, }, "ULCX": { @@ -698,7 +842,6 @@ # "useAPI": False, Set to True if using this tracker for automatic ID searching or description parsing "useAPI": False, "api_key": "", - "announce_url": "https://upload.cx/announce/customannounceurl", "anon": False, # Send to modq for staff approval "modq": False, @@ -707,21 +850,18 @@ # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://utp/announce/customannounceurl", "anon": False, }, "YOINK": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://yoinked.org/announce/customannounceurl", "anon": False, }, "YUS": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", "api_key": "", - "announce_url": "https://yu-scene.net/announce/customannounceurl", "anon": False, }, "MANUAL": { @@ -739,16 +879,27 @@ # See https://github.com/Audionut/Upload-Assistant/wiki "qbittorrent": { "torrent_client": "qbit", + # qui reverse proxy url, see https://github.com/autobrr/qui#reverse-proxy-for-external-applications + # If using the qui reverse proxy, no other auth type needs to be set + "qui_proxy_url": "", # enable_search to True will automatically try and find a suitable hash to save having to rehash when creating torrents "enable_search": True, "qbit_url": "http://127.0.0.1/", "qbit_port": "8080", "qbit_user": "", "qbit_pass": "", + # List of trackers to activate "super-seed" (or "initial seeding") mode when adding the torrent. + # https://www.bittorrent.org/beps/bep_0016.html + # Super-seed mode is NOT recommended for general use. + # Super-seed mode is only recommended for initial seeding servers where bandwidth management is paramount. + "super_seed_trackers": [""], # Use the UA tracker acronym as a tag in qBitTorrent "use_tracker_as_tag": False, "qbit_tag": "", "qbit_cat": "", + # If using cross seeding, add cross seed tag/category here + "qbit_cross_tag": "", + "qbit_cross_cat": "", "content_layout": "Original", # here you can chose to use either symbolic or hard links, or None to use original path # this will disable any automatic torrent management if set @@ -759,7 +910,7 @@ # when linking error. eg: unsupported file system. "allow_fallback": True, # A folder or list of folders that will contain the linked content - # if using hardlinking, the linked folder must be on the same drive/volume as the original contnt, + # if using hardlinking, the linked folder must be on the same drive/volume as the original content, # with UA mapping the correct location if multiple paths are specified. # Use local paths, remote path mapping will be handled. # only single \ on windows, path will be handled by UA @@ -774,15 +925,38 @@ # Set to False to skip verify certificate for HTTPS connections; for instance, if the connection is using a self-signed certificate. 
# "VERIFY_WEBUI_CERTIFICATE": True, }, + "qbittorrent_searching": { + # an example of using a qBitTorrent client just for searching, when using another client for injection + "torrent_client": "qbit", + # qui reverse proxy url, see https://github.com/autobrr/qui#reverse-proxy-for-external-applications + # If using the qui reverse proxy, no other auth type needs to be set + "qui_proxy_url": "", + # enable_search to True will automatically try and find a suitable hash to save having to rehash when creating torrents + "enable_search": True, + "qbit_url": "http://127.0.0.1/", + "qbit_port": "8080", + "qbit_user": "", + "qbit_pass": "", + }, "rtorrent": { "torrent_client": "rtorrent", "rtorrent_url": "https://user:password@server.host.tld/username/rutorrent/plugins/httprpc/action.php", # path/to/session folder "torrent_storage_dir": "", "rtorrent_label": "", - # here you can chose to use either symbolic or hard links, or leave uncommented to use original path + # here you can chose to use either symbolic or hard links, or None to use original path + # this will disable any automatic torrent management if set # use either "symlink" or "hardlink" + # on windows, symlinks needs admin privs, both link types need ntfs/refs filesytem (and same drive) "linking": "", + # Allow fallback to inject torrent into qBitTorrent using the original path + # when linking error. eg: unsupported file system. + "allow_fallback": True, + # A folder or list of folders that will contain the linked content + # if using hardlinking, the linked folder must be on the same drive/volume as the original content, + # with UA mapping the correct location if multiple paths are specified. + # Use local paths, remote path mapping will be handled. + # only single \ on windows, path will be handled by UA "linked_folder": [""], # Remote path mapping (docker/etc.) CASE SENSITIVE "local_path": [""], @@ -827,6 +1001,8 @@ "use_discord": False, # Set to True to only run the bot in unattended mode "only_unattended": True, + # Set to True to send the tracker torrent urls + "send_upload_links": True, "discord_bot_token": "", "discord_channel_id": "", "discord_bot_description": "", diff --git a/data/templates/config.py b/data/templates/config.py index 02a58f32d..722f4e92f 100644 --- a/data/templates/config.py +++ b/data/templates/config.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 config = { "DEFAULT": { @@ -38,9 +39,11 @@ # custom zipline url "zipline_url": "", "zipline_api_key": "", + # Seedpool CDN API key + "seedpool_cdn_api": "", # Order of image hosts. primary host as first with others as backup - # Available image hosts: imgbb, ptpimg, imgbox, pixhost, lensdump, ptscreens, oeimg, dalexni, zipline, passtheimage + # Available image hosts: imgbb, ptpimg, imgbox, pixhost, lensdump, ptscreens, oeimg, dalexni, zipline, passtheimage, seedpool_cdn "img_host_1": "imgbb", "img_host_2": "imgbox", diff --git a/data/version.py b/data/version.py index ffc65db50..4b87715f2 100644 --- a/data/version.py +++ b/data/version.py @@ -1,4 +1,848 @@ -__version__ = "v5.2.1" +__version__ = "v6.3.0" + +""" +Release Notes for version v6.3.0 (2025-12-15): + +# +# ## RELEASE NOTES +# - This release was slower than usual. Some notable changes.... +# - Added TLZ, IHD, TTR +# - Added support for multiple client injecting. +# - Improved the image host handling for sites that have banned hosts. +# - Improved the year detection for RTF when uploading TV content. 
+# - Fixed a bug with DVD mediainfo handling.
+# - Fixed audio track handling that could cause missed dual-audio detection.
+# - Fixed image compression when using image overlay.
+# - Fixed some MAL ID handling.
+# - Added support for detecting ATMOS height channels.
+# - Fixed auto torrent searching to only search valid trackers in config.
+# - Added a webui for docker.
+# - wastaken7 did a bunch of further refactoring.
+#
+# ## New config options - see example.py
+# - Multiple injection clients can be added.
+# - Removed the need for passkey from SPD and DC.
+# - Added modq support for LT.
+#
+# ## Special shout out
+# - blueberry, who is banned from UA github, and from a bunch of sites, due to invite begging/trading, and begging for internal status. It takes a special kind of stupid to modify a handful of lines and claim something as your own.
+#
+# ---
+#
+# ## What's Changed
+#
+# * docker pr building by @Audionut in 4a56b81
+# * build from fork by @Audionut in d10bff9
+# * fix some args parse quirks by @Audionut in fa05ca5
+# * prints behind debug by @Audionut in c1050f2
+# * BT: fix internal (#955) by @wastaken7 in d852726
+# * fix: don't sanitize meta early by @Audionut in 46346cd
+# * add new tracker URLs to match_tracker_url function (#957) by @wastaken7 in f45b9af
+# * refactor: remove UHD tracker (#958) by @wastaken7 in 339ba64
+# * feat: add support for super-seed mode (#956) by @wastaken7 in 0db8ed8
+# * PTP: upload error handling by @Audionut in 2c04770
+# * handle domain changes in unit3d descriptions by @Audionut in fb12db1
+# * TIK: videoformat update by @Audionut in 569740f
+# * image host validation updates by @Audionut in 3ab8bc5
+# * refactor(ASC): move internal flag setting to a separate method (#959) by @wastaken7 in 058b4cb
+# * fix uploaded images success check by @Audionut in 089f127
+# * feat: Docker GUI for front end. (#954) by @IPGPrometheus in ca87495
+# * AR: Improve genre tag compilation logic (#965) by @cza in 6271ded
+# * refactor(SPD): remove passkey usage, update upload logic, add banned groups API (#963) by @wastaken7 in b019965
+# * HDB - Refine granulometry of thumbnail size in comps (#968) by @GizmoBal in c342712
+# * webui example args by @Audionut in 93330c2
+# * UHD: Remove from available trackers list in example-config.py (#970) by @FortKnox1337 in 03c2fda
+# * Add Torrenteros support (#971) by @wastaken7 in 170a43a
+# * TVDB/TVMaze improvements (#978) by @Audionut in bdd164f
+# * README: Add CinemaZ to the list of supported sites.
(#974) by @FortKnox1337 in 1718e3c +# * refactor(DC): remove passkey handling, update API endpoint usage, and improve upload logic (#976) by @wastaken7 in 7a8cb9c +# * reverse linux bdinfo checking by @Audionut in 10eca14 +# * unattended no imdb by @Audionut in 5224894 +# * fix imdb search params by @Audionut in e51421a +# * SHRI: BBCode description generation (#980) by @TheDarkMan in 80fe158 +# * feat(SHRI): add customization and conditional sections (#983) by @TheDarkMan in 7c9ced7 +# * Scan type detection for empty ScanType value (#982) by @TheDarkMan in ed49e65 +# * Minor THR improvements (#987) by @cucaracha7126378 in 13ba722 +# * fix(SHRI): handle MediaInfo Language dict and improve logo extraction (#988) by @TheDarkMan in 27d2143 +# * Improve qui searching (#967) by @Audionut in 7eb838e +# * validate specific trackers by @Audionut in a4b5af3 +# * improve existing torrent search by @Audionut in 5598e24 +# * RTF: fix year check by @Audionut in 1f729ae +# * BLU: Update banned release groups list (#990) by @FortKnox1337 in a5c6ecf +# * fix(THR): replace unsupported bbcode tags and fix NFO content alignment (#993) by @cucaracha7126378 in 06c7e86 +# * fix(SHRI): improve REMUX detection and codec identification (#992) by @TheDarkMan in 7f45b58 +# * print error when no audio by @Audionut in c169279 +# * ANT: prohibit bloated by @Audionut in d3c5528 +# * handle height channels (#985) by @Audionut in df76e53 +# * fix(CZ): change BDinfo summary file (#996) by @wastaken7 in df4a4cb +# * ASC: fix localized data (#994) by @Audionut in f2a6513 +# * catch bdinfo errors by @Audionut in 614bbc0 +# * Fix empty status_message after upload failure (#1000) by @wastaken7 in 1120f4a +# * fix(LT.py): fix origin_country check, modQ option added (#1002) by @Caleb Contreras in 752072d +# * fix getting BTN ids by @Audionut in d244af5 +# * parse scene nfo for service by @Audionut in 751253c +# * PHD: Add banned release groups (#1004) by @FortKnox1337 in b22a07e +# * fix group checking for banned groups by @Audionut in 7060e34 +# * fix double screens print by @Audionut in fe5ee82 +# * refactor infohash by @Audionut in d758027 +# * fix(FF): follow the site's naming conventions (#1006) by @wastaken7 in 1e6b27a +# * RTV: refactor tv year handling by @Audionut in a325fb3 +# * cleanup bluray com handling by @Audionut in da2fff2 +# * ULCX: refactor aka handling by @Audionut in 9b38b2d +# * fixed mal handling by @Audionut in e7e4e48 +# * refactor(DC): remove logo and screenshot header from description (#1018) by @wastaken7 in dc6f057 +# * fix(SHRI): improve language handling for Italian and English (#1013) by @TheDarkMan in f5d4a42 +# * add site uploading from site check log (#995) by @Audionut in 4f166d4 +# * always attempt mal by @Audionut in 3915ed9 +# * Add IHD support by @Audionut in a4d1df4 +# * licensing (#1022) by @Audionut in e2d2531 +# * IHD: fix language checking by @Audionut in a259e3d +# * add extra tags catch by @Audionut in 458f48f +# * unit3d internal flag debugging by @Audionut in a08b887 +# * Revert "unit3d internal flag debugging" by @Audionut in 23e1b08 +# * fix: screens compression when image overlay by @Audionut in 66dcc2b +# * Add further support for IHD tracker (#1027) by @FortKnox1337 in f0a13f4 +# * Refactor UNIT3D description handling (#1007) by @wastaken7 in 9949c1e +# * description fixes by @Audionut in 1d13584 +# * mps support 1 MiB by @Audionut in 8d328b5 +# * fix audio track counting by @Audionut in ce4529a +# * allow override scene imdb by @Audionut in f988749 +# * fix search_imdb by 
@Audionut in bfbc68f +# * cleanup HDB description handling by @Audionut in a9b20a5 +# * RTF: get all the years by @Audionut in 37836f7 +# * fix(SHRI) ignores akas titles with "attributes" (alternative titles) (#1032) by @braingremlin in 0823f78 +# * TL: fix unbound variable 'torrent_id' (#1034) by @wastaken7 in a533425 +# * check image hosts before async upload (#1033) by @Audionut in 4fc63d8 +# * refactor(DC): streamline torrent naming convention and remove duplicate title check (#1036) by @wastaken7 in ebb0d78 +# * Fix list formatting on different websites (#1038) by @wastaken7 in 3295781 +# * add support for multi-client injection (#1037) by @wastaken7 in 02865c7 +# * BHD: adjust size bbcode for new site fix by @Audionut in 5c69c2b +# * Update eMuwarez base url (#1040) by @Jesús José Maldonado Arroyo in bf6ecaa +# * refactor language checks to use common method across trackers (#1030) by @wastaken7 in 66d7a05 +# * rehosting: Use listdir instead of glob for PNG files (#1035) by @WOSSFOSS in e382ed9 +# * fix qui searching by @Audionut in d6757cc +# * filter empty injection clients by @Audionut in 9f1f616 +# * Update LT.py (#1044) by @Hielito in a4ecf7e +# * fix: remove space before 'AKA' in anime title formatting (#1042) by @wastaken7 in dc3397d +# * fix qui url by @Audionut in 4e7c4b2 +# * ANT: all type ids by @Audionut in a2471d0 +# * remove debugger by @Audionut in a237d6a +# * Add language-data optional to langcodes (#1046) by @WOSSFOSS in 2d82858 +# * BHD: exclude 2160p by default when framestor and hdr release by @Audionut in 2066f68 +# * dupe checking: fix remux check by @Audionut in f47307e +# * unit3d: fix existing search when edition by @Audionut in 70cc612 +# * fix DVD mediainfo use ifo by @Audionut in a795bc8 +# * fix bdinfo progress output by @Audionut in 55ca8cf +# * Add TLZ support (#1048) by @FortKnox1337 in a98eb50 +# * ACM: fix multi-disc bdinfo by @Audionut in 25647d4 +# * TL: fix NFO upload with cookie method (#1049) by @wastaken7 in 9faaf47 +# * Refactor TVC tracker with enhanced metadata handling and description generation (#1025) by @Lusephur in e2ba3a7 +# * 6.3 release notes by @Audionut in 65f2f55 +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.2.3...v6.3.0 +""" + +__version__ = "v6.2.3" + +""" +Release Notes for version v6.2.3 (2025-10-27): + +# ## What's Changed +# +# * set torrent_properties by @Audionut in 96d41b8 +# * discparse.py - Use BDInfo on PATH if available (Linux) (#952) by @noobiangodd in bd52996 +# * tvdb improvements by @Audionut in 5891db5 +# * BT: add internal upload support (#953) by @wastaken7 in daeadbe +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.2.2...v6.2.3 +""" + + +""" +Release Notes for version v6.2.2 (2025-10-26): + +# ## What's Changed +# +# * fix tvdb pagination by @Audionut in 1c4f9b6 +# * S/E overrides by @Audionut in 84787af +# * tvdb safe settings by @Audionut in d4de654 +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.2.1...v6.2.2 +""" + + +""" +Release Notes for version v6.2.1 (2025-10-26): + +# ## What's Changed +# +# * fix: prevent error with missing config by @Audionut in 1398458 +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.2.0...v6.2.1 +""" + + +""" +Release Notes for version v6.2.0 (2025-10-26): + +# +# ## RELEASE NOTES +# - New site support - ImmortalSeed, Emuwarez. +# - New modules required, update with requirements.txt. +# - Linux specific mediainfo binaries for DVD support. 
Uninstall existing 'pymediainfo' before running requirements.txt. +# - Removed oxipng support, using ffmpeg based compression instead. +# - TVDB for all. +# - Refactored cookie/site validation processing, to speed processing. +# - New feature, site checking. Use as 'python3 upload.py path_to_movie_folder --queue a_queue_name -sc -ua'. Append trackers as needed. You can also append '-req' (or config option). This will find all matching content from the input directory, that can be uploaded to each tracker (and list any request). Log files for each tracker will be created in the UA tmp directory. +# - Alternatively, you can remove '-sc' from the above example, and let UA just upload content from the input directory instead of logging. You may wish to append '-lq' with a numeric value to limit the amount of successful uploads processed. +# +# ## New config options - see example.py +# - Multiple client searching for existing torrent files. +# - Specific injection client. +# - ffmpeg based compression option. +# +# --- +# +# ## What's Changed +# +# * Add banned groups to CBR tracker: DragsterPS, DRENAN, S74Ll10n (#885) by @franzopl in 5c7db2b +# * linux specific mi binaries (#886) by @Audionut in f838eff +# * force mediainfo by @Audionut in 01fc6c9 +# * fix: args key (#889) by @GizmoBal in 1a94f57 +# * catch exceptions by @Audionut in 1c82a9e +# * unit3d description handling update by @Audionut in b292887 +# * add prompt for ANT ids by @Audionut in c60328e +# * Refactor SHRI (#888) by @TheDarkMan in a129a0e +# * feat(SHRI): improve audio string cleaning in SHRI tracker (#893) by @TheDarkMan in 968d575 +# * OE-OTW rules compliance by @Audionut in ffb3424 +# * fixing imgur issue with TVC and making some improvements (#894) by @swannie-eire in 5b7ebf4 +# * Fix race condition in get_mediainfo_section by removing unnecessary asyncio usage (#895) by @wastaken7 in abdfee7 +# * SHRI: handle bitrate conversion errors in audio track processing (#896) by @TheDarkMan in b783158 +# * feat: add site checking by @Audionut in 83718c0 +# * feat: injection client by @Audionut in 68ca252 +# * wrap child process kill by @Audionut in 28a99f2 +# * PTP: fix missing import by @Audionut in 289e9b4 +# * SHRI: improve encoding detection (#902) by @TheDarkMan in 73deae4 +# * ITT: add naming conventions and request research (#900) by @wastaken7 in 2dedf50 +# * add EMUW tracker support (#898) by @Kaiser in 98ec8ec +# * fix(ITT): missing mapping_only (#903) by @wastaken7 in 3884b89 +# * always regenerate mi by @Audionut in 04c70a8 +# * SHRI: handle list sources (#905) by @TheDarkMan in 0014260 +# * distributor from edition only when is_disc by @Audionut in 6283beb +# * UNIT3D: catch upload permission & incorrect API key (#904) by @wastaken7 in 2b27633 +# * Add Nebula streaming service (#906) by @WOSSFOSS in a2cd15d +# * rules compliance updates by @Audionut in aafc3b0 +# * update -sc handling to work as only a tracker search by @Audionut in 22e912d +# * DP: enable request search (#912) by @wastaken7 in b897ff2 +# * better -sc handling by @Audionut in 269d810 +# * YUS: disabled request searching by @Audionut in bad14bc +# * remove print by @Audionut in 1c0c03c +# * log requests by @Audionut in 27ee1d5 +# * fix logging only sucessfull trackers by @Audionut in c1f04c3 +# * site_searching: save aither trumpables by @Audionut in d6e487a +# * site_searching: always request search by @Audionut in d6293c9 +# * fix(SHRI): normalize Blu-ray to BluRay for non-DISC types (#914) by @TheDarkMan in 1faa0a6 +# * AITHER: add request support 
by @Audionut in 3941841 +# * site_check: cleanup queue printing by @Audionut in 9b82a6c +# * fix(SHRI): correct WEB-DL vs WEBRip detection logic (#916) by @TheDarkMan in c05bbc9 +# * feat: cache qbit login (#918) by @WOSSFOSS in 25755f5 +# * banned groups update on CBR.py (#920) by @franzopl in 63c3f67 +# * fix(SHRI): improve release group tag extraction (#921) by @TheDarkMan in e37c36e +# * blu, remove webdv by @Audionut in b1888e6 +# * HHD: no dvdrip by @Audionut in 517247e +# * BJS, ASC: add missing internal group detection (#923) by @wastaken7 in d516502 +# * fix(SHRI): detect GPU encodes via empty BluRay metadata (#924) by @TheDarkMan in 6dffda3 +# * ANT: adult screens by @Audionut in c039dce +# * OTW: naming fixes by @Audionut in 8c4c75d +# * use combined genre check by @Audionut in 6211f21 +# * LT: enhance category detection and add Spanish language checks (#925) by @wastaken7 in e954f12 +# * ULCX: fail safe with adult screens by @Audionut in fb1c61f +# * BT: add scene flag (#927) by @wastaken7 in a2e6527 +# * HUNO: correct HFR placement by @Audionut in cbb3bef +# * ANT: flagchange adult screens by @Audionut in d30290b +# * ANT: useragent by @Audionut in 43a3795 +# * SHRI: improve type detection for DV profile encodes (#929) by @TheDarkMan in f83f0b8 +# * Slice upload of comparison screenshots on HDB. (#930) by @GizmoBal in 58d0793 +# * BHD: remove screensperrow handling by @Audionut in b544769 +# * HUNO: replace dubbed by @Audionut in 58be987 +# * BLU: fix webdv name replacement by @Audionut in 4895d64 +# * TL: Fix wrong syntax (#932) by @WOSSFOSS in 3f54319 +# * TL: fix unbound error in torrent edit (#933) by @WOSSFOSS in c4685da +# * RF: domain change by @Audionut in e62fd96 +# * adult content handling by @Audionut in 04d8c4a +# * better matching against adult content by @Audionut in 373bbb4 +# * fix(SHRI): improve hybrid detection logic in SHRI tracker (#937) by @TheDarkMan in a6b8c5f +# * Center ordinary screens on HDB (#936) by @GizmoBal in df0b521 +# * refactor: centralize cookie validation and upload logic (#883) by @wastaken7 in 76daec4 +# * fix setting BHD id's by @Audionut in 131849b +# * ASC: Fix anime related issues (#939) by @wastaken7 in fb57fb3 +# * CZ: add client matching (#945) by @FortKnox1337 in 06339ae +# * Added support for ImmortalSeed (#942) by @wastaken7 in 644e0ad +# * raise exceptions by @Audionut in 81619c4 +# * Use ffmpeg compression instead of oxipng (#946) by @Audionut in 79130f9 +# * refactor tvdb (#941) by @Audionut in f0b70dc +# * feat: multiple searching client support (#913) by @Audionut in 06c04a3 +# * release notes by @Audionut in dad66b6 +# * patch qui torrent comments by @Audionut in 0892720 +# * HUNO text size by @Audionut in eac97c6 +# * HUNO: screens per row fix by @Audionut in 624a825 +# * fix args parsing by @Audionut in a21cebc +# * fix missing key set by @Audionut in ed0f1c4 +# * fix(SHRI): web and remux handling (#947) by @TheDarkMan in 194e4ab +# * unit3d follow redirects by @Audionut in 8ef665c +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.1.1...v6.2.0 +""" + + +""" +Release Notes for version v6.1.1 (2025-10-11): + +# ## What's Changed +# +# * fix(BJS): NoneType error (#880) by @wastaken7 in 88e7f40 +# * fix: ASC IMDb link, signatures (#884) by @wastaken7 in f555acf +# * fix: anime tagging by @Audionut in 1ebb00c +# * fix: skip checking AV1 encode settings by @Audionut in 4741097 +# * tvmaze episode data use meta objects by @Audionut in d805d85 +# * tvmaze - rely on meta object for additional check 
by @Audionut in f82babf +# * set meta object by @Audionut in 968cee7 +# * unit3d bbcode parser, white space handling by @Audionut in 55f636c +# * BHD fix empty returns by @Audionut in 63d49d1 +# * Revert "unit3d bbcode parser, white space handling" by @Audionut in b22b12d +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.1.0...v6.1.1 +""" + + +""" +Release Notes for version v6.1.0 (2025-10-10): + +# +# ## RELEASE NOTES +# - Some large refactoring of description type handling for some sites, to speed the upload process. +# - The actual ffmpeg process now respects process_limit set via config.py. +# - The author has seen some issues with latest ffmpeg versions. August based releases work fine here. +# +# ## New config options - see example.py +# - prefer_max_16_torrent which will choose an 16 MiB torrent or lower when finding a suitable existing torrent file to use. +# - full_mediainfo in some tracker sections, to choose whether to use the full mediainfo or not. +# +# --- +# +# ## What's Changed +# +# * fix(BLU): derived handling not needed any longer by @Audionut in ca2f507 +# * fix: frame overlay by @Audionut in b40094f +# * remove tags from arrs by @Audionut in 9c90277 +# * BLU: fix double aka by @Audionut in d93805c +# * allow debug without apikey by @Audionut in bd27a55 +# * BLU : rule compliance by @Audionut in 9d8a2b8 +# * BLU: correct group capitalization by @Audionut in 1a020d2 +# * PTP: fix getting groupID when multiple search results by @Audionut in e39b95c +# * fx: sticky id args through functions by @Audionut in 7a018b1 +# * fix: custom link dir name by @Audionut in bbcd94f +# * Update to ULCX banned groups (#858) by @Zips-sipZ in ebef94b +# * fix tracker search return by @Audionut in edb10d0 +# * unattended skip by @Audionut in 4dfab79 +# * ACM fix description by @Audionut in e9f1627 +# * fix: don't guessit tags from files by @Audionut in 3c27387 +# * catch arr type file renames by @Audionut in 98ac5c6 +# * refactor bdmv MI handling (#853) by @Audionut in 966158d +# * fix: site based language handling by @Audionut in 3592157 +# * fix(AVISTAZ): torrent naming conventions, media code search, tokens (#862) by @wastaken7 in 46c8ee0 +# * fix(AVISTAZ): ensure year is converted to string when modifying upload name (#863) by @wastaken7 in 92731d2 +# * ULCX: aka is aka except when it's not aka because other aka is aka by @Audionut in aecb72e +# * AL: return empty string for mal_rating (#866) by @WOSSFOSS in 087d7a1 +# * feat: add region and distributor information to get_confirmation (#868) by @wastaken7 in b2701a5 +# * Update banned groups in DP.py (#870) by @emb3r in 0502d68 +# * Both [code] and [quote] should coexist in PTP descriptions (#869) by @GizmoBal in df22996 +# * fix: IMDb returns title as aka by @Audionut in 417c932 +# * BLU IMDb naming by @Audionut in f519e23 +# * fix warmup config by @Audionut in d15a7de +# * wrap capture task in semaphore by @Audionut in 3a4c5f7 +# * some tracker specific in torrent creation by @Audionut in 91fe360 +# * fix: piece size preference in auto torrent and add 16 MiB option by @Audionut in 9a6ce33 +# * TVC: restrict image hosts by @Audionut in 838fc4b +# * fix torrent validation logic by @Audionut in 3de9fa6 +# * remove pointless print by @Audionut in 240f828 +# * fix(BJS): remove unnecessary raise_for_status call in response handling (#873) by @wastaken7 in 76431f9 +# * validate encode settings (#871) by @Audionut in 2f804c6 +# * HUNO: fix internal state by @Audionut in 5d2e8da +# * feature: add configurable 
disc requirements per tracker (#878) by @TheDarkMan in d1b18bf +# * feat(SAM): add name processing and add additional checks for Portuguese audio/subtitle tracks (#879) by @wastaken7 in 1f2c41a +# * Simplify tracker specific torrent recreation by @Audionut in 65b62d7 +# * fix: region when None by @Audionut in e52b4ff +# * print tracker name changes by @Audionut in eefa20c +# * Enhance metadata handling, description building, and refactor tracker integrations (#860) by @wastaken7 in ee1885b +# * fix versioning by @Audionut in 00eae1c +# * release notes by @Audionut in 1e59d0d +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.0.1...v6.1.0 +""" + + +""" +Release Notes for version v6.0.1 (2025-10-04): + +# ## What's Changed +# +# * fix version file by @Audionut in c8ccf5a +# * erroneous v in version file by @Audionut in 5428927 +# * Fix YUS get_type_id (#850) by @oxidize9779 in 25591e0 +# * fix: LCD and UNIT3D upload (#852) by @wastaken7 in f5d11b8 +# * Update banned release groups of various trackers (#848) by @flowerey in 9311996 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v6.0.0...v6.0.1 +""" + + +""" +Changelog for version v6.0.0 (2025-10-03): + +# ## RELEASE NOTES +# - Immense thanks to @wastaken7 for refactoring the unit3d based tracker code. A huge QOL improvement that removed thousands of lines of code. +# - To signify the continued contributions by @wastaken7, this project is now know simply as "Upload Assistant". +# - New package added, run requirements.txt +# - This release contains lengthy refactoring of many code aspects. Many users, with thanks, have been testing the changes and giving feedback. +# - The version bump to v6.0.0 signifies the large code changes, and you should follow an update process suitable for yourself with a major version bump. + +## New config options - see example.py +# - FFMPEG related options that may assist those having issues with screenshots. +# - AvistaZ based sites have new options in their site sections. +# - "use_italian_title" inside SHRI config, for using Italian titles where available +# - Some HDT related config options were updated/changed +# - "check_predb" for also checking predb for scene status +# - "get_bluray_info" updated to also include getting DVD data +# - "qui_proxy_url" inside qbittorrent client config, for supporting qui reverse proxy url + +# ## WHAT'S NEW - some from last release +# - New arg -sort, used for sorting filelist, to ensure UA can run with some anime folders that have allowed smaller files. +# - New arg -rtk, which can be used to process a run, removing specific trackers from your default trackers list, and processing with the remaining trackers in your default list. +# - A significant chunk of the actual upload process has been correctly asynced. Some specific site files still need to be updated and will slow the process. +# - More UNIT3D based trackers have been updated with request searching support. +# - Added support for sending applicable edition to LST api edition endpoint. +# - NoGrp type tags are not removed by default. Use "--no-tag" if desired, and/or report trackers as needed. 
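Of the new config options listed above, "qui_proxy_url" is the easiest to misplace: it belongs inside the qbittorrent client section of example-config.py, shown earlier in this diff. A minimal sketch, assuming a placeholder proxy URL (the real URL format comes from the qui documentation linked in the config comments; every other key simply mirrors the example config):

```python
# Sketch only - key names mirror the qbittorrent client entry in example-config.py;
# the proxy URL value below is a placeholder, not a real endpoint.
qbittorrent_client = {
    "torrent_client": "qbit",
    # If using the qui reverse proxy, no other auth type needs to be set
    "qui_proxy_url": "https://qui.example.com/proxy/qbittorrent",  # placeholder
    "enable_search": True,
    "qbit_url": "http://127.0.0.1/",
    "qbit_port": "8080",
    "qbit_user": "",
    "qbit_pass": "",
}
```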
+ +# ## What's Changed +# +# * fix(GPW) - do not print empty descriptions (#805) by @wastaken7 in 07e8334 +# * SHRI - Check group tag and Italian title handling (#803) by @Tiberio in 054ce4f +# * fix(HDS) - use [pre] for mediainfo to correctly use monospaced fonts (#810) by @wastaken7 in aa62941 +# * fix(BT) - status code, post data, torrent id (#808) by @wastaken7 in 5ff6249 +# * feat(UNIT3D) - refactor UNIT3D websites to reuse common code base (#801) by @wastaken7 in 03c8ffd +# * ANT - fix trying to call lower on dict by @Audionut in 9772b0a +# * SHRI - Remove 'Dubbed', add [SUBS] tag (#815) by @Tiberio in 788be1c +# * graceful exit by @Audionut in ddbd135 +# * updated unit3d trackers - request support by @Audionut in a680692 +# * release notes by @Audionut in 49efdca +# * Update FNP resolution id (#818) by @oxidize9779 in 48fa975 +# * refactor(HDT) (#821) by @wastaken7 in 2365937 +# * more async (#819) by @Audionut in b7aea98 +# * print in debug by @Audionut in 9b68819 +# * set screens from manual frames by @Audionut in b9ef753 +# * more debugging by @Audionut in 5ad4fce +# * more debugging by @Audionut in 7902066 +# * Refine dual-audio detection for zxx (#822) by @GizmoBal in ab27990 +# * fix extended bluray parsing by @Audionut in cae1c38 +# * feat: Improve duplicate search functionality (#820) by @wastaken7 in 3b59c03 +# * remove dupe requirement by @Audionut in 5ebdc86 +# * disable filename match by @Audionut in 63adf3c +# * fix unit3d flags by @Audionut in 3555d12 +# * exact filename fix by @Audionut in 69aa3fa +# * Improve NFO downloading robustness (#827) by @noobiangodd in 09bc878 +# * PTP redact token by @Audionut in eec5d60 +# * enable predb by @Audionut in a073247 +# * qbit retries and async calls by @Audionut in 9146011 +# * add sleeps to pack processing by @Audionut in ed7eda9 +# * add DOCPLAY by @Audionut in aa97763 +# * fix unit3d flags api by @Audionut in 506ea47 +# * LST edition ids by @Audionut in df7769a +# * more parsers to lxml by @Audionut in e62e819 +# * fix pack image creation by @Audionut in 220c5f2 +# * fix request type checking by @Audionut in a06c1dd +# * Fix crash when no edit args provided (handle no/empty input safely) (#826) by @ca1m985 in 4cbebc4 +# * catch keyboard interruptions in cli_ui by @Audionut in ca76801 +# * don't remove nogrp type tags by default by @Audionut in 25b5f09 +# * AZ network fixes by @Audionut in 50595c2 +# * fix: only print overlay info if relevant by @Audionut in 4e6a5ce +# * add(meta): video container (#831) by @wastaken7 in c55094a +# * fix: frame overlay check tracker list check by @Audionut in 6d7fa3c +# * fix use_libplacebo false by @Audionut in d1044c9 +# * fix: improve container detection for different disc types (#835) by @wastaken7 in 073126c +# * set safe debugging languages by @Audionut in bfe964a +# * print automated ffmpeg tonemap checking failure by @Audionut in bebe17c +# * fix: don't overwrite ids from mediainfo by @Audionut in f3fa16c +# * HDT - auth token availability (#839) by @Audionut in 57af870 +# * Add support for bluray.com scraping for DVDs (#828) by @9Oc in 6274db1 +# * Update config-generator.py (#846) by @AzureBelmont in 070062c +# * fix(ANT): add type and audioformat to post data (#845) by @wastaken7 in 3424794 +# * refactor: replace UploadException with tracker_status handling, where applicable (#840) by @wastaken7 in 502e40d +# * cleanup handling for android by @Audionut in 1702d3d +# * add support for qui reverse proxy (#833) by @Audionut in 9a9b3c4 +# * improvement: avoid re-executing 
validate_credentials by temporarily saving tokens in meta (#834) by @wastaken7 in ff99d08 +# * release notes by @Audionut in a924df4 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.4.3...v6.0.0 +""" + +__version__ = "5.4.3" + +""" +Release Notes for version v5.4.3 (2025-09-19): + +# ## What's Changed +# +# * category regex tweak by @Audionut in f7c02d1 +# * Fix HUNO UHD remux (#767) by @oxidize9779 in 1bb0ae8 +# * Update to banned groups ULCX.py (#770) by @Zips-sipZ in dd0fdd9 +# * fix(HDT): update base URL (#766) by @wastaken7 in bb16dc3 +# * fix(BJS): Remove Ultrawide tag detection from remaster tags (#768) by @wastaken7 in 99e1788 +# * Added support for AvistaZ (#769) by @wastaken7 in 5bdf3cd +# * TL - api upload update by @Audionut in 341248a +# * add tonemapping header to more sites by @Audionut in 307ba71 +# * fix existing tonemapped status by @Audionut in 4950b08 +# * HDB - fix additional space in name when atmos by @Audionut in 8733c65 +# * fix bad space by @Audionut in 9165411 +# * set df encoding by @Audionut in 323a365 +# * TL api tweaks by @Audionut in 9fbde8f +# * TL - fix search existing option when api by @Audionut in 534ece7 +# * TL - add debugging by @Audionut in ab37785 +# * fix bad copy/paste by @Audionut in 6d25afd +# * TL - login update by @Audionut in 677cee8 +# * git username mapping by @Audionut in 60ed690 +# * FNP - remove a group for banned release groups (#775) by @flowerey in ab4f79a +# * Added support for CinemaZ, refactor Z sites to reuse common codebase (#777) by @wastaken7 in f14066f +# * Update titles of remux for HDB (#778) by @GizmoBal in b9473cb +# * Added support for GreatPosterWall (#779) by @wastaken7 in 4dc1b65 +# * SHRI - language handling in name by @Audionut in 5ee449f +# * fix(GPW) - timeout, screenshots, check available slots (#789) by @wastaken7 in 5862df4 +# * fix(AvistaZ sites) - languages, resolution, naming, rules (#782) by @wastaken7 in 10bf73f +# * add argument trackers remove by @Audionut in 1b0c549 +# * add(region.py) - Kocowa+ (#790) by @wastaken7 in da0b39a +# * fix(CBR.py) - UnboundLocalError when uploading a full disc (#791) by @wastaken7 in dbe3964 +# * Fix HUNO bit rate detection (#792) by @oxidize9779 in da1b891 +# * SHRI - remove dual audio by @Audionut in 5f94385 +# * add argument -sort (#796) by @Audionut in 0d0f1a4 +# * add config options for ffmpeg (#798) by @Audionut in 0dc4275 +# * add venv to .gitignore (#797) by @Tiberio in 5edfbeb +# * strip multiple spaces from bdinfo (#786) by @Audionut in 38a09aa +# * fix SHRI dual audio brain fart by @Audionut in 8623b18 +# * BHD - request search support (#773) by @Audionut in f0f5685 +# * can't spell by @Audionut in 159fc0f +# * update DP ban list (#800) by @emb3r in 42dd363 +# * fix(Avistaz) - add XviD/DivX to meta (#793) by @wastaken7 in a797844 +# * Remove TOCASHARE from supported sites (#802) by @wastaken7 in cf25142 +# * conform to GPW description image rules (#804) by @GuillaumedeVolpiano in 24c625e +# * add(get_name.py) - year for DVD's, audio for DVDRip's (#799) by @wastaken7 in adfb263 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.4.2...v5.4.3 +""" + + +""" +Release Notes for version v5.4.2 (2025-09-03): + +# ## What's Changed +# +# * enhance(PHD): add search requests option, tags and other changes (#749) by @wastaken7 in 1c970ce +# * enhance(BT): use tmdb cache file and other changes (#750) by @wastaken7 in a793060 +# * 
enhance(HDS): add search requests option and other changes (#751) by @wastaken7 in b0f88e3 +# * python does python things by @Audionut in 057d2be +# * FNP - fix banned groups (#753) by @flower in 54c5c32 +# * more python quoting fixes by @Audionut in d8a6779 +# * MOAR quotes by @Audionut in 7a62585 +# * chore: fix incompatible f-strings with python 3.9 (#754) by @wastaken7 in 9a8f190 +# * fix(HUNO) - add multi audio, UHD BluRay naming (#756) by @wastaken7 in 5b41f4d +# * fix default tracker list through edit process by @Audionut in 354e9c1 +# * move sanatize meta definition by @Audionut in 9d2991b +# * catch mkbrr config error by @Audionut in 34e05f9 +# * Added HDT (HD-Torrents) to client.py to allow tracker removal (#760) by @FortKnox1337 in 6c5bbc5 +# * fix(PHD): add BD resolution, basic description, remove aka from title (#761) by @wastaken7 in 8459a45 +# * fix(DC): Resize images in description generation (#762) by @wastaken7 in 41d7173 +# * add(client.py): skip more trackers (#763) by @wastaken7 in 61dfd4a +# * HUNO - unit3d torrent download by @Audionut in 637a145 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.4.1...v5.4.2 +""" + + +""" +Release Notes for version v5.4.1 (2025-09-02): + +# ## What's Changed +# +# * fix missing trackers for language processing (#747) by @wastaken7 in 34d0b4b +# * add missing function to common by @Audionut in 33d5aec +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.4.0...v5.4.1 +""" + + +""" +Release Notes for version v5.4.0 (2025-09-02): + +# +# ## RELEASE NOTES +# - Blutopia has a peer scraping issue that resulted in UNIT3D codebase being updated, requiring torrent files to be created site side. See https://github.com/HDInnovations/UNIT3D/pull/4910 +# - With the infohash being randomized site side, UA can no longer create valid torrent files for client injection, and instead the torrent file needs to be downloaded for client injection. +# - All UNIT3D based sites have been updated to prevent any issues moving forward as other sites update their UNIT3D codebase. +# - This will cause small slowdown in the upload process, as each torrent file is downloaded from corresponding sites. +# - Announce URLS for the supported sites are no longer needed in config, check example-config.py for the removed announce urls. +# +# ## WHAT'S NEW +# - UA can now search for related requests for the uploaded content, allowing you to quickly and easily see which requests can be filled by your upload. 
+# - Request checking via config option (see example-config) or new arg (see --help) +# - Only ASC, BJS and ULCX supported currently +# - Added a new arg to skip auto torrent searching +# +# --- +# +# ## What's Changed +# +# * Added support for PTSKIT (#730) by @wastaken7 in 19ccbe5 +# * add missing site details (#731) by @wastaken7 in e96cd15 +# * LCD - fix region, mediainfo, naming (#732) by @wastaken7 in de38dba +# * SPD - fix and changes (#727) by @wastaken7 in 16d310c +# * BLU - update torrent injection (#736) by @wastaken7 in a2d14af +# * Fix BHD tracker matching (#740) by @backstab5983 in 80b4337 +# * fix(SPD): send description to BBCode-compatible field (#738) by @wastaken7 in 95e5ab7 +# * Update HDB.py to clean size bbcode (#734) by @9Oc in 8d15765 +# * Update existing client-tracker search to add 3 more trackers (#728) by @FortKnox1337 in 3dcbb7c +# * correct screens track mapping and timeout by @Audionut in c9d5466 +# * skip auto torrent as arg by @Audionut in b78bb0a +# * fix queue handling when all trackers already in client by @Audionut in aae803f +# * skip pathed torrents when edit mode by @Audionut in eafb38c +# * preserve sat true by @Audionut in ffaddd4 +# * ULCX - remove hybrid from name by @Audionut in 1f02274 +# * fix existing torrent search when not storage directory and not qbit by @Audionut in 85e653f +# * DP - no group tagging by @Audionut in f4e236d +# * HDB - music category by @Audionut in 6a12335 +# * Option - search tracker requests (#718) by @Audionut in 2afce5b +# * add tracker list debug by @Audionut in 5418f05 +# * enhance(ASC): add localized TMDB data and search requests option (#743) by @wastaken7 in e2a3963 +# * refactor unit3d torrent handling (#741) by @Audionut in 56b3b14 +# * enhance(DC): httpx, MediaInfo for BDs, and upload split (#744) by @wastaken7 in de98c6e +# * PT- ensure audio_pt and legenda_pt flags only apply to European Portuguese (#725) by @Thiago in f238fc9 +# * fix TAoE banned group checking by @Audionut in 1e8633c +# * enhance(BJS): add localized TMDB data and search requests option (#746) by @wastaken7 in e862496 +# * redact passkeys from debug prints by @Audionut in 89809bb +# * clarify request usage by @Audionut in 5afafc0 +# * BJS also does request searching by @Audionut in d87f060 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.6...v5.4.0 +""" + + +""" +Release Notes for version v5.3.6 (2025-08-22): + +# ## What's Changed +# +# * fix docker mkbrr version by @Audionut in 69a1384 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.5...v5.3.6 +""" + + +""" +Release Notes for version v5.3.5 (2025-08-22): + +# ## What's Changed +# +# * TL - cleanup torrent file handling (#714) by @wastaken7 in 011d588 +# * ANT tag reminder by @Audionut in fbb8c2f +# * Added support for FunFile (#717) by @wastaken7 in 6436d34 +# * ULCX - aka check by @Audionut in 3b30132 +# * ANT - manual commentary flag (#720) by @wastaken7 in d8fd725 +# * [FnP] Fix resolutions, types and add banned release groups (#721) by @flower in 5e38b0e +# * Revert "Dockerfile Improvements (#710)" by @Audionut in c85e83d +# * fix release script by @Audionut in d86999d +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.4...v5.3.5 +""" + + +""" +Release Notes for version v5.3.4 (2025-08-18): + +# +# ## RELEASE NOTES +# - UA can now tonemap Dolby Vision profile 
5 and HLG files. +# - Requires a compatible ffmpeg (get latest), see https://github.com/Audionut/Upload-Assistant/pull/706 +# - Adjust the related ffmpeg option in config, if you have a suitable ffmpeg installed, in order to skip the automated check +# +# --- +# +# ## What's Changed +# +# * RF - now needs 2fa enabled to upload by @Audionut in e731e27 +# * TL - fix outdated attribute (#701) by @wastaken7 in ebabb5d +# * Fix typo in source flag when uploading to SHRI (#703) by @backstab5983 in 0e5bb28 +# * Catch conformance error from mediainfo and warn users (#704) by @Khoa Pham in febe0f1 +# * Add correct country get to IMDb (#708) by @Audionut in e09dbf2 +# * catch empty array from btn by @Audionut in 77b539a +# * highlight tracker removal by @Audionut in 95a9e54 +# * Fix img_host and None types (#707) by @frenchcutgreenbean in c34e6be +# * Option - libplacebo tonemapping (#706) by @Audionut in 3fc3c1a +# * fix docker tagging by @Audionut in 0071c71 +# * clean empty bbcode from descriptions by @Audionut in 73b40b9 +# * require api key to search by @Audionut in ce7bec6 +# * Dockerfile Improvements (#710) by @Slikkster in 0b50d36 +# * restore docker apt update by @Audionut in a57e514 +# * PHD - fix region logic (#709) by @wastaken7 in 5e1c541 +# * fix unit3d trackers not accept valid tvdb by @Audionut in 309c54e +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.3...v5.3.4 +""" + + +""" +Release Notes for version v5.3.3 (2025-08-14): + +# +# ## RELEASE NOTES +# - New module added requiring update via requirements.txt. See README for instructions. +# +# --- +# +# ## What's Changed +# +# * use all of result when specific is NoneType by @Audionut in 15faaad +# * don't print guessit error in imdb by @Audionut in 3b21998 +# * add support for multiple announce links (#691) by @wastaken7 in 4a623d7 +# * Added support for PHD (#689) by @wastaken7 in 1170f46 +# * pass meta to romaji by @Audionut in 6594f2c +# * DC - API update (#695) by @wastaken7 in 14380f2 +# * remove trackers found in client (#683) by @Audionut in 3207fd3 +# * Add service Chorki (#690) by @razinares in fa16ebf +# * fix docker mediainfo install (#699) by @Audionut in aa84c07 +# * Option - send upload urls to discord (#694) by @Audionut in 29fbcf5 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.2...v5.3.3 +""" + + +""" +Release Notes for version v5.3.2 (2025-08-11): + +# ## What's Changed +# +# * AR - catch multiple dots in name by @Audionut in 5d5164b +# * correct meta object before inputting data by @Audionut in 166a1a5 +# * guessit fallback by @Audionut in eccef19 +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.3.1...v5.3.2 +""" + + +""" +Release Notes for version v5.3.1 (2025-08-10): + +# ## What's Changed +# +# * TVDB series name not nonetype by @Audionut in 1def355 +# * remove compatibility tracks from dupe/dubbed checking by @Audionut in 48e922e +# * fix onlyID (#677) by @Audionut in 29b8caf +# * BT & BJS - fix language, add user input (#678) by @wastaken7 in 51d89c5 +# * fix: update SP category logic (#679) by @groggy9788 in 9ed3b2d +# * update mkbrr and add threading control (#680) by @Audionut in 316afe1 +# * add tv support for emby (#681) by @Audionut in 0de649b +# * add service XUMO by @Audionut in 633f151 +# * Other minor updates and improvements +# +# **Full Changelog**: 
https://github.com/Audionut/Upload-Assistant/compare/v5.3.0...v5.3.1
+"""
+
+
+"""
+Release Notes for version v5.3.0 (2025-08-08):
+
+#
+# ## NOTES
+# - From the previous release, screenshots in descriptions were modified. Check the options in the example-config to handle to taste, particularly https://github.com/Audionut/Upload-Assistant/blob/f45e4dd87472ab31b79569f97e3bea62e27940e0/data/example-config.py#L70
+#
+#
+# ## RELEASE NOTES
+# - UA will no longer 'just pick the top result suggested by TMDb'.
+# - Instead, title parsing has been significantly improved. UA now uses a weight-based system that relies on the title name, AKA name and year (a rough sketch of the idea follows these notes).
+# - Old scene releases such as will easily defeat the title parsing; however, these releases will get an IMDB ID from srrdb, negating this issue. Poorly named P2P releases are exactly that.
+# - Unfortunately, not only are there many, many releases with exactly matching names and release years, but TMDb's own sorting algorithm also doesn't always return the correct result first.
+# - This means that a prompt is required. UA will display a shortened list of results for you to select from, and allows manual entry of the correct TMDb ID, such as /.
+# - Given that UA would previously have just selected the first result, which could have been incorrect some percentage of the time, the net result should be a better overall user experience: a wrong return previously required manual intervention anyway, and may have gone unnoticed, leading to lackluster results.
+# - As always, feeding the correct IDs into UA leads to a better experience. There are many options to accomplish this automatically, and users should familiarize themselves with the options outlined in the example-config and/or user-args.json
+# - Overall SubsPlease handling should be greatly improved......if you have TVDB login details.
+#
+# ## NOTEWORTHY UPDATES
+# - Two new trackers, BT and BJS, have been added thanks to @wastaken7
+# - PSS was removed as offline
+# - The edit pathway, when correcting information, should now correctly handle existing args thanks to @ppkhoa
+# - Some additional context has been added regarding ffmpeg screen capture issues, particularly on seedboxes; also see https://github.com/Audionut/Upload-Assistant/wiki/ffmpeg---max-workers-issues
+# - Additional trackers have been added for getting existing ids, but they are currently only available via auto torrent searching
+# - Getting data from trackers now has a cool-off period. This should not be noticed under normal circumstances. PTP has a 60 second cool-off period, which was chosen to minimize interference with other tools.
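+#
+# A minimal sketch of the weight-based matching idea described above, assuming difflib
+# string similarity; the weights, year tolerance and helper names here are illustrative
+# assumptions, not UA's actual implementation.
+#
+# ```python
+# from difflib import SequenceMatcher
+#
+# def similarity(a: str, b: str) -> float:
+#     # Case-insensitive string similarity in the range 0.0-1.0
+#     return SequenceMatcher(None, a.lower(), b.lower()).ratio()
+#
+# def score_candidate(search_title, search_year, result_title, result_aka, result_year):
+#     score = 0.6 * similarity(search_title, result_title)      # main title weight
+#     if result_aka:
+#         score += 0.2 * similarity(search_title, result_aka)   # AKA title weight
+#     if search_year and result_year:
+#         # allow a one-year tolerance for festival vs. release year drift
+#         score += 0.2 * (1.0 if abs(int(search_year) - int(result_year)) <= 1 else 0.0)
+#     return score
+#
+# # Candidates whose best score falls below a chosen threshold would trigger
+# # the manual-selection prompt mentioned above.
+# ```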
+# +# --- +# +# ## What's Changed +# +# * update install/update instructions by @Audionut in 6793709 +# * TMDB retry (#646) by @Audionut in 84554d8 +# * fix missing tvdb credential checks by @Audionut in 28b0561 +# * cleanup ptp description/images handling by @Audionut in 271fc5f +# * fix bad copy/paste by @Audionut in d075a11 +# * set the ptp_imagelist by @Audionut in 3905248 +# * add option to select specific new files for queue (#648) by @Audionut in 8de31e3 +# * TMDB retry, set object by @Audionut in 12436ff +# * robust framerate by @Audionut in 955be6d +# * add clarity of max workers issues on seedboxes by @Audionut in d38f265 +# * add linux ffmpeg check by @Audionut in 89bf550 +# * ffmpeg - point to wiki by @Audionut in 6d6246b +# * generic max workers error print by @Audionut in 71d00c0 +# * handle specific ffmpeg complex error by @Audionut in 6e104ea +# * frame overlay print behind debug by @Audionut in 72804de +# * Log_file - save debug logs (#653) by @Audionut in 482dce5 +# * SPD - fix imdb in search existing (#656) by @Audionut in a640da6 +# * Skip torrents for AL if they don't have a MAL ID (#651) by @PythonCoderAS in 045bb71 +# * overrides - import at top by @Audionut in bb662e2 +# * ignore mkbrr binaries by @Audionut in 37f3d1c +# * Don't discard original args, override them (#660) by @Khoa Pham in 9554f21 +# * remove PSS (#663) by @Audionut in 31a6c57 +# * ULCX - remove erroneous space in name by @Audionut in 5bb5806 +# * fix subplease service check by @Audionut in 9fa53ba +# * fix tmdb secondary title search by @Audionut in bf77018 +# * imdb - get more crew info (#665) by @wastaken7 in 208f65c +# * Added support for BJS (#649) by @wastaken7 in 61fb607 +# * BJS - add internal flag (#668) by @wastaken7 in 3cb93f5 +# * BT - refactor (#669) by @wastaken7 in d1c6d83 +# * BJS - safe string handling of description file by @Audionut in 7c1ef78 +# * BT - safe string handling of description file by @Audionut in 67b1fce +# * rTorrent debugging by @Audionut in fb31951 +# * Update release notes handling (#671) by @Audionut in f45e4dd +# * Fix manual tracker mode (#673) by @Audionut in fdf3b54 +# * BT and BJS fixes (#672) by @wastaken7 in c478149 +# * fix: python compatibility in BJS (#674) by @wastaken7 in 9535259 +# * Add arg, skip-dupe-asking (#675) by @Audionut in 7844ce6 +# * BHD - fix tracker found match by @Audionut in 4a82aed +# * TL - fix description uploading in api mode by @Audionut in d36002e +# * ffmpeg - only first video streams by @Audionut in 85fc9ca +# * Get language from track title (#676) by @Audionut in 013aed1 +# * TMDB/IMDB searching refactor and EMBY handling (#637) by @Audionut in f68625d +# * Other minor updates and improvements +# +# **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/v5.2.1...v5.3.0 +""" + """ Release Notes for version v5.2.1 (2025-07-30): @@ -73,13 +917,13 @@ """ Changelog for version 5.1.5.2 (2025-07-19): -## What's Changed -* Update README to include supported trackers list by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/619 -* Get correct discord config in upload.py by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/621 -* DC - Remove file extensions from upload filename before torrent upload by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/622 -* Fixed a DC edition check -* Fixed a tracker status check - +## What's Changed +* Update README to include supported trackers list by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/619 +* Get correct discord 
config in upload.py by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/621 +* DC - Remove file extensions from upload filename before torrent upload by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/622 +* Fixed a DC edition check +* Fixed a tracker status check + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.5.1...5.1.5.2 """ @@ -88,8 +932,8 @@ """ Changelog for version 5.1.5.1 (2025-07-19): -- Language bases fixes. - +- Language bases fixes. + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.5...5.1.5.1 """ @@ -98,28 +942,28 @@ """ Changelog for version 5.1.5 (2025-07-18): -## What's Changed -* Fix LT edit name by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/595 -* HUNO encode checks by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/600 -* Update ULCX Banned Release Groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/601 -* Fix filenames in Description when uploading TV [ ] by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/603 -* Handles None imdb_id string by @jacobcxdev in https://github.com/Audionut/Upload-Assistant/pull/606 -* Fix variable reuse by @moontime-goose in https://github.com/Audionut/Upload-Assistant/pull/607 -* Add image restriction to DigitalCore by @PythonCoderAS in https://github.com/Audionut/Upload-Assistant/pull/609 -* Dp banned groups by @OrbitMPGH in https://github.com/Audionut/Upload-Assistant/pull/611 -* centralized language handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/604 -* Add randomness to image taking function and cleanup by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/608 -* ASC - remove dependency on tracker API by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/610 -* BT - remove dependency on tracker API by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/612 -* Add LDU support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/613 -* Other fixes here and there. 
- -## New Contributors -* @jacobcxdev made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/606 -* @moontime-goose made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/607 -* @PythonCoderAS made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/609 -* @OrbitMPGH made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/611 - +## What's Changed +* Fix LT edit name by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/595 +* HUNO encode checks by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/600 +* Update ULCX Banned Release Groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/601 +* Fix filenames in Description when uploading TV [ ] by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/603 +* Handles None imdb_id string by @jacobcxdev in https://github.com/Audionut/Upload-Assistant/pull/606 +* Fix variable reuse by @moontime-goose in https://github.com/Audionut/Upload-Assistant/pull/607 +* Add image restriction to DigitalCore by @PythonCoderAS in https://github.com/Audionut/Upload-Assistant/pull/609 +* Dp banned groups by @OrbitMPGH in https://github.com/Audionut/Upload-Assistant/pull/611 +* centralized language handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/604 +* Add randomness to image taking function and cleanup by @Hielito2 in https://github.com/Audionut/Upload-Assistant/pull/608 +* ASC - remove dependency on tracker API by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/610 +* BT - remove dependency on tracker API by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/612 +* Add LDU support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/613 +* Other fixes here and there. + +## New Contributors +* @jacobcxdev made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/606 +* @moontime-goose made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/607 +* @PythonCoderAS made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/609 +* @OrbitMPGH made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/611 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.4.1...5.1.5 """ @@ -128,8 +972,8 @@ """ Changelog for version 5.1.4.1 (2025-07-11): -* Fix: string year for replacement. - +* Fix: string year for replacement. 
+ **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.4...5.1.4.1 """ @@ -138,17 +982,17 @@ """ Changelog for version 5.1.4 (2025-07-10): -## What's Changed -* DP - remove image host requirements by @jschavey in https://github.com/Audionut/Upload-Assistant/pull/593 -* Fixed torf torrent creation when a single file from folder -* Fixed some year matching regex that was regressing title searching -* Fixed torrent id searching from support sites -* Updated ULCX banned groups and naming standards -* Updated BLU to use name as per IMDb - -## New Contributors -* @jschavey made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/593 - +## What's Changed +* DP - remove image host requirements by @jschavey in https://github.com/Audionut/Upload-Assistant/pull/593 +* Fixed torf torrent creation when a single file from folder +* Fixed some year matching regex that was regressing title searching +* Fixed torrent id searching from support sites +* Updated ULCX banned groups and naming standards +* Updated BLU to use name as per IMDb + +## New Contributors +* @jschavey made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/593 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.3.1...5.1.4 """ @@ -157,8 +1001,8 @@ """ Changelog for version 5.1.3.1 (2025-07-08): -* Fixed disc based torrent creation - +* Fixed disc based torrent creation + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.3...5.1.3.1 """ @@ -167,9 +1011,9 @@ """ Changelog for version 5.1.3 (2025-07-08): -* Fixed en checking in audio -* Fixed torrent links - +* Fixed en checking in audio +* Fixed torrent links + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.2.4...5.1.3 """ @@ -178,11 +1022,11 @@ """ Changelog for version 5.1.2.4 (2025-07-08): -## What's Changed -* Update example-config.py by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/589 -* Correct mediainfo validation - - +## What's Changed +* Update example-config.py by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/589 +* Correct mediainfo validation + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.2.3...5.1.2.4 """ @@ -191,42 +1035,42 @@ """ Changelog for version 5.1.2.3 (2025-07-07): -## What's Changed -* region.py - add Pluto TV by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/583 -* Onlyimage by @edge20200 in https://github.com/Audionut/Upload-Assistant/pull/582 -* ASC - changes and fixes by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/581 -* Print cleaning and sanitation by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/580 -* HDS - description tweaks by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/585 -* (Update) ULCX banned groups by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/586 -* ASC - add custom layout config by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/584 -* Added support for DigitalCore by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/577 -* Fix upload to UTP by @IevgenSobko in https://github.com/Audionut/Upload-Assistant/pull/587 -* Fix torrent creation for foldered content to properly exclude bad files -* Validate Unique ID in mediainfo -* Cleaned up the UA presentation in console (see below) -* Refactored the dual/dubbed/bloated audio handling to catch some edge cases -* Fix linux dvd handling. 
maybe...... -* Updated auto torrent matching to catch more matches -* Run an auto config updater for edge's image host change -* Added a catch for incorrect tmdb id from BHD. Instead of allowing only an int for tmdb id, BHD changed to a string movie or tv/id arrangement, which means all manner of *plainly incorrect* ids can be returned from their API. -* Added language printing handling in descriptions using common.py, when language is not in mediainfo -* Added non-en dub warning, and skips for BHD/ULCX -* Changed -fl to be set at 100% by default -* Better auto IMDb edition handling -* Fixed an OE existing search bug that's been in the code since day dot -* Other little tweaks - -## Notes -Some large changes to the UA feedback during processing. Much more streamlined. -Two new config options: -* print_tracker_messages: False, - controls whether to print site api/html feedback on upload. -* print_tracker_links: True, - controls whether to print direct uploaded torrent links where possible. - -Even in debug mode, the console should now be sanitized of private details. There may be some edge cases, please report. - -## New Contributors -* @IevgenSobko made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/587 - +## What's Changed +* region.py - add Pluto TV by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/583 +* Onlyimage by @edge20200 in https://github.com/Audionut/Upload-Assistant/pull/582 +* ASC - changes and fixes by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/581 +* Print cleaning and sanitation by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/580 +* HDS - description tweaks by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/585 +* (Update) ULCX banned groups by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/586 +* ASC - add custom layout config by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/584 +* Added support for DigitalCore by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/577 +* Fix upload to UTP by @IevgenSobko in https://github.com/Audionut/Upload-Assistant/pull/587 +* Fix torrent creation for foldered content to properly exclude bad files +* Validate Unique ID in mediainfo +* Cleaned up the UA presentation in console (see below) +* Refactored the dual/dubbed/bloated audio handling to catch some edge cases +* Fix linux dvd handling. maybe...... +* Updated auto torrent matching to catch more matches +* Run an auto config updater for edge's image host change +* Added a catch for incorrect tmdb id from BHD. Instead of allowing only an int for tmdb id, BHD changed to a string movie or tv/id arrangement, which means all manner of *plainly incorrect* ids can be returned from their API. +* Added language printing handling in descriptions using common.py, when language is not in mediainfo +* Added non-en dub warning, and skips for BHD/ULCX +* Changed -fl to be set at 100% by default +* Better auto IMDb edition handling +* Fixed an OE existing search bug that's been in the code since day dot +* Other little tweaks + +## Notes +Some large changes to the UA feedback during processing. Much more streamlined. +Two new config options: +* print_tracker_messages: False, - controls whether to print site api/html feedback on upload. +* print_tracker_links: True, - controls whether to print direct uploaded torrent links where possible. + +Even in debug mode, the console should now be sanitized of private details. 
There may be some edge cases, please report. + +## New Contributors +* @IevgenSobko made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/587 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.1...5.1.2 """ @@ -235,19 +1079,19 @@ """ Changelog for version 5.1.1 (2025-06-28): -## What's Changed -* HDT - screens and description changes by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/575 -* HDS - load custom descriptions by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/576 -* fix DVD processing on linux by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/574 -* ASC - improve fallback data by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/578 -* is_scene - Fix crash when is_all_lowercase is not defined by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/579 -* fixed the test run prints in the readme -* OTW - add resolution to name with DVD type sources -* BHD - nfo file uploads -* ULCX - fix search_year: aka - year in title when tv and condition met -* PTP - move the youtube check so that it only asks when actually uploading - - +## What's Changed +* HDT - screens and description changes by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/575 +* HDS - load custom descriptions by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/576 +* fix DVD processing on linux by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/574 +* ASC - improve fallback data by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/578 +* is_scene - Fix crash when is_all_lowercase is not defined by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/579 +* fixed the test run prints in the readme +* OTW - add resolution to name with DVD type sources +* BHD - nfo file uploads +* ULCX - fix search_year: aka - year in title when tv and condition met +* PTP - move the youtube check so that it only asks when actually uploading + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.1.0...5.1.1 """ @@ -256,33 +1100,33 @@ """ Changelog for version 5.1.0 (2025-06-22): -## What's Changed -* Updated get category function by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/536 -* Set default value for FrameRate by @minicoz in https://github.com/Audionut/Upload-Assistant/pull/555 -* Update LCD.py by @a1Thiago in https://github.com/Audionut/Upload-Assistant/pull/562 -* DP - Fix: Subtitle language check ignores English by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/561 -* refactor id handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/548 -* make discord bot work by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/551 -* Added support for HD-Space by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/568 -* Added support for BrasilTracker by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/569 -* Added support for ASC by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/560 -* Properly restore key to original value by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/573 -* OTW - update naming for DVD and REMUX -* Fixed an outlier is DVD source handling -* Fixed the discord bot to only load when being used and skip when debug -* Fixed existing image handling from PTP when not single files -* Added feedback when trackers were being skipped because of language checks -* Better dupe check handling 
for releases that only list DV when they're actually DV+HDR -* Fixed manual tag handling when anime -* Fixed only_id arg handling -* Fixed an aka bug from the last release that could skip aka -* Fixed double HC in HUNO name -* Added language checking for CBR -* Fixed only use tvdb if valid credentials - -## New Contributors -* @minicoz made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/555 - +## What's Changed +* Updated get category function by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/536 +* Set default value for FrameRate by @minicoz in https://github.com/Audionut/Upload-Assistant/pull/555 +* Update LCD.py by @a1Thiago in https://github.com/Audionut/Upload-Assistant/pull/562 +* DP - Fix: Subtitle language check ignores English by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/561 +* refactor id handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/548 +* make discord bot work by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/551 +* Added support for HD-Space by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/568 +* Added support for BrasilTracker by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/569 +* Added support for ASC by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/560 +* Properly restore key to original value by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/573 +* OTW - update naming for DVD and REMUX +* Fixed an outlier is DVD source handling +* Fixed the discord bot to only load when being used and skip when debug +* Fixed existing image handling from PTP when not single files +* Added feedback when trackers were being skipped because of language checks +* Better dupe check handling for releases that only list DV when they're actually DV+HDR +* Fixed manual tag handling when anime +* Fixed only_id arg handling +* Fixed an aka bug from the last release that could skip aka +* Fixed double HC in HUNO name +* Added language checking for CBR +* Fixed only use tvdb if valid credentials + +## New Contributors +* @minicoz made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/555 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.5.1...5.1.0 """ @@ -291,8 +1135,8 @@ """ Changelog for version 5.0.5.1 (2025-06-02): -* Ensure proper category sets from sites - +* Ensure proper category sets from sites + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.5...5.0.5.1 """ @@ -301,20 +1145,20 @@ """ Changelog for version 5.0.5 (2025-06-02): -## What's Changed -* CBR - Initial modq setup by @a1Thiago in https://github.com/Audionut/Upload-Assistant/pull/546 -* Remove 'pyrobase' requirement by @ambroisie in https://github.com/Audionut/Upload-Assistant/pull/547 -* DP - fixed to allow when en subs -* fixed cat set from auto unit3d -* updated AR naming to take either scene name or folder/file name. 
-* changed the aka diff check to only allow (automated) aka when difference is greater than 70% -* protect screenshots from ptp through bbcode shenanigans -* added some filtering for automated imdb edition handling - -## New Contributors -* @a1Thiago made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/546 -* @ambroisie made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/547 - +## What's Changed +* CBR - Initial modq setup by @a1Thiago in https://github.com/Audionut/Upload-Assistant/pull/546 +* Remove 'pyrobase' requirement by @ambroisie in https://github.com/Audionut/Upload-Assistant/pull/547 +* DP - fixed to allow when en subs +* fixed cat set from auto unit3d +* updated AR naming to take either scene name or folder/file name. +* changed the aka diff check to only allow (automated) aka when difference is greater than 70% +* protect screenshots from ptp through bbcode shenanigans +* added some filtering for automated imdb edition handling + +## New Contributors +* @a1Thiago made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/546 +* @ambroisie made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/547 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.4.2...5.0.5 """ @@ -323,8 +1167,8 @@ """ Changelog for version 5.0.4.2 (2025-05-30): -* Fix the validation check when torrent_storage_dir - +* Fix the validation check when torrent_storage_dir + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.4.1...5.0.4.2 """ @@ -333,12 +1177,12 @@ """ Changelog for version 5.0.4.1 (2025-05-30): -* Fixed an issue from the last release that broke existing torrent validation in qbittorent -* DP - added modq option -* Better handling of REPACK detection -* Console cleaning -* Add Hybrid to filename detection - +* Fixed an issue from the last release that broke existing torrent validation in qbittorent +* DP - added modq option +* Better handling of REPACK detection +* Console cleaning +* Add Hybrid to filename detection + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.4...5.0.4.1 """ @@ -347,20 +1191,20 @@ """ Changelog for version 5.0.4 (2025-05-28): -## What's Changed -* Add additional arr instance support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/544 -* fixed anon arg -* fixed tvdb season/episode naming at HUNO -* fixed python title handling for edition and added some bad editions to skip -* fixed blank BHD descriptions also skipping images -* HDT - added quick skip for non-supported resolutions -* more tag regex shenanigans -* PTT - use only Polish name when original language is Polish (no aka) -* arr handling fixes -* PTP - if only_id, then skip if imdb_id != 0 -* reduced is_scene to one api all - - +## What's Changed +* Add additional arr instance support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/544 +* fixed anon arg +* fixed tvdb season/episode naming at HUNO +* fixed python title handling for edition and added some bad editions to skip +* fixed blank BHD descriptions also skipping images +* HDT - added quick skip for non-supported resolutions +* more tag regex shenanigans +* PTT - use only Polish name when original language is Polish (no aka) +* arr handling fixes +* PTP - if only_id, then skip if imdb_id != 0 +* reduced is_scene to one api all + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.3.3...5.0.4 """ @@ -369,10 
+1213,10 @@ """ Changelog for version 5.0.3.3 (2025-05-27): -* Fix unnecessary error feedback on empty aither claims -* implement same for banned groups detection -* fix DVD error - +* Fix unnecessary error feedback on empty aither claims +* implement same for banned groups detection +* fix DVD error + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.3.2...5.0.3.3 """ @@ -381,8 +1225,8 @@ """ Changelog for version 5.0.3.2 (2025-05-26): -* Fix arr always return valid data - +* Fix arr always return valid data + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.3.1...5.0.3.2 """ @@ -391,8 +1235,8 @@ """ Changelog for version 5.0.3.1 (2025-05-26): -* Fixed a bad await breaking HUNO - +* Fixed a bad await breaking HUNO + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.3...5.0.3.1 """ @@ -401,38 +1245,38 @@ """ Changelog for version 5.0.3 (2025-05-26): -## What's Changed -* update mediainfo by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/514 -* HUNO - naming update by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/535 -* add arr support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/538 -* Tracker specific custom link_dir and linking fallback by @brah in https://github.com/Audionut/Upload-Assistant/pull/537 -* Group tagging fixes -* Updated PTP url checking to catch old PTP torrent comments with non-ssl addy. (match more torrents) -* Whole bunch of console print cleaning -* Changed Limit Queue to only limit based on successful uploads -* Fixed PTP to not grab description in instances where it was not needed -* Set the TMP directory in docker to ensure description editing works in all cases -* Other little tweaks and fixes - -## NOTES +## What's Changed +* update mediainfo by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/514 +* HUNO - naming update by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/535 +* add arr support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/538 +* Tracker specific custom link_dir and linking fallback by @brah in https://github.com/Audionut/Upload-Assistant/pull/537 +* Group tagging fixes +* Updated PTP url checking to catch old PTP torrent comments with non-ssl addy. (match more torrents) +* Whole bunch of console print cleaning +* Changed Limit Queue to only limit based on successful uploads +* Fixed PTP to not grab description in instances where it was not needed +* Set the TMP directory in docker to ensure description editing works in all cases +* Other little tweaks and fixes + +## NOTES * Added specific mediainfo binary for DVD's. Update pymediainfo to use latest mediainfo for everything else. Defaulting to user installation because normal site-packages is not writeable Collecting pymediainfo Downloading pymediainfo-7.0.1-py3-none-manylinux_2_27_x86_64.whl.metadata (9.0 kB) Downloading pymediainfo-7.0.1-py3-none-manylinux_2_27_x86_64.whl (6.0 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.0/6.0 MB 100.6 MB/s eta 0:00:00 Installing collected packages: pymediainfo -Successfully installed pymediainfo-7.0.1 -* With arr support, if the file is in your sonarr/radarr instance, it will pull data from the arr. -* Updated --webdv as the HYBRID title set. Works better than using --edition - -## New configs -* for tracker specific linking directory name instead of tracker acronym. -* to use original folder client injection model if linking failure. 
-* to keep description images when is True - -## New Contributors -* @brah made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/537 - +Successfully installed pymediainfo-7.0.1 +* With arr support, if the file is in your sonarr/radarr instance, it will pull data from the arr. +* Updated --webdv as the HYBRID title set. Works better than using --edition + +## New configs +* for tracker specific linking directory name instead of tracker acronym. +* to use original folder client injection model if linking failure. +* to keep description images when is True + +## New Contributors +* @brah made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/537 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.2...5.0.3 """ @@ -441,9 +1285,9 @@ """ Changelog for version 5.0.2 (2025-05-20): -- gather tmdb tasks to speed process -- add backup config to git ignore - +- gather tmdb tasks to speed process +- add backup config to git ignore + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.1...5.0.2 """ @@ -452,9 +1296,9 @@ """ Changelog for version 5.0.1 (2025-05-19): -* Fixes DVD -* Fixes BHD description handling - +* Fixes DVD +* Fixes BHD description handling + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/5.0.0...5.0.1 """ @@ -463,37 +1307,37 @@ """ Changelog for version 5.0.0 (2025-05-19): -## A major version bump given some significant code changes - -## What's Changed -* Get edition from IMDB by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/519 -* Update LT.py by @Aerglonus in https://github.com/Audionut/Upload-Assistant/pull/520 -* (Add) mod queue opt-in option to OTW tracker by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/524 -* Add test run action by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/525 -* Prep is getting out of hand by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/518 -* Config generator and updater by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/522 -* Image rehosting use os.chdir as final fallback by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/529 -* Get edition from IMDB by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/519 -* Added a fallback to cover issue that causes glob to not find images when site rehosting images -* Fixed an issue that send dubbed as dual audio to MTV -* Fixed an issue when HDB descriptions returned None from bbcode cleaning -* Stopped using non-English names from TVDB when original language is not English -* Caught an error when TMDB is None from BHD -* Added function so that series packs can get TVDB name -* Other little tweaks and fixes - -## NOTES -- There is now a config generator and updater. config-generator.py. Usage is in the readme and docker wiki. As the name implies, you can generate new configs and update existing configs. -- If you are an existing user wanting to use the config-generator, I highly recommend to update your client names to match those set in the example-config https://github.com/Audionut/Upload-Assistant/blob/5f27e01a7f179e0ea49796dcbcae206718366423/data/example-config.py#L551 -- The names that match what you set as the default_torrent_client https://github.com/Audionut/Upload-Assistant/blob/5f27e01a7f179e0ea49796dcbcae206718366423/data/example-config.py#L140 -- This will make your experience with the config-generator much more pleasurable. 
-- BHD api/rss keys for BHD id/description parsing are now located with the BHD tracker settings and not within the DEFAULT settings section. It will continue to work with a notice being printed for the meantime, but please update your configs as I will permanently retire the old settings in time. -- modq for UNIT3D sites has been fixed in the UNIT3D source thanks to @AnabolicsAnonymous let me know if a site you use has updated to the latest UNIT3D source code with modq api fix, and it can be added to that sites UA file. -- You may notice that the main landing page now contains some Test Run passing displays. This does some basic checking that won't catch every error, but it may be useful for those who update directly from master branch. I'll keep adding to this over time to better catch any errors, If this display shows error, probably don't git pull. - -## New Contributors -* @Aerglonus made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/520 - +## A major version bump given some significant code changes + +## What's Changed +* Get edition from IMDB by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/519 +* Update LT.py by @Aerglonus in https://github.com/Audionut/Upload-Assistant/pull/520 +* (Add) mod queue opt-in option to OTW tracker by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/524 +* Add test run action by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/525 +* Prep is getting out of hand by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/518 +* Config generator and updater by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/522 +* Image rehosting use os.chdir as final fallback by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/529 +* Get edition from IMDB by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/519 +* Added a fallback to cover issue that causes glob to not find images when site rehosting images +* Fixed an issue that send dubbed as dual audio to MTV +* Fixed an issue when HDB descriptions returned None from bbcode cleaning +* Stopped using non-English names from TVDB when original language is not English +* Caught an error when TMDB is None from BHD +* Added function so that series packs can get TVDB name +* Other little tweaks and fixes + +## NOTES +- There is now a config generator and updater. config-generator.py. Usage is in the readme and docker wiki. As the name implies, you can generate new configs and update existing configs. +- If you are an existing user wanting to use the config-generator, I highly recommend to update your client names to match those set in the example-config https://github.com/Audionut/Upload-Assistant/blob/5f27e01a7f179e0ea49796dcbcae206718366423/data/example-config.py#L551 +- The names that match what you set as the default_torrent_client https://github.com/Audionut/Upload-Assistant/blob/5f27e01a7f179e0ea49796dcbcae206718366423/data/example-config.py#L140 +- This will make your experience with the config-generator much more pleasurable. +- BHD api/rss keys for BHD id/description parsing are now located with the BHD tracker settings and not within the DEFAULT settings section. It will continue to work with a notice being printed for the meantime, but please update your configs as I will permanently retire the old settings in time. 
+- modq for UNIT3D sites has been fixed in the UNIT3D source thanks to @AnabolicsAnonymous let me know if a site you use has updated to the latest UNIT3D source code with modq api fix, and it can be added to that sites UA file. +- You may notice that the main landing page now contains some Test Run passing displays. This does some basic checking that won't catch every error, but it may be useful for those who update directly from master branch. I'll keep adding to this over time to better catch any errors, If this display shows error, probably don't git pull. + +## New Contributors +* @Aerglonus made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/520 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.4.1...5.0.0 """ @@ -502,12 +1346,12 @@ """ Changelog for version 4.2.4.1 (2025-05-10): -## What's Changed -* Make search imdb not useless by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/517 -* Remove brackets from TVDB titles -* Fix PTP adding group. - - +## What's Changed +* Make search imdb not useless by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/517 +* Remove brackets from TVDB titles +* Fix PTP adding group. + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.4...4.2.4.1 """ @@ -516,18 +1360,18 @@ """ Changelog for version 4.2.4 (2025-05-10): -## What's Changed -* Update PTT.py by @btTeddy in https://github.com/Audionut/Upload-Assistant/pull/511 -* Update OTW banned release groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/512 -* tmdb from imdb updates by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/515 -* Use TVDB title by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/516 -* HDB descriptions by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/498 -* Fixed manual frame code changes breaking packed images handling -* DP - removed nordic from name per their request -* Fixed PTP groupID not being set in meta -* Added a config option for screenshot header when tonemapping - - +## What's Changed +* Update PTT.py by @btTeddy in https://github.com/Audionut/Upload-Assistant/pull/511 +* Update OTW banned release groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/512 +* tmdb from imdb updates by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/515 +* Use TVDB title by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/516 +* HDB descriptions by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/498 +* Fixed manual frame code changes breaking packed images handling +* DP - removed nordic from name per their request +* Fixed PTP groupID not being set in meta +* Added a config option for screenshot header when tonemapping + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.3.1...4.2.4 """ @@ -536,8 +1380,8 @@ """ Changelog for version 4.2.3.1 (2025-05-05): -* Fix cat call - +* Fix cat call + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.3...4.2.3.1 """ @@ -546,16 +1390,16 @@ """ Changelog for version 4.2.3 (2025-05-05): -## What's Changed -* Update PSS banned release groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/504 -* Add BR streaming services by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/505 -* Fixed PTP manual concert type -* Fixed PTP trump/subs logic (again) -* Fixed PT that I broke when fixing PTT -* Catch imdb str id from 
HUNO -* Skip auto PTP searching if TV - does not effect manual ID or client searching - - +## What's Changed +* Update PSS banned release groups by @backstab5983 in https://github.com/Audionut/Upload-Assistant/pull/504 +* Add BR streaming services by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/505 +* Fixed PTP manual concert type +* Fixed PTP trump/subs logic (again) +* Fixed PT that I broke when fixing PTT +* Catch imdb str id from HUNO +* Skip auto PTP searching if TV - does not effect manual ID or client searching + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.2...4.2.3 """ @@ -564,26 +1408,26 @@ """ Changelog for version 4.2.2 (2025-05-03): -## What's Changed -* Update Service Mapping NOW by @yoyo292949158 in https://github.com/Audionut/Upload-Assistant/pull/494 -* (Add) mod queue opt-in option to ULCX tracker by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/491 -* Fix typo in HDB comps by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/492 -* Check lowercase names against srrdb for proper tag by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/495 -* Additional bbcode editing on PTP/HDB/BHD/BLU by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/493 -* Further bbcode conversions by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/496 -* Stop convert_comparison_to_centered to crush spaces in names by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/500 -* TOCA remove EUR as region by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/501 -* CBR - add dvdrip by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/502 -* CBR - aka and year updats for name by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/503 -* Added validation to BHD description images -* Fixed an issue with PTP/THR when no IMDB -* BHD/AR graceful error handling -* Fix PTT tracker setup -* Added 'hd.ma.5.1' as a bad group tag to skip - -## New Contributors -* @AnabolicsAnonymous made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/491 - +## What's Changed +* Update Service Mapping NOW by @yoyo292949158 in https://github.com/Audionut/Upload-Assistant/pull/494 +* (Add) mod queue opt-in option to ULCX tracker by @AnabolicsAnonymous in https://github.com/Audionut/Upload-Assistant/pull/491 +* Fix typo in HDB comps by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/492 +* Check lowercase names against srrdb for proper tag by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/495 +* Additional bbcode editing on PTP/HDB/BHD/BLU by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/493 +* Further bbcode conversions by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/496 +* Stop convert_comparison_to_centered to crush spaces in names by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/500 +* TOCA remove EUR as region by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/501 +* CBR - add dvdrip by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/502 +* CBR - aka and year updats for name by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/503 +* Added validation to BHD description images +* Fixed an issue with PTP/THR when no IMDB +* BHD/AR graceful error handling +* Fix PTT tracker setup +* Added 'hd.ma.5.1' as a bad group tag to skip + +## New Contributors +* @AnabolicsAnonymous made their first 
contribution in https://github.com/Audionut/Upload-Assistant/pull/491 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.1...4.2.2 """ @@ -592,23 +1436,23 @@ """ Changelog for version 4.2.1 (2025-04-29): -## What's Changed -* Update RAS.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/483 -* Add support for Portugas by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/482 -* OTW - use year in TV title by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/481 -* Adding ADN as a provider by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/484 -* Allow '-s 0' option when uploading to HDB by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/485 -* CBR: Refactor get_audio function to handle multiple languages by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/488 -* Screens handling updates by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/486 -* Add comparison images by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/487 -* Should be improvements to PTP hardcoded subs handling -* Corrected AR imdb url -* Fixed an issue in a tmdb episode pathway that would fail without tvdb -* Cleaned more private details from debug prints -* Fixed old BHD code to respect only supported BDMV regions -* Update OE against their image hosts rule -* Added passtheima.ge support - +## What's Changed +* Update RAS.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/483 +* Add support for Portugas by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/482 +* OTW - use year in TV title by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/481 +* Adding ADN as a provider by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/484 +* Allow '-s 0' option when uploading to HDB by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/485 +* CBR: Refactor get_audio function to handle multiple languages by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/488 +* Screens handling updates by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/486 +* Add comparison images by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/487 +* Should be improvements to PTP hardcoded subs handling +* Corrected AR imdb url +* Fixed an issue in a tmdb episode pathway that would fail without tvdb +* Cleaned more private details from debug prints +* Fixed old BHD code to respect only supported BDMV regions +* Update OE against their image hosts rule +* Added passtheima.ge support + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.0.1...4.2.1 """ @@ -617,12 +1461,12 @@ """ Changelog for version 4.2.0.1 (2025-04-24): -- OE - only allow with English subs if not English audio -- Fixed the bad copy/paste that missed the ULCX torrent url -- Added the new trackers args auto api to example config -- Fixed overwriting custom descriptions with bad data -- Updated HDR check to find and correctly check for relevant strings. - +- OE - only allow with English subs if not English audio +- Fixed the bad copy/paste that missed the ULCX torrent url +- Added the new trackers args auto api to example config +- Fixed overwriting custom descriptions with bad data +- Updated HDR check to find and correctly check for relevant strings. 
+ **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.2.0...4.2.0.1 """ @@ -631,21 +1475,21 @@ """ Changelog for version 4.2.0 (2025-04-24): -## What's Changed -* store and use any found torrent data by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/452 -* Automated bluray region-distributor parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/471 -* add image upload retry logic by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/472 -* TVC Allow 1080p HEVC by @yoyo292949158 in https://github.com/Audionut/Upload-Assistant/pull/478 -* Small fixes to AL title formatting by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/477 -* fixed a bug that skipped tvdb episode data handling -* made THR work - -## Config additions -* A bunch of new config options starting here: https://github.com/Audionut/Upload-Assistant/blob/b382ece4fde22425dd307d1098198fb3fc9e0289/data/example-config.py#L183 - -## New Contributors -* @yoyo292949158 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/478 - +## What's Changed +* store and use any found torrent data by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/452 +* Automated bluray region-distributor parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/471 +* add image upload retry logic by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/472 +* TVC Allow 1080p HEVC by @yoyo292949158 in https://github.com/Audionut/Upload-Assistant/pull/478 +* Small fixes to AL title formatting by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/477 +* fixed a bug that skipped tvdb episode data handling +* made THR work + +## Config additions +* A bunch of new config options starting here: https://github.com/Audionut/Upload-Assistant/blob/b382ece4fde22425dd307d1098198fb3fc9e0289/data/example-config.py#L183 + +## New Contributors +* @yoyo292949158 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/478 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.9...4.2.0 """ @@ -654,24 +1498,24 @@ """ Changelog for version 4.1.9 (2025-04-20): -## What's Changed -* PTP. Do not ask if files with en-GB subs are trumpable. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/459 -* Add tag for releases without a group name (PSS) by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/461 -* In PTP descriptions, do not replace [code] by [quote]. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/463 -* In HDB descriptions, do not replace [code] by [quote]. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/466 -* handle cleanup on mac os without termination by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/465 -* Refactor CBR.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/467 -* Description Customization by @zercsy in https://github.com/Audionut/Upload-Assistant/pull/468 -* Fixed THR -* Added an option that allows sites to skip upload when content does not contain English -* Fixed cleanup on Mac OS -* Fixed an error causing regenerated torrents to fail being added to client -* Added fallback search for HDB when no IMDB -* Other minor fixes - -## New Contributors -* @zercsy made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/468 - +## What's Changed +* PTP. Do not ask if files with en-GB subs are trumpable. 
by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/459 +* Add tag for releases without a group name (PSS) by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/461 +* In PTP descriptions, do not replace [code] by [quote]. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/463 +* In HDB descriptions, do not replace [code] by [quote]. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/466 +* handle cleanup on mac os without termination by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/465 +* Refactor CBR.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/467 +* Description Customization by @zercsy in https://github.com/Audionut/Upload-Assistant/pull/468 +* Fixed THR +* Added an option that allows sites to skip upload when content does not contain English +* Fixed cleanup on Mac OS +* Fixed an error causing regenerated torrents to fail being added to client +* Added fallback search for HDB when no IMDB +* Other minor fixes + +## New Contributors +* @zercsy made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/468 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.8.1...4.1.9 """ @@ -680,8 +1524,8 @@ """ Changelog for version 4.1.8.1 (2025-04-15): -* Fixed a quote bug - +* Fixed a quote bug + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.8...4.1.8.1 """ @@ -690,16 +1534,16 @@ """ Changelog for version 4.1.8 (2025-04-14): -## What's Changed -* Correct typo to enable UA to set the 'Internal' tag on HDB. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/456 -* Updated AL upload by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/457 -* Run cleaning between items in a queue - fixes terminal issue when running a queue -* Fixed an error when imdb returns no results -* Fixes image rehosting was overwriting main image_list - -## New Contributors -* @b-igu made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/457 - +## What's Changed +* Correct typo to enable UA to set the 'Internal' tag on HDB. 
by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/456 +* Updated AL upload by @b-igu in https://github.com/Audionut/Upload-Assistant/pull/457 +* Run cleaning between items in a queue - fixes terminal issue when running a queue +* Fixed an error when imdb returns no results +* Fixes image rehosting was overwriting main image_list + +## New Contributors +* @b-igu made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/457 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.7...4.1.8 """ @@ -708,16 +1552,16 @@ """ Changelog for version 4.1.7 (2025-04-13): -## What's Changed -* Fix missing HHD config in example-config.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/455 -* Updated mkbrr including fix for BDMV torrent and symlink creation -* Fixed manual source with BHD -* Added nfo file upload support for DP -* Changed logo handling so individual sites can pull language specific logos -* Fixed an error with adding mkbrr regenerated torrents to client -* Refactored Torf torrent creation to be quicker - - +## What's Changed +* Fix missing HHD config in example-config.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/455 +* Updated mkbrr including fix for BDMV torrent and symlink creation +* Fixed manual source with BHD +* Added nfo file upload support for DP +* Changed logo handling so individual sites can pull language specific logos +* Fixed an error with adding mkbrr regenerated torrents to client +* Refactored Torf torrent creation to be quicker + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.6...4.1.7 """ @@ -726,15 +1570,15 @@ """ Changelog for version 4.1.6 (2025-04-12): -## What's Changed -* qBittorrent Option: Include Tracker as Tag - New sites SAM and UHD by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/454 -* fixed image retaking -* fixed pack images to be saved in unique file now that meta is deleted by default -* updated OE to check all mediainfo when language checking -* updated OTW to include resolution with DVD -* updated DP rule compliance - - +## What's Changed +* qBittorrent Option: Include Tracker as Tag - New sites SAM and UHD by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/454 +* fixed image retaking +* fixed pack images to be saved in unique file now that meta is deleted by default +* updated OE to check all mediainfo when language checking +* updated OTW to include resolution with DVD +* updated DP rule compliance + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.5...4.1.6 """ @@ -743,12 +1587,12 @@ """ Changelog for version 4.1.5 (2025-04-10): -## What's Changed -* Clean existing meta by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/451 -* Added frame overlays to disc based content -* Refactored ss_times - - +## What's Changed +* Clean existing meta by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/451 +* Added frame overlays to disc based content +* Refactored ss_times + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.4.1...4.1.5 """ @@ -757,11 +1601,11 @@ """ Changelog for version 4.1.4.1 (2025-04-09): -## What's Changed -* Minor fixes in TIK.py by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/449 -* Fixed year getting inserted into incorrect TV - - +## What's Changed +* Minor fixes in TIK.py by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/449 +* Fixed year 
getting inserted into incorrect TV + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.4...4.1.4.1 """ @@ -770,25 +1614,25 @@ """ Changelog for version 4.1.4 (2025-04-08): -## What's Changed -* Update SP.py to replace with . per upload guidelines by @tubaboy26 in https://github.com/Audionut/Upload-Assistant/pull/435 -* HUNO - remove region from name by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/441 -* Correct absolute episode number lookup by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/447 -* add more args overrides options by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/437 -* add rTorrent linking support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/390 -* Accept both relative and absolute path for the description filename. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/448 -* Updated dupe checking - mainly to allow uploads when more than 1 of a content is allowed -* Added an argument which cleans just the tmp directory for the current pathed content -* Hide some not important console prints behind debug -* Fixed HDR tonemapping -* Added config option to overlay some details on screenshots (currently only files) -* Adjust font size of screenshot overlays to match the resolution. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/442 -* Fixed manual year -* Other minor fixes - -## New Contributors -* @GizmoBal made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/442 - +## What's Changed +* Update SP.py to replace with . per upload guidelines by @tubaboy26 in https://github.com/Audionut/Upload-Assistant/pull/435 +* HUNO - remove region from name by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/441 +* Correct absolute episode number lookup by @ppkhoa in https://github.com/Audionut/Upload-Assistant/pull/447 +* add more args overrides options by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/437 +* add rTorrent linking support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/390 +* Accept both relative and absolute path for the description filename. by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/448 +* Updated dupe checking - mainly to allow uploads when more than 1 of a content is allowed +* Added an argument which cleans just the tmp directory for the current pathed content +* Hide some not important console prints behind debug +* Fixed HDR tonemapping +* Added config option to overlay some details on screenshots (currently only files) +* Adjust font size of screenshot overlays to match the resolution. 
by @GizmoBal in https://github.com/Audionut/Upload-Assistant/pull/442 +* Fixed manual year +* Other minor fixes + +## New Contributors +* @GizmoBal made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/442 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.3...4.1.4 """ @@ -797,11 +1641,11 @@ """ Changelog for version 4.1.3 (2025-04-02): -- All torrent creation issues should now be fixed -- Site upload issues are gracefully handled -- tvmaze episode title fallback -- Fix web/hdtv dupe handling - +- All torrent creation issues should now be fixed +- Site upload issues are gracefully handled +- tvmaze episode title fallback +- Fix web/hdtv dupe handling + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.2...4.1.3 """ @@ -810,15 +1654,15 @@ """ Changelog for version 4.1.2 (2025-03-30): -## What's Changed -* Added support for DarkPeers and Rastastugan by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/431 -* fixed HDB missing call for torf regeneration -* fixed cutoff screens handling when taking images -* fixed existing image timeout error causing UA to hard crash -* tweaked pathway to ensure no duplicate api calls -* fixed a duplicate import in PTP that could cause some python versions to hard error -* removed JPTV - +## What's Changed +* Added support for DarkPeers and Rastastugan by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/431 +* fixed HDB missing call for torf regeneration +* fixed cutoff screens handling when taking images +* fixed existing image timeout error causing UA to hard crash +* tweaked pathway to ensure no duplicate api calls +* fixed a duplicate import in PTP that could cause some python versions to hard error +* removed JPTV + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.1...4.1.2 """ @@ -827,17 +1671,17 @@ """ Changelog for version 4.1.1 (2025-03-30): -## What's Changed -* add argument --not-anime by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/430 -* fixed linking on linux when volumes have the same mount -* fixed torf torrent regeneration in MTV -* added null language check for tmdb logo (mostly useful for movies) -* fixed -* fixed ssrdb release matching print -* fixed tvdb season matching under some conditions (wasn't serious) - -Check v4.1.0 release notes if not already https://github.com/Audionut/Upload-Assistant/releases/tag/4.1.0 - +## What's Changed +* add argument --not-anime by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/430 +* fixed linking on linux when volumes have the same mount +* fixed torf torrent regeneration in MTV +* added null language check for tmdb logo (mostly useful for movies) +* fixed +* fixed ssrdb release matching print +* fixed tvdb season matching under some conditions (wasn't serious) + +Check v4.1.0 release notes if not already https://github.com/Audionut/Upload-Assistant/releases/tag/4.1.0 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.0.2...4.1.1 """ @@ -846,45 +1690,45 @@ """ Changelog for version 4.1.0.2 (2025-03-29): -**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.0.1...4.1.0.2 - -4..1.0 release notes: - -## New config options -See example-config.py -- and - add tv series logo to top of descriptions with size control -- - from the last release, adds tv series overview to description. 
Now includes season name and details if applicable, see below -- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path -- and for tvdb data support - -## Notes -- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. - -- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. -- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. -- BHD now supports torrent id instead of just hash. -- Some mkbrr updates, including support for and rehashing for sites as needed. -- TMDB searching should be improved. - - -See examples below for new logo and episode data handling. - - - -## What's Changed -* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 -* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 -* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 -* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 -* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 -* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 -* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 -* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 - -## New Contributors -* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 -* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 - +**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.0.1...4.1.0.2 + +4..1.0 release notes: + +## New config options +See example-config.py +- and - add tv series logo to top of descriptions with size control +- - from the last release, adds tv series overview to description. Now includes season name and details if applicable, see below +- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path +- and for tvdb data support + +## Notes +- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. + +- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. +- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. +- BHD now supports torrent id instead of just hash. +- Some mkbrr updates, including support for and rehashing for sites as needed. +- TMDB searching should be improved. + + +See examples below for new logo and episode data handling. 
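The note above describes the new qBittorrent (v5+) auto torrent matching only in prose. As a rough sketch of the idea, using the qbittorrent-api package already listed in requirements.txt; the host, credentials, function name, and the exact path comparison are illustrative assumptions, not Upload Assistant's actual implementation:

```python
# Illustrative sketch only, not UA's real code; connection details are placeholders.
import os
from typing import Optional

import qbittorrentapi


def find_matching_torrent(upload_path: str) -> Optional[str]:
    """Return the infohash of a client torrent whose data path matches upload_path, if any."""
    client = qbittorrentapi.Client(host="localhost", port=8080,
                                   username="admin", password="adminadmin")
    client.auth_log_in()
    target = os.path.normpath(upload_path)
    for torrent in client.torrents_info():
        # content_path is where the torrent's data lives on disk
        if os.path.normpath(torrent.content_path) == target:
            return torrent.hash
    return None
```

A hash found this way could then be reused the same way a manually supplied -th/--torrenthash value is, and the config option mentioned above simply skips the lookup.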
+ + + +## What's Changed +* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 +* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 +* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 +* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 +* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 +* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 +* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 +* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 + +## New Contributors +* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 +* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.6...4.1.0 """ @@ -893,45 +1737,45 @@ """ Changelog for version 4.1.0.1 (2025-03-29): -**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.0...4.1.0.1 - -From 4.1.0 - -## New config options -See example-config.py -- and - add tv series logo to top of descriptions with size control -- - from the last release, adds tv series overview to description. Now includes season name and details if applicable, see below -- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path -- and for tvdb data support - -## Notes -- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. - -- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. -- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. -- BHD now supports torrent id instead of just hash. -- Some mkbrr updates, including support for and rehashing for sites as needed. -- TMDB searching should be improved. - - -See examples below for new logo and episode data handling. 
- - - -## What's Changed -* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 -* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 -* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 -* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 -* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 -* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 -* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 -* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 - -## New Contributors -* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 -* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 - +**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.1.0...4.1.0.1 + +From 4.1.0 + +## New config options +See example-config.py +- and - add tv series logo to top of descriptions with size control +- - from the last release, adds tv series overview to description. Now includes season name and details if applicable, see below +- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path +- and for tvdb data support + +## Notes +- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. + +- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. +- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. +- BHD now supports torrent id instead of just hash. +- Some mkbrr updates, including support for and rehashing for sites as needed. +- TMDB searching should be improved. + + +See examples below for new logo and episode data handling. 
+ + + +## What's Changed +* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 +* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 +* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 +* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 +* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 +* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 +* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 +* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 + +## New Contributors +* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 +* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.6...4.1.0 """ @@ -940,41 +1784,41 @@ """ Changelog for version 4.1.0 (2025-03-29): -## New config options -See example-config.py -- and - add tv series logo to top of descriptions with size control -- - from the last release, adds tv series overview to description. Now includes season name and details if applicable, see below -- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path -- and for tvdb data support - -## Notes -- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. - -- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. -- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. -- BHD now supports torrent id instead of just hash. -- Some mkbrr updates, including support for and rehashing for sites as needed. -- TMDB searching should be improved. - - -See examples below for new logo and episode data handling. 
- - - -## What's Changed -* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 -* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 -* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 -* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 -* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 -* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 -* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 -* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 - -## New Contributors -* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 -* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 - +## New config options +See example-config.py +- and - add tv series logo to top of descriptions with size control +- - from the last release, adds tv series overview to description. Now includes season name and details if applicable, see below +- (qBitTorrent v5+ only) - don't automatically try and find a matching torrent from just the path +- and for tvdb data support + +## Notes +- UA will now try and automatically find a torrent from qBitTorrent (v5+ only) that matches any site based argument. If it finds a matching torrent, for instance from PTP, it will automatically set . In other words, you no longer need to set a site argument ( or or --whatever (or and/or ) as UA will now do this automatically if the path matches a torrent in client. Use the applicable config option to disable this default behavior. + +- TVDB requires token to be initially inputted, after which time it will be auto generated as needed. +- Automatic Absolute Order to Aired Order season/episode numbering with TVDB. +- BHD now supports torrent id instead of just hash. +- Some mkbrr updates, including support for and rehashing for sites as needed. +- TMDB searching should be improved. + + +See examples below for new logo and episode data handling. 
+ + + +## What's Changed +* BHD torrent id parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/417 +* Better title/year parsing for tmdb searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/416 +* feat: pull logo from tmdb by @markhc in https://github.com/Audionut/Upload-Assistant/pull/425 +* fix: logo displayed as None by @markhc in https://github.com/Audionut/Upload-Assistant/pull/427 +* Update region.py by @ikitub3 in https://github.com/Audionut/Upload-Assistant/pull/429 +* proper mkbrr handling by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/397 +* TVDB support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/423 +* qBitTorrent auto torrent grabing and rTorrent infohash support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/428 + +## New Contributors +* @markhc made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/425 +* @ikitub3 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/429 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.6...4.1.0 """ @@ -983,20 +1827,20 @@ """ Changelog for version 4.0.6 (2025-03-25): -## What's Changed -* update to improve 540 detection by @swannie-eire in https://github.com/Audionut/Upload-Assistant/pull/413 -* Update YUS.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/414 -* BHD - file/folder searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/415 -* Allow some hardcoded user overrides by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/411 -* option episode overview in description by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/418 -* Catch HUNO BluRay naming requirement by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/419 -* group tag regex by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/420 -* OTW - stop pre-filtering image hosts -* revert automatic episode title - -BHD auto searching does not currently return description/image links - - +## What's Changed +* update to improve 540 detection by @swannie-eire in https://github.com/Audionut/Upload-Assistant/pull/413 +* Update YUS.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/414 +* BHD - file/folder searching by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/415 +* Allow some hardcoded user overrides by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/411 +* option episode overview in description by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/418 +* Catch HUNO BluRay naming requirement by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/419 +* group tag regex by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/420 +* OTW - stop pre-filtering image hosts +* revert automatic episode title + +BHD auto searching does not currently return description/image links + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.5...4.0.6 """ @@ -1005,18 +1849,18 @@ """ Changelog for version 4.0.5 (2025-03-21): -## What's Changed -* Refactor TOCA.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/410 -* fixed an imdb search returning bad results -* don't run episode title checks on season packs or episode == 0 -* cleaned PTP mediainfo in packed content (scrubbed by PTP upload parser anyway) -* fixed some sites duplicating episode title -* docker should only pull needed mkbrr 
binaries, not all of them -* removed private details from some console prints -* fixed handling in ptp mediainfo check -* fixed arg work with no value -* removed rehosting from OTW, they seem fine with ptpimg now. - +## What's Changed +* Refactor TOCA.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/410 +* fixed an imdb search returning bad results +* don't run episode title checks on season packs or episode == 0 +* cleaned PTP mediainfo in packed content (scrubbed by PTP upload parser anyway) +* fixed some sites duplicating episode title +* docker should only pull needed mkbrr binaries, not all of them +* removed private details from some console prints +* fixed handling in ptp mediainfo check +* fixed arg work with no value +* removed rehosting from OTW, they seem fine with ptpimg now. + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.4...4.0.5 """ @@ -1025,23 +1869,23 @@ """ Changelog for version 4.0.4 (2025-03-19): -## What's Changed -* get episode title from tmdb by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/403 -* supporting 540p by @swannie-eire in https://github.com/Audionut/Upload-Assistant/pull/404 -* LT - fix no distributor api endpoint by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/406 -* reset terminal fix -* ULCX content checks -* PTP - set EN sub flag when trumpable for HC's English subs -* PTP - fixed an issue where description images were not being parsed correctly -* Caught an IMDB issue when no IMDB is returned by metadata functions -* Changed the banned groups/claims checking to daily - -## Episode title data change -Instead of relying solely on guessit to catch episode titles, UA now pulls episode title information from TMDB. There is some pre-filtering to catch placeholder title information like 'Episode 2', but you should monitor your TV uploads. Setting with an empty space will clear the episode title. - -Conversely (reminder of already existing functionality), setting met with some title will force that episode title. - - +## What's Changed +* get episode title from tmdb by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/403 +* supporting 540p by @swannie-eire in https://github.com/Audionut/Upload-Assistant/pull/404 +* LT - fix no distributor api endpoint by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/406 +* reset terminal fix +* ULCX content checks +* PTP - set EN sub flag when trumpable for HC's English subs +* PTP - fixed an issue where description images were not being parsed correctly +* Caught an IMDB issue when no IMDB is returned by metadata functions +* Changed the banned groups/claims checking to daily + +## Episode title data change +Instead of relying solely on guessit to catch episode titles, UA now pulls episode title information from TMDB. There is some pre-filtering to catch placeholder title information like 'Episode 2', but you should monitor your TV uploads. Setting with an empty space will clear the episode title. + +Conversely (reminder of already existing functionality), setting met with some title will force that episode title. 
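The paragraph above only mentions that placeholder titles such as 'Episode 2' are pre-filtered before a TMDB episode title is accepted. A minimal sketch of that kind of check (the pattern list and function name are hypothetical, not the actual UA logic):

```python
# Hypothetical placeholder-title filter; patterns and naming are illustrative only.
import re

PLACEHOLDER_PATTERNS = [
    re.compile(r"^episode\s+\d+$", re.IGNORECASE),  # "Episode 2"
    re.compile(r"^e\d{1,3}$", re.IGNORECASE),       # "E02"
    re.compile(r"^\d{1,3}$"),                       # bare episode number
]


def clean_episode_title(tmdb_title: str) -> str:
    """Return the TMDB episode title, or an empty string if it looks like a placeholder."""
    title = (tmdb_title or "").strip()
    if any(pattern.match(title) for pattern in PLACEHOLDER_PATTERNS):
        return ""
    return title
```

An empty result corresponds to what the note describes for clearing the title manually, while a real title passes through unchanged; either way, TV uploads should still be spot-checked by hand.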
+ + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.3.1...4.0.4 """ @@ -1050,8 +1894,8 @@ """ Changelog for version 4.0.3.1 (2025-03-17): -- Fix erroneous AKA in title when AKA empty - +- Fix erroneous AKA in title when AKA empty + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.3...4.0.3.1 """ @@ -1060,16 +1904,16 @@ """ Changelog for version 4.0.3 (2025-03-17): -## What's Changed -* Update naming logic for SP Anime Uploads by @tubaboy26 in https://github.com/Audionut/Upload-Assistant/pull/399 -* Fix ITT torrent comment by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/400 -* Fix --cleanup without path -* Fix tracker casing -* Fix AKA - -## New Contributors -* @tubaboy26 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/399 - +## What's Changed +* Update naming logic for SP Anime Uploads by @tubaboy26 in https://github.com/Audionut/Upload-Assistant/pull/399 +* Fix ITT torrent comment by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/400 +* Fix --cleanup without path +* Fix tracker casing +* Fix AKA + +## New Contributors +* @tubaboy26 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/399 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.2...4.0.3 """ @@ -1078,15 +1922,15 @@ """ Changelog for version 4.0.2 (2025-03-15): -## What's Changed -* Update CBR.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/392 -* Update ITT.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/393 -* Added support for TocaShare by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/394 -* Force auto torrent management to false when using linking - -## New Contributors -* @wastaken7 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/392 - +## What's Changed +* Update CBR.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/392 +* Update ITT.py by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/393 +* Added support for TocaShare by @wastaken7 in https://github.com/Audionut/Upload-Assistant/pull/394 +* Force auto torrent management to false when using linking + +## New Contributors +* @wastaken7 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/392 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.1...4.0.2 """ @@ -1095,11 +1939,11 @@ """ Changelog for version 4.0.1 (2025-03-14): -- fixed a tracker handling error when answering no to title confirmation -- fixed imdb from srrdb -- strip matching distributor from title and add to meta object -- other little fixes - +- fixed a tracker handling error when answering no to title confirmation +- fixed imdb from srrdb +- strip matching distributor from title and add to meta object +- other little fixes + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0.3...4.0.1 """ @@ -1108,26 +1952,26 @@ """ Changelog for version 4.0.0.3 (2025-03-13): -- added platform to docker building -- fixed anime titling -- fixed aither dvdrip naming - -**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0.2...4.0.0.3 - -## Version 4 release notes: -## Breaking change -* When using trackers argument, or , you must now use a comma separated list. - -## Linking support in qBitTorrent -### This is not fully tested. 
-It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. -* You can specify to use symbolic or hard links -* -* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. - -## Reminder -* UA has mkbrr support -* You can specify an argument or set the config +- added platform to docker building +- fixed anime titling +- fixed aither dvdrip naming + +**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0.2...4.0.0.3 + +## Version 4 release notes: +## Breaking change +* When using trackers argument, or , you must now use a comma separated list. + +## Linking support in qBitTorrent +### This is not fully tested. +It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. +* You can specify to use symbolic or hard links +* +* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. + +## Reminder +* UA has mkbrr support +* You can specify an argument or set the config * UA loads binary files for the supported mkbrr OS. If you find mkbrr slower than the original torf implementation when hashing torrents, the mkbrr devs are likely to be appreciative of any reports. """ @@ -1136,26 +1980,26 @@ """ Changelog for version 4.0.0.2 (2025-03-13): -- two site files manually imported tmdbsimple. -- fixed R4E by adding the want tmdb data from the main tmdb api call, which negates the need to make a needless api call when uploading to R4E, and will shave around 2 seconds from the time it takes to upload. -- other site file will be fixed when I get around to dealing with that mess. - -**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0.1...4.0.0.2 - -## Version 4 release notes: -## Breaking change -* When using trackers argument, or , you must now use a comma separated list. - -## Linking support in qBitTorrent -### This is not fully tested. -It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. -* You can specify to use symbolic or hard links -* -* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. - -## Reminder -* UA has mkbrr support -* You can specify an argument or set the config +- two site files manually imported tmdbsimple. +- fixed R4E by adding the want tmdb data from the main tmdb api call, which negates the need to make a needless api call when uploading to R4E, and will shave around 2 seconds from the time it takes to upload. +- other site file will be fixed when I get around to dealing with that mess. + +**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0.1...4.0.0.2 + +## Version 4 release notes: +## Breaking change +* When using trackers argument, or , you must now use a comma separated list. + +## Linking support in qBitTorrent +### This is not fully tested. +It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. 
+* You can specify to use symbolic or hard links +* +* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. + +## Reminder +* UA has mkbrr support +* You can specify an argument or set the config * UA loads binary files for the supported mkbrr OS. If you find mkbrr slower than the original torf implementation when hashing torrents, the mkbrr devs are likely to be appreciative of any reports. """ @@ -1164,25 +2008,25 @@ """ Changelog for version 4.0.0.1 (2025-03-13): -- fix broken trackers handling -- fix client inject when not using linking. - -**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0...4.0.0.1 - -## Version 4 release notes: -## Breaking change -* When using trackers argument, or , you must now use a comma separated list. - -## Linking support in qBitTorrent -### This is not fully tested. -It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. -* You can specify to use symbolic or hard links -* -* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. - -## Reminder -* UA has mkbrr support -* You can specify an argument or set the config +- fix broken trackers handling +- fix client inject when not using linking. + +**Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/4.0.0...4.0.0.1 + +## Version 4 release notes: +## Breaking change +* When using trackers argument, or , you must now use a comma separated list. + +## Linking support in qBitTorrent +### This is not fully tested. +It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. +* You can specify to use symbolic or hard links +* +* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. + +## Reminder +* UA has mkbrr support +* You can specify an argument or set the config * UA loads binary files for the supported mkbrr OS. If you find mkbrr slower than the original torf implementation when hashing torrents, the mkbrr devs are likely to be appreciative of any reports. """ @@ -1191,30 +2035,30 @@ """ Changelog for version 4.0.0 (2025-03-13): -Pushing this as v4 given some significant code changes. - -## Breaking change -* When using trackers argument, or , you must now use a comma separated list. - -## Linking support in qBitTorrent -### This is not fully tested. -It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. -* You can specify to use symbolic or hard links -* -* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. - -## Reminder -* UA has mkbrr support -* You can specify an argument or set the config -* UA loads binary files for the supported mkbrr OS. If you find mkbrr slower than the original torf implementation when hashing torrents, the mkbrr devs are likely to be appreciative of any reports. 
- -## What's Changed -* move cleanup to file by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/384 -* async metadata calls by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/382 -* add initial linking support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/380 -* Refactor args parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/383 - - +Pushing this as v4 given some significant code changes. + +## Breaking change +* When using trackers argument, or , you must now use a comma separated list. + +## Linking support in qBitTorrent +### This is not fully tested. +It seems to be working fine on this windows box, but you absolutely should test with the argument to make sure it works on your system before putting it into production. +* You can specify to use symbolic or hard links +* +* Add one or many (local) paths which you want to contain the links, and UA will map the correct drive/volume for hardlinks. + +## Reminder +* UA has mkbrr support +* You can specify an argument or set the config +* UA loads binary files for the supported mkbrr OS. If you find mkbrr slower than the original torf implementation when hashing torrents, the mkbrr devs are likely to be appreciative of any reports. + +## What's Changed +* move cleanup to file by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/384 +* async metadata calls by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/382 +* add initial linking support by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/380 +* Refactor args parsing by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/383 + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.5...4.0.0 """ @@ -1223,13 +2067,13 @@ """ Changelog for version 3.6.5 (2025-03-12): -## What's Changed -* bunch of id related issues fixed -* if using , take that moment to validate and export the torrent file -* some prettier printing with torf torrent hashing -* mkbrr binary files by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/381 - - +## What's Changed +* bunch of id related issues fixed +* if using , take that moment to validate and export the torrent file +* some prettier printing with torf torrent hashing +* mkbrr binary files by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/381 + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.4...3.6.5 """ @@ -1238,9 +2082,9 @@ """ Changelog for version 3.6.4 (2025-03-09): -- Added option to use mkbrr https://github.com/autobrr/mkbrr (). About 4 times faster than torf for a file in cache . Can be set via config -- fixed empty HDB file/folder searching giving bad feedback print - +- Added option to use mkbrr https://github.com/autobrr/mkbrr (). About 4 times faster than torf for a file in cache . 
Can be set via config +- fixed empty HDB file/folder searching giving bad feedback print + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.3.1...3.6.4 """ @@ -1249,8 +2093,8 @@ """ Changelog for version 3.6.3.1 (2025-03-09): -- Fix BTN ID grabbing - +- Fix BTN ID grabbing + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.3...3.6.3.1 """ @@ -1259,25 +2103,25 @@ """ Changelog for version 3.6.3 (2025-03-09): -## Config changes -* As part of the effort to fix unresponsive terminals on unix systems, a new config option has been added , and an existing config option , now has a default setting even if commented out/not preset. -* Non-unix users (or users without terminal issue) should uncomment and modify these settings to taste -* https://github.com/Audionut/Upload-Assistant/blob/de7689ff36f76d7ba9b92afe1175b703a59cda65/data/example-config.py#L53 - -## What's Changed -* Create YUS.py by @fiftieth3322 in https://github.com/Audionut/Upload-Assistant/pull/373 -* remote_path as list by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/365 -* Correcting PROPER number namings in title by @Zips-sipZ in https://github.com/Audionut/Upload-Assistant/pull/378 -* Save extracted description images to disk (can be useful for rehosting to save the capture/optimization step) -* Updates/fixes to ID handling across the board -* Catch session interruptions in AR to ensure session is closed -* Work around a bug that sets empty description to None, breaking repeated processing with same meta -* Remote paths now accept list -* More effort to stop unix terminals shitting the bed - -## New Contributors -* @fiftieth3322 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/373 - +## Config changes +* As part of the effort to fix unresponsive terminals on unix systems, a new config option has been added , and an existing config option , now has a default setting even if commented out/not preset. 
+* Non-unix users (or users without terminal issue) should uncomment and modify these settings to taste +* https://github.com/Audionut/Upload-Assistant/blob/de7689ff36f76d7ba9b92afe1175b703a59cda65/data/example-config.py#L53 + +## What's Changed +* Create YUS.py by @fiftieth3322 in https://github.com/Audionut/Upload-Assistant/pull/373 +* remote_path as list by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/365 +* Correcting PROPER number namings in title by @Zips-sipZ in https://github.com/Audionut/Upload-Assistant/pull/378 +* Save extracted description images to disk (can be useful for rehosting to save the capture/optimization step) +* Updates/fixes to ID handling across the board +* Catch session interruptions in AR to ensure session is closed +* Work around a bug that sets empty description to None, breaking repeated processing with same meta +* Remote paths now accept list +* More effort to stop unix terminals shitting the bed + +## New Contributors +* @fiftieth3322 made their first contribution in https://github.com/Audionut/Upload-Assistant/pull/373 + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.2...3.6.3 """ @@ -1286,17 +2130,17 @@ """ Changelog for version 3.6.2 (2025-03-04): -## Update Notification -This release adds some new config options relating to update notifications: https://github.com/Audionut/Upload-Assistant/blob/a8b9ada38323c2f05b0f808d1d19d1d79c2a9acf/data/example-config.py#L9 - -## What's Changed -* Add proper2 and proper3 support by @Kha-kis in https://github.com/Audionut/Upload-Assistant/pull/371 -* added update notification -* HDB image rehosting updates -* updated srrdb handling -* other minor fixes - - +## Update Notification +This release adds some new config options relating to update notifications: https://github.com/Audionut/Upload-Assistant/blob/a8b9ada38323c2f05b0f808d1d19d1d79c2a9acf/data/example-config.py#L9 + +## What's Changed +* Add proper2 and proper3 support by @Kha-kis in https://github.com/Audionut/Upload-Assistant/pull/371 +* added update notification +* HDB image rehosting updates +* updated srrdb handling +* other minor fixes + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.1...3.6.2 """ @@ -1305,11 +2149,11 @@ """ Changelog for version 3.6.1 (2025-03-01): -- fix manual package screens uploading -- switch to subprocess for setting stty sane -- print version to console -- other minor fixes - +- fix manual package screens uploading +- switch to subprocess for setting stty sane +- print version to console +- other minor fixes + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.6.0...3.6.1 """ @@ -1318,10 +2162,10 @@ """ Changelog for version 3.6.0 (2025-02-28): -## What's Changed -* cleanup tasks by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/364 - - +## What's Changed +* cleanup tasks by @Audionut in https://github.com/Audionut/Upload-Assistant/pull/364 + + **Full Changelog**: https://github.com/Audionut/Upload-Assistant/compare/3.5.3.3...3.6.0 """ diff --git a/discordbot.py b/discordbot.py index c017f41c3..e4e6559b0 100644 --- a/discordbot.py +++ b/discordbot.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import asyncio import datetime import logging diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..62fc51c3f --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +services: + l4g-upload-assistant-cli: + image: 
ghcr.io/audionut/upload-assistant:web-ui + container_name: UA + restart: unless-stopped + networks: + - yournetwork #change to the network with ur torrent instance + ports: + - "5000:5000" #change left side to your specific port. + environment: + - ENABLE_WEB_UI=true + entrypoint: /bin/bash + command: -c "source /venv/bin/activate && python /Upload-Assistant/web_ui/server.py & tail -f /dev/null" + volumes: + - /path/to/torrents/:/data/torrents/:rw #map this to qbit download location, map exactly as qbittorent template on both sides. + - /mnt/user/appdata/Upload-Assistant/data/config.py:/Upload-Assistant/data/config.py:rw #map this to config.py exactly + - /mnt/user/appdata/qBittorrent/data/BT_backup/:/torrent_storage_dir:rw #map this to your qbittorrent bt_backup + - /mnt/user/appdata/Upload-Assistant/tmp/:/Upload-Assistant/tmp:rw #map this to your /tmp folder. +networks: + "yournetwork": #change this to your network + external: true \ No newline at end of file diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100644 index 000000000..ed53fb910 --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e + +# Activate virtual environment (where Flask is installed) +source /venv/bin/activate + +# Start Web UI if enabled +if [ "$ENABLE_WEB_UI" = "true" ]; then + echo "Starting Upload Assistant Web UI..." + cd /Upload-Assistant + python web_ui/server.py & + WEB_UI_PID=$! + echo "Web UI started with PID: $WEB_UI_PID" + echo "Access at: http://localhost:5000" +fi + +# If no command is provided, or the first argument is not an executable, +# default to running upload.py with the supplied arguments. +if [ $# -eq 0 ]; then + set -- python upload.py +elif ! command -v "$1" >/dev/null 2>&1 && [ ! -x "$1" ]; then + set -- python upload.py "$@" +fi + +# Execute the main command +exec "$@" diff --git a/requirements.txt b/requirements.txt index c54596901..95c682076 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,27 +1,29 @@ aiofiles aiohttp anitopy +babel +bbcode beautifulsoup4 bencode.py -click cli-ui +click +cloudscraper deluge-client discord ffmpeg-python guessit httpx Jinja2 -langcodes +langcodes[data] +lxml nest_asyncio packaging Pillow psutil +pycountry pyimgbox -pymediainfo; sys_platform == "win32" -pymediainfo==6.0.1; sys_platform == "darwin" -pymediainfo==6.0.1; sys_platform == "linux" +pymediainfo==7.0.1 pyotp -pyoxipng pyparsebluray qbittorrent-api requests @@ -30,5 +32,8 @@ tmdbsimple torf tqdm transmission_rpc +tvdb-v4-official unidecode urllib3 +flask>=2.3.0 +flask-cors>=4.0.0 diff --git a/src/add_comparison.py b/src/add_comparison.py index 0fb19b7d4..cf7317a49 100644 --- a/src/add_comparison.py +++ b/src/add_comparison.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import re import json diff --git a/src/apply_overrides.py b/src/apply_overrides.py index 7c28391c4..3553af08b 100644 --- a/src/apply_overrides.py +++ b/src/apply_overrides.py @@ -1,5 +1,9 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import json +import traceback from src.console import console +from src.args import Args +from data.config import config async def get_source_override(meta, other_id=False): @@ -107,9 +111,6 @@ async def parse_tmdb_id(tmdb_id, category=None): async def apply_args_to_meta(meta, args): - from src.args import Args - from data.config import config - try: arg_keys_to_track = set() arg_values = {} @@ -186,7 +187,6 @@ async def apply_args_to_meta(meta, args): except 
Exception as e: console.print(f"[red]Error processing arguments: {e}") if meta['debug']: - import traceback console.print(traceback.format_exc()) return meta diff --git a/src/args.py b/src/args.py index 827d00bc8..6cd26a31e 100644 --- a/src/args.py +++ b/src/args.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- import argparse import urllib.parse @@ -13,6 +14,7 @@ class ShortHelpFormatter(argparse.HelpFormatter): Custom formatter for short help (-h) Only displays essential options. """ + def __init__(self, prog): super().__init__(prog, max_help_position=40, width=80) @@ -48,6 +50,7 @@ class CustomArgumentParser(argparse.ArgumentParser): """ Custom ArgumentParser to handle short (-h) and long (--help) help messages. """ + def print_help(self, file=None): """ Show short help for `-h` and full help for `--help` @@ -65,6 +68,7 @@ class Args(): """ Parse Args """ + def __init__(self, config): self.config = config pass @@ -75,15 +79,17 @@ def parse(self, args, meta): usage="upload.py [path...] [options]", ) - parser.add_argument('path', nargs='+', help="Path to file/directory (in single/double quotes is best)") + parser.add_argument('path', nargs='*', help="Path to file/directory (in single/double quotes is best)") parser.add_argument('--queue', nargs=1, required=False, help="(--queue queue_name) Process an entire folder (files/subfolders) in a queue") parser.add_argument('-lq', '--limit-queue', dest='limit_queue', nargs=1, required=False, help="Limit the amount of queue files processed", type=int, default=0) + parser.add_argument('-sc', '--site-check', dest='site_check', action='https://github.com/store_true', required=False, help="Just search sites for suitable uploads and create log file, no uploading", default=False) + parser.add_argument('-su', '--site-upload', dest='site_upload', nargs=1, required=False, help="Specify a single tracker, and it will process the site searches and upload.", type=str, default=None) parser.add_argument('--unit3d', action='https://github.com/store_true', required=False, help="[parse a txt output file from UNIT3D-Upload-Checker]") parser.add_argument('-s', '--screens', nargs=1, required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) parser.add_argument('-comps', '--comparison', nargs='+', required=False, help="Use comparison images from a folder (input folder path). 
See: https://github.com/Audionut/Upload-Assistant/pull/487", default=None) parser.add_argument('-comps_index', '--comparison_index', nargs=1, required=False, help="Which of your comparison indexes is the main images (required when comps)", type=int, default=None) parser.add_argument('-mf', '--manual_frames', nargs=1, required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) - parser.add_argument('-c', '--category', nargs=1, required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) + parser.add_argument('-c', '--category', nargs=1, required=False, help="Category [movie, tv, fanres]", choices=['movie', 'tv', 'fanres'], dest="manual_category") parser.add_argument('-t', '--type', nargs=1, required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV, DVDRIP]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv', 'dvdrip'], dest="manual_type") parser.add_argument('--source', nargs=1, required=False, help="Source [Blu-ray, BluRay, DVD, DVD5, DVD9, HDDVD, WEB, HDTV, UHDTV, LaserDisc, DCP]", choices=['Blu-ray', 'BluRay', 'DVD', 'DVD5', 'DVD9', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc', 'DCP'], dest="manual_source") parser.add_argument('-res', '--resolution', nargs=1, required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) @@ -113,6 +119,10 @@ def parse(self, args, meta): parser.add_argument('-oil', '--only-if-languages', dest='has_languages', nargs='*', required=False, help="Require at least one of the languages to upload. Comma separated list e.g. 'English, French, Spanish'", type=str) parser.add_argument('-ns', '--no-seed', action='https://github.com/store_true', required=False, help="Do not add torrent to the client") parser.add_argument('-year', '--year', dest='manual_year', nargs=1, required=False, help="Override the year found", type=int, default=0) + parser.add_argument('-mc', '--commentary', dest='manual_commentary', action='https://github.com/store_true', required=False, help="Manually indicate whether commentary tracks are included") + parser.add_argument('-sfxs', '--sfx-subtitles', dest='sfx_subtitles', action='https://github.com/store_true', required=False, help="Manually indicate whether subtitles with visual enhancements like animations, effects, or backgrounds are included") + parser.add_argument('-e', '--extras', dest='extras', action='https://github.com/store_true', required=False, help="Indicates that extras are included. 
Mainly used for Blu-rays discs") + parser.add_argument('-sort', '--sorted-filelist', dest='sorted_filelist', action='https://github.com/store_true', required=False, help="Use the largest video file for processing instead of the first video file found") parser.add_argument('-ptp', '--ptp', nargs=1, required=False, help="PTP torrent id/permalink", type=str) parser.add_argument('-blu', '--blu', nargs=1, required=False, help="BLU torrent id/link", type=str) parser.add_argument('-aither', '--aither', nargs=1, required=False, help="Aither torrent id/link", type=str) @@ -124,6 +134,8 @@ def parse(self, args, meta): parser.add_argument('-bhd', '--bhd', nargs=1, required=False, help="BHD torrent_id/link", type=str) parser.add_argument('-huno', '--huno', nargs=1, required=False, help="HUNO torrent id/link", type=str) parser.add_argument('-ulcx', '--ulcx', nargs=1, required=False, help="ULCX torrent id/link", type=str) + parser.add_argument('-req', '--search_requests', action='https://github.com/store_true', required=False, help="Search for matching requests on supported trackers", default=None) + parser.add_argument('-sat', '--skip_auto_torrent', action='https://github.com/store_true', required=False, help="Skip automated qbittorrent client torrent searching", default=None) parser.add_argument('-onlyID', '--onlyID', action='https://github.com/store_true', required=False, help="Only grab meta ids (tmdb/imdb/etc) from tracker, not description/image links.", default=None) parser.add_argument('--foreign', dest='foreign', action='https://github.com/store_true', required=False, help="Set for TIK Foreign category") parser.add_argument('--opera', dest='opera', action='https://github.com/store_true', required=False, help="Set for TIK Opera & Musical category") @@ -131,13 +143,14 @@ def parse(self, args, meta): parser.add_argument('-disctype', '--disctype', nargs=1, required=False, help="Type of disc for TIK (BD100, BD66, BD50, BD25, NTSC DVD9, NTSC DVD5, PAL DVD9, PAL DVD5, Custom, 3D)", type=str) parser.add_argument('--untouched', dest='untouched', action='https://github.com/store_true', required=False, help="Set when a completely untouched disc at TIK") parser.add_argument('-manual_dvds', '--manual_dvds', nargs=1, required=False, help="Override the default number of DVD's (eg: use 2xDVD9+DVD5 instead)", type=str, dest='manual_dvds', default="") - parser.add_argument('-pb', '--desclink', nargs=1, required=False, help="Custom Description (link to hastebin/pastebin)") - parser.add_argument('-df', '--descfile', nargs=1, required=False, help="Custom Description (path to file OR filename in current working directory)") - parser.add_argument('-ih', '--imghost', nargs=1, required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens', 'onlyimage', 'dalexni', 'zipline']) + parser.add_argument('-pb', '--desclink', dest='description_link', nargs=1, required=False, help="Custom Description (link to hastebin/pastebin)") + parser.add_argument('-df', '--descfile', dest='description_file', nargs=1, required=False, help="Custom Description (path to file OR filename in current working directory)") + parser.add_argument('-menus', '--disc-menus', dest='path_to_menu_screenshots', nargs=1, required=False, help="Raw Disc only (Blu-ray/DVD). Path to the folder containing screenshots of the disc menus. All image files found in the folder will be used. 
Files should preferably be in PNG format (due to restrictions on some trackers), but other formats can be used (jpg, jpeg, webp)", type=str, default="") + parser.add_argument('-ih', '--imghost', nargs=1, required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens', 'onlyimage', 'dalexni', 'zipline', 'passtheimage', 'seedpool_cdn']) parser.add_argument('-siu', '--skip-imagehost-upload', dest='skip_imghost_upload', action='https://github.com/store_true', required=False, help="Skip Uploading to an image host") parser.add_argument('-th', '--torrenthash', nargs=1, required=False, help="Torrent Hash to re-use from your client's session directory") parser.add_argument('-nfo', '--nfo', action='https://github.com/store_true', required=False, help="Use .nfo in directory for description") - parser.add_argument('-k', '--keywords', nargs=1, required=False, help="Add comma seperated keywords e.g. 'keyword, keyword2, etc'") + parser.add_argument('-k', '--keywords', nargs=1, required=False, help="Add comma separated keywords e.g. 'keyword, keyword2, etc'") parser.add_argument('-kf', '--keep-folder', action='https://github.com/store_true', required=False, help="Keep the folder containing the single file. Works only when supplying a directory as input. For uploads with poor filenames, like some scene.") parser.add_argument('-reg', '--region', nargs=1, required=False, help="Region for discs") parser.add_argument('-a', '--anon', action='https://github.com/store_true', required=False, help="Upload anonymously") @@ -145,10 +158,11 @@ def parse(self, args, meta): parser.add_argument('-webdv', '--webdv', action='https://github.com/store_true', required=False, help="Contains a Dolby Vision layer converted using dovi_tool (HYBRID)") parser.add_argument('-hc', '--hardcoded-subs', action='https://github.com/store_true', required=False, help="Contains hardcoded subs", dest="hardcoded-subs") parser.add_argument('-pr', '--personalrelease', action='https://github.com/store_true', required=False, help="Personal Release") - parser.add_argument('-sdc', '--skip-dupe-check', action='https://github.com/store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") + parser.add_argument('-sdc', '--skip-dupe-check', action='https://github.com/store_true', required=False, help="Ignore dupes and upload anyway (Skips dupe check)", dest="dupe") + parser.add_argument('-sda', '--skip-dupe-asking', action='https://github.com/store_true', required=False, help="Don't prompt about dupes, just treat dupes as actual dupes", dest="ask_dupe") parser.add_argument('-debug', '--debug', action='https://github.com/store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', '--ffdebug', action='https://github.com/store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") - parser.add_argument('-mps', '--max-piece-size', nargs=1, required=False, help="Set max piece size allowed in MiB for default torrent creation (default 128 MiB)", choices=['2', '4', '8', '16', '32', '64', '128']) + parser.add_argument('-mps', '--max-piece-size', nargs=1, required=False, help="Set max piece size allowed in MiB for default torrent creation (default 128 MiB)", choices=['1', '2', '4', '8', '16', '32', '64', '128']) parser.add_argument('-nh', 
'--nohash', action='https://github.com/store_true', required=False, help="Don't hash .torrent") parser.add_argument('-rh', '--rehash', action='https://github.com/store_true', required=False, help="DO hash .torrent") parser.add_argument('-mkbrr', '--mkbrr', action='https://github.com/store_true', required=False, help="Use mkbrr for torrent hashing") @@ -158,21 +172,37 @@ def parse(self, args, meta): parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs=1, required=False, help="Add to qbit with this tag") parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs=1, required=False, help="Add to qbit with this category") parser.add_argument('-rtl', '--rtorrent-label', dest='rtorrent_label', nargs=1, required=False, help="Add to rtorrent with this label") - parser.add_argument('-tk', '--trackers', nargs=1, required=False, help="Upload to these trackers, comma seperated (--trackers blu,bhd) including manual") + parser.add_argument('-tk', '--trackers', nargs=1, required=False, help="Upload to these trackers, comma separated (--trackers blu,bhd) including manual") + parser.add_argument('-rtk', '--trackers-remove', dest='trackers_remove', nargs=1, required=False, help="Remove these trackers when processing default trackers, comma separated (--trackers-remove blu,bhd)") parser.add_argument('-tpc', '--trackers-pass', dest='trackers_pass', nargs=1, required=False, help="How many trackers need to pass all checks (dupe/banned group/etc) to actually proceed to uploading", type=int) parser.add_argument('-rt', '--randomized', nargs=1, required=False, help="Number of extra, torrents with random infohash", default=0) parser.add_argument('-entropy', '--entropy', dest='entropy', nargs=1, required=False, help="Use entropy in created torrents. (32 or 64) bits (ie: -entropy 32). Not supported at all sites, you many need to redownload the torrent", type=int, default=0) parser.add_argument('-ua', '--unattended', action='https://github.com/store_true', required=False, help=argparse.SUPPRESS) - parser.add_argument('-uac', '--unattended-confirm', action='https://github.com/store_true', required=False, help=argparse.SUPPRESS) + parser.add_argument('-uac', '--unattended_confirm', action='https://github.com/store_true', required=False, help=argparse.SUPPRESS) parser.add_argument('-vs', '--vapoursynth', action='https://github.com/store_true', required=False, help="Use vapoursynth for screens (requires vs install)") parser.add_argument('-dm', '--delete-meta', action='https://github.com/store_true', required=False, dest='delete_meta', help="Delete only meta.json from tmp directory") parser.add_argument('-dtmp', '--delete-tmp', action='https://github.com/store_true', required=False, dest='delete_tmp', help="Delete tmp directory for the working file/folder") parser.add_argument('-cleanup', '--cleanup', action='https://github.com/store_true', required=False, help="Clean up tmp directory") parser.add_argument('-fl', '--freeleech', nargs=1, required=False, help="Freeleech Percentage. 
Any value 1-100 works, but site search is limited to certain values", default=0, dest="freeleech") parser.add_argument('--infohash', nargs=1, required=False, help="V1 Info Hash") + parser.add_argument('-emby', '--emby', action='https://github.com/store_true', required=False, help="Create an Emby-compliant NFO file and optionally symlink the content") + parser.add_argument('-emby_cat', '--emby_cat', nargs=1, required=False, help="Set the expected category for Emby (e.g., 'movie', 'tv')") + parser.add_argument('-emby_debug', '--emby_debug', action='https://github.com/store_true', required=False, help="Does debugging stuff for Audionut") + parser.add_argument('-ch', '--channel', nargs=1, required=False, help="SPD only: Channel ID number or tag to upload to (preferably the ID), without '@'. Example: '-ch spd' when using a tag, or '-ch 1' when using an ID.", type=str, dest='spd_channel', default="") args, before_args = parser.parse_known_args(input) args = vars(args) # console.print(args) + + # Validation: require either path or site_upload + if not args.get('path') and not args.get('site_upload'): + console.print("[red]Error: Either a path must be provided or --site-upload must be specified.[/red]") + parser.print_help() + sys.exit(1) + + # For site upload mode, provide a dummy path if none given + if args.get('site_upload') and not args.get('path'): + args['path'] = ['dummy_path_for_site_upload'] + if meta.get('manual_frames') is not None: try: # Join the list into a single string, split by commas, and convert to integers @@ -205,7 +235,7 @@ def parse(self, args, meta): meta['manual_type'] = value2.upper().replace('-', '') elif key == 'tag': meta[key] = f"-{value2}" - elif key == 'descfile': + elif key == 'description_file': meta[key] = os.path.abspath(value2) elif key == 'comparison': meta[key] = os.path.abspath(value2) @@ -358,33 +388,69 @@ def parse(self, args, meta): meta[key] = value2 else: meta[key] = value - elif key in ("manual_edition"): + if key == 'site_upload': + if isinstance(value, list) and len(value) == 1: + meta[key] = value[0].upper() # Extract the tracker acronym and uppercase it + elif value is not None: + meta[key] = str(value).upper() + else: + meta[key] = None + if key in ("manual_edition"): if isinstance(value, list) and len(value) == 1: meta[key] = value[0] else: meta[key] = value - elif key in ("manual_dvds"): - meta[key] = value - elif key in ("freeleech"): - meta[key] = 100 - elif key in ("tag") and value == []: - meta[key] = "" - elif key in ["manual_episode_title"] and value == []: + if key in ("manual_dvds"): + if isinstance(value, list) and len(value) == 1: + meta[key] = value[0] + elif value not in (None, [], ""): + meta[key] = value + else: + meta[key] = "" + if key in ("freeleech"): + if isinstance(value, list) and len(value) == 1: + meta[key] = int(value[0]) + elif value not in (None, [], 0): + meta[key] = int(value) + else: + meta[key] = 0 + if key in ["manual_episode_title"] and value == []: meta[key] = "" - elif key in ["manual_episode_title"]: - meta[key] = value - elif key in ["tvmaze_manual"]: - meta[key] = value - elif key == 'trackers': + if key in ["tvmaze_manual"]: + if isinstance(value, list) and len(value) == 1: + meta[key] = value[0] + elif value not in (None, []): + meta[key] = value + if key == 'trackers': if value: - tracker_value = value + # Extract from list if it's a single-item list (from nargs=1) + if isinstance(value, list) and len(value) == 1: + tracker_value = value[0] + else: + tracker_value = value + if 
isinstance(tracker_value, str): tracker_value = tracker_value.strip('"\'') - if isinstance(tracker_value, str) and ',' in tracker_value: - meta[key] = [t.strip().upper() for t in tracker_value.split(',')] + # Split by comma if present + if ',' in tracker_value: + meta[key] = [t.strip().upper() for t in tracker_value.split(',')] + else: + meta[key] = [tracker_value.strip().upper()] + elif isinstance(tracker_value, list): + # Handle list of strings + expanded = [] + for t in tracker_value: + if isinstance(t, str): + if ',' in t: + expanded.extend([x.strip().upper() for x in t.split(',')]) + else: + expanded.append(t.strip().upper()) + else: + expanded.append(str(t).upper()) + meta[key] = expanded else: - meta[key] = [tracker_value.strip().upper()] if isinstance(tracker_value, str) else [tracker_value.upper()] + meta[key] = [str(tracker_value).upper()] else: meta[key] = [] else: diff --git a/src/audio.py b/src/audio.py index e54854bba..08ca21923 100644 --- a/src/audio.py +++ b/src/audio.py @@ -1,41 +1,235 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import json +import os import time import traceback +import re + +from data.config import config from src.console import console +from src.trackers.COMMON import COMMON + + +def determine_channel_count(channels, channel_layout, additional, format): + # Coerce channels to string and extract first integer (handles values like "6 channels", "8 / 6", etc.) + s = str(channels).strip() if channels is not None else "" + m = re.search(r"\d+", s) + if not m: + return "Unknown" + + channels = int(m.group(0)) + channel_layout = channel_layout.strip() if channel_layout else "" + + # Handle specific Atmos/immersive audio cases first + if is_atmos_or_immersive_audio(additional, format, channel_layout): + if channel_layout: + return handle_atmos_channel_count(channels, channel_layout) + + # Handle standard channel layouts with proper LFE detection + if channel_layout: + return parse_channel_layout(channels, channel_layout) + + # Fallback for when no layout information is available + return fallback_channel_count(channels) + + +def is_atmos_or_immersive_audio(additional, format, channel_layout): + """Check if this is Dolby Atmos, DTS:X, or other immersive audio format.""" + atmos_indicators = [ + 'JOC', 'Atmos', '16-ch', 'Atmos Audio', + 'TrueHD Atmos', 'E-AC-3 JOC', 'Dolby Atmos' + ] + + dtsx_indicators = ['DTS:X', 'XLL X'] + + # Check in additional features + if additional: + if any(indicator in str(additional) for indicator in atmos_indicators + dtsx_indicators): + return True + + # Check in format + if format and any(indicator in str(format) for indicator in atmos_indicators + dtsx_indicators): + return True + + # Check for height channels in layout (indicating immersive audio) + if channel_layout: + height_indicators = [ + 'Tfc', 'Tfl', 'Tfr', 'Tbl', 'Tbr', 'Tbc', # Top channels + 'TFC', 'TFL', 'TFR', 'TBL', 'TBR', 'TBC', # Top channels (uppercase) + 'Vhc', 'Vhl', 'Vhr', # Vertical height channels + 'Ch', 'Lh', 'Rh', 'Chr', 'Lhr', 'Rhr', # Height variants + 'Top', 'Height' # Generic height indicators + ] + if any(indicator in channel_layout for indicator in height_indicators): + return True + + return False + + +def handle_atmos_channel_count(channels, channel_layout): + """Handle Dolby Atmos and immersive audio channel counting.""" + + # Parse the layout to count bed and height channels + bed_channels, lfe_count, height_channels = parse_atmos_layout(channel_layout) + + if height_channels > 0: + if lfe_count > 0: + return 
f"{bed_channels}.{lfe_count}.{height_channels}" + else: + return f"{bed_channels}.0.{height_channels}" + else: + # Fallback to standard counting + return parse_channel_layout(channels, channel_layout) + + +def parse_atmos_layout(channel_layout): + """Parse channel layout to separate bed channels, LFE, and height channels.""" + if not channel_layout: + return 0, 0, 0 + + layout = channel_layout.upper() + + # Split by spaces to get individual channel identifiers + channels = layout.split() + bed_count = 0 + height_count = 0 + lfe_count = 0 + + for channel in channels: + channel = channel.strip() + if not channel: + continue + + # Check for LFE first + if 'LFE' in channel: + lfe_count += 1 + # Check for height channels + elif any(height_indicator in channel for height_indicator in [ + 'TFC', 'TFL', 'TFR', 'TBL', 'TBR', 'TBC', # Top channels + 'VHC', 'VHL', 'VHR', # Vertical height + 'CH', 'LH', 'RH', 'CHR', 'LHR', 'RHR', # Height variants + 'TSL', 'TSR', 'TLS', 'TRS' # Top surround + ]): + height_count += 1 + # Everything else is a bed channel + elif channel in ['L', 'R', 'C', 'FC', 'LS', 'RS', 'SL', 'SR', + 'BL', 'BR', 'BC', 'SB', 'FLC', 'FRC', 'LC', 'RC', + 'LW', 'RW', 'FLW', 'FRW', 'LSS', 'RSS', 'SIL', 'SIR', + 'LB', 'RB', 'CB', 'CS']: + bed_count += 1 + + return bed_count, lfe_count, height_count + + +def parse_channel_layout(channels, channel_layout): + """Parse standard channel layout to determine proper channel count notation.""" + layout = channel_layout.upper() + + # Count LFE channels + lfe_count = layout.count('LFE') + if lfe_count == 0 and 'LFE' in layout: + lfe_count = 1 + + # Handle multiple LFE channels (rare but possible) + if lfe_count > 1: + main_channels = channels - lfe_count + return f"{main_channels}.{lfe_count}" + elif lfe_count == 1: + return f"{channels - 1}.1" + else: + # No LFE detected + if channels <= 2: + return f"{channels}.0" + else: + # Check for specific mono layouts + if 'MONO' in layout or channels == 1: + return "1.0" + # Check for specific stereo layouts + elif channels == 2: + return "2.0" + # For multichannel without LFE, assume it's a .0 configuration + else: + return f"{channels}.0" + + +def fallback_channel_count(channels): + """Fallback channel counting when no layout information is available.""" + if channels <= 2: + return f"{channels}.0" + elif channels == 3: + return "2.1" # Assume L/R/LFE + elif channels == 4: + return "3.1" # Assume L/R/C/LFE + elif channels == 5: + return "4.1" # Assume L/R/Ls/Rs/LFE + elif channels == 6: + return "5.1" # Standard 5.1 + elif channels == 7: + return "6.1" # 6.1 or 7.0 + elif channels == 8: + return "7.1" # Standard 7.1 + else: + return f"{channels - 1}.1" async def get_audio_v2(mi, meta, bdinfo): extra = dual = "" has_commentary = False meta['bloated'] = False + bd_mi = None # Get formats if bdinfo is not None: # Disks - format_settings = "" - format = bdinfo.get('audio', [{}])[0].get('codec', '') - commercial = format additional = bdinfo.get('audio', [{}])[0].get('atmos_why_you_be_like_this', '') + if 'atmos' in additional.lower(): + common = COMMON(config) + bd_mi = await common.get_bdmv_mediainfo(meta) + try: + base_dir = meta.get('base_dir') + folder_id = meta.get('uuid') or meta.get('folder_id') + if base_dir and folder_id: + mi_path = os.path.join(base_dir, 'tmp', folder_id, 'MediaInfo.json') + if os.path.exists(mi_path): + with open(mi_path, 'r', encoding='utf-8') as f: + mi = json.load(f) + if meta.get('debug'): + console.print(f"[yellow]Loaded MediaInfo from file:[/yellow] {mi_path}") + except 
Exception: + if meta.get('debug'): + console.print("[red]Failed to load MediaInfo.json from tmp directory[/red]") + console.print(traceback.format_exc()) + bd_mi = None + else: + format_settings = "" + format = bdinfo.get('audio', [{}])[0].get('codec', '') + commercial = format + chan = bdinfo.get('audio', [{}])[0].get('channels', '') - # Channels - chan = bdinfo.get('audio', [{}])[0].get('channels', '') - else: + if bdinfo is None or bd_mi is not None: # Rips or BD with mediainfo tracks = mi.get('media', {}).get('track', []) audio_tracks = [t for t in tracks if t.get('@type') == "Audio"] first_audio_track = None if audio_tracks: - tracks_with_order = [t for t in audio_tracks if t.get('StreamOrder')] + tracks_with_order = [t for t in audio_tracks if t.get('StreamOrder') and not isinstance(t.get('StreamOrder'), dict)] if tracks_with_order: - first_audio_track = min(tracks_with_order, key=lambda x: int(x.get('StreamOrder', '999'))) + try: + first_audio_track = min(tracks_with_order, key=lambda x: int(str(x.get('StreamOrder', '999')))) + except (ValueError, TypeError): + first_audio_track = tracks_with_order[0] else: - tracks_with_id = [t for t in audio_tracks if t.get('ID')] + tracks_with_id = [t for t in audio_tracks if t.get('ID') and not isinstance(t.get('ID'), dict)] if tracks_with_id: - first_audio_track = min(tracks_with_id, key=lambda x: int(x.get('ID', '999'))) + try: + # Extract numeric part from ID (e.g., "128 (0x80)" -> 128) + first_audio_track = min(tracks_with_id, key=lambda x: int(re.search(r'\d+', str(x.get('ID', '999'))).group())) + except (ValueError, TypeError, AttributeError): + first_audio_track = tracks_with_id[0] else: first_audio_track = audio_tracks[0] track = first_audio_track if first_audio_track else {} format = track.get('Format', '') commercial = track.get('Format_Commercial', '') or track.get('Format_Commercial_IfAny', '') - if track.get('Language', '') == "zxx": meta['silent'] = True @@ -56,85 +250,84 @@ async def get_audio_v2(mi, meta, bdinfo): except Exception: channel_layout = '' - if channel_layout and "LFE" in channel_layout: - chan = f"{int(channels) - 1}.1" - elif channel_layout == "": - if int(channels) <= 2: - chan = f"{int(channels)}.0" - else: - chan = f"{int(channels) - 1}.1" - else: - chan = f"{channels}.0" + # Enhanced channel count determination based on MediaArea AudioChannelLayout + chan = determine_channel_count(channels, channel_layout, additional, format) if meta.get('dual_audio', False): dual = "Dual-Audio" else: # if not meta.get('original_language', '').startswith('en'): - eng, orig, non_en_non_commentary = False, False, False - orig_lang = meta.get('original_language', '').lower() - if meta['debug']: - console.print(f"DEBUG: Original Language: {orig_lang}") - try: - tracks = mi.get('media', {}).get('track', []) - has_commentary = False - has_coms = [t for t in tracks if "commentary" in (t.get('Title') or '').lower()] - if has_coms: - has_commentary = True - if meta['debug']: - console.print(f"DEBUG: Found {len(has_coms)} commentary tracks, has_commentary = {has_commentary}") - audio_tracks = [ - t for t in tracks - if t.get('@type') == "Audio" and "commentary" not in (t.get('Title') or '').lower() - ] - audio_language = None + if not bd_mi or not bdinfo: + eng, orig, non_en_non_commentary = False, False, False + orig_lang = meta.get('original_language', '').lower() if meta['debug']: - console.print(f"DEBUG: Audio Tracks (not commentary)= {len(audio_tracks)}") - for t in audio_tracks: - audio_language = t.get('Language', '') + 
console.print(f"DEBUG: Original Language: {orig_lang}") + try: + tracks = mi.get('media', {}).get('track', []) + has_commentary = False + has_compatibility = False + has_coms = [t for t in tracks if "commentary" in (t.get('Title') or '').lower()] + has_compat = [t for t in tracks if "compatibility" in (t.get('Title') or '').lower()] + if has_coms: + has_commentary = True + if has_compat: + has_compatibility = True if meta['debug']: - console.print(f"DEBUG: Audio Language = {audio_language}") - - if isinstance(audio_language, str): - if audio_language.startswith("en"): - if meta['debug']: - console.print(f"DEBUG: Found English audio track: {audio_language}") - eng = True - - if audio_language and "en" not in audio_language and audio_language.startswith(orig_lang): - if meta['debug']: - console.print(f"DEBUG: Found original language audio track: {audio_language}") - orig = True - - variants = ['zh', 'cn', 'cmn', 'no', 'nb'] - if any(audio_language.startswith(var) for var in variants) and any(orig_lang.startswith(var) for var in variants): - if meta['debug']: - console.print(f"DEBUG: Found original language audio track with variant: {audio_language}") - orig = True - - if isinstance(audio_language, str): - audio_language = audio_language.strip().lower() - if audio_language and not audio_language.startswith(orig_lang) and not audio_language.startswith("en"): - non_en_non_commentary = True - console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") - time.sleep(5) - - if ( - orig_lang == "en" - and eng - and non_en_non_commentary - ): - console.print("[bold red]This release is English original, has English audio, but also has other non-English audio tracks (not commentary). This may be considered bloated.[/bold red]") - meta['bloated'] = True - time.sleep(5) - - if ((eng and (orig or non_en_non_commentary)) or (orig and non_en_non_commentary)) and len(audio_tracks) > 1 and not meta.get('no_dual', False): - dual = "Dual-Audio" - meta['dual_audio'] = True - elif eng and not orig and orig_lang not in ['zxx', 'xx', 'en', None] and not meta.get('no_dub', False): - dual = "Dubbed" - except Exception: - console.print(traceback.format_exc()) - pass + console.print(f"DEBUG: Found {len(has_coms)} commentary tracks, has_commentary = {has_commentary}") + console.print(f"DEBUG: Found {len(has_compat)} compatibility tracks, has_compatibility = {has_compatibility}") + audio_tracks = [ + t for t in tracks + if t.get('@type') == "Audio" and "commentary" not in (t.get('Title') or '').lower() and "compatibility" not in (t.get('Title') or '').lower() + ] + audio_language = None + if meta['debug']: + console.print(f"DEBUG: Audio Tracks (not commentary)= {len(audio_tracks)}") + for t in audio_tracks: + audio_language = t.get('Language', '') + if meta['debug']: + console.print(f"DEBUG: Audio Language = {audio_language}") + + if isinstance(audio_language, str): + if audio_language.startswith("en"): + if meta['debug']: + console.print(f"DEBUG: Found English audio track: {audio_language}") + eng = True + + if audio_language and "en" not in audio_language and audio_language.startswith(orig_lang): + if meta['debug']: + console.print(f"DEBUG: Found original language audio track: {audio_language}") + orig = True + + variants = ['zh', 'cn', 'cmn', 'no', 'nb'] + if any(audio_language.startswith(var) for var in variants) and any(orig_lang.startswith(var) for var in variants): + if meta['debug']: + console.print(f"DEBUG: Found original language audio track with variant: 
{audio_language}") + orig = True + + if isinstance(audio_language, str): + audio_language = audio_language.strip().lower() + if audio_language and not audio_language.startswith(orig_lang) and not audio_language.startswith("en") and not audio_language.startswith("zx"): + non_en_non_commentary = True + console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") + time.sleep(5) + + if ( + orig_lang == "en" + and eng + and non_en_non_commentary + ): + console.print("[bold red]This release is English original, has English audio, but also has other non-English audio tracks (not commentary). This may be considered bloated.[/bold red]") + meta['bloated'] = True + time.sleep(5) + + if ((eng and (orig or non_en_non_commentary)) or (orig and non_en_non_commentary)) and len(audio_tracks) > 1 and not meta.get('no_dual', False): + dual = "Dual-Audio" + meta['dual_audio'] = True + elif eng and not orig and orig_lang not in ['zxx', 'xx', 'en', None] and not meta.get('no_dub', False): + dual = "Dubbed" + except Exception: + console.print(traceback.format_exc()) + pass # Convert commercial name to naming conventions audio = { @@ -213,7 +406,6 @@ async def get_audio_v2(mi, meta, bdinfo): if format.startswith("DTS"): if additional and additional.endswith("X"): codec = "DTS:X" - chan = f"{int(channels) - 1}.1" if format == "MPEG Audio": if format_profile == "Layer 2": diff --git a/src/bbcode.py b/src/bbcode.py index 8a1e07b3d..4e737f61c 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import re import html import urllib.parse @@ -127,6 +128,8 @@ def clean_hdb_description(self, description): desc = desc.replace(f"[url={web_url}][img]{img_url}[/img][/url]", '') description = desc.strip() + if self.is_only_bbcode(description): + return "", imagelist return description, imagelist def clean_bhd_description(self, description, meta): @@ -200,6 +203,9 @@ def clean_bhd_description(self, description, meta): else: description = "" + if self.is_only_bbcode(description): + return "", imagelist + return description, imagelist def clean_ptp_description(self, desc, is_disc): @@ -251,7 +257,7 @@ def clean_ptp_description(self, desc, is_disc): comps.extend(hides) nocomp = desc - # Exclude URLs from exculed array fom `nocomp` + # Exclude URLs from excluded array fom `nocomp` for url in excluded_urls: nocomp = nocomp.replace(url, '') @@ -401,7 +407,8 @@ def restore_links(desc, links): desc = desc.strip('\n') if desc.replace('\n', '').strip() == '': - console.print("[yellow]Description is empty after cleaning.") + return "", imagelist + if self.is_only_bbcode(desc): return "", imagelist return desc, imagelist @@ -414,17 +421,18 @@ def clean_unit3d_description(self, desc, site): # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc - site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" + site_domain = site_netloc.split('.')[0] + site_regex = rf"(\[url[\=\]]https?:\/\/{site_domain}\.[^\/\]]+/[^\]]+])([^\[]+)(\[\/url\])?" 
site_url_tags = re.findall(site_regex, desc) if site_url_tags: for site_url_tag in site_url_tags: site_url_tag = ''.join(site_url_tag) - url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" + url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_domain}\.[^\/\]]+[^\]]+])" url_tag_removed = re.sub(url_tag_regex, "", site_url_tag) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(site_url_tag, url_tag_removed) - desc = desc.replace(site_netloc, site_netloc.split('.')[0]) + desc = desc.replace(site_netloc, site_domain) # Temporarily hide spoiler tags spoilers = re.findall(r"\[spoiler[\s\S]*?\[\/spoiler\]", desc) @@ -435,26 +443,38 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) - # Get Images from [img] tags and remove them from the description + # Get Images from [img] tags, checking if they're wrapped in [url] tags imagelist = [] + + # First, find images wrapped in URL tags: [url=web_url][img]img_url[/img][/url] + url_img_pattern = r"\[url=(https?://[^\]]+)\]\[img[^\]]*\](.*?)\[/img\]\[/url\]" + url_img_matches = re.findall(url_img_pattern, desc, flags=re.IGNORECASE) + for web_url, img_url in url_img_matches: + image_dict = { + 'img_url': img_url.strip(), + 'raw_url': img_url.strip(), + 'web_url': web_url.strip(), + } + imagelist.append(image_dict) + # Remove the entire [url=...][img]...[/img][/url] structure + desc = re.sub(rf"\[url={re.escape(web_url)}\]\[img[^\]]*\]{re.escape(img_url)}\[/img\]\[/url\]", '', desc, flags=re.IGNORECASE) + + # Then find standalone [img] tags (not wrapped in URL) img_tags = re.findall(r"\[img[^\]]*\](.*?)\[/img\]", desc, re.IGNORECASE) if img_tags: for img_url in img_tags: - image_dict = { - 'img_url': img_url.strip(), - 'raw_url': img_url.strip(), - 'web_url': img_url.strip(), - } - imagelist.append(image_dict) - # Remove the [img] tag and its contents from the description + img_url = img_url.strip() + # Check if this image was already added (wrapped in URL) + if not any(img['img_url'] == img_url for img in imagelist): + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url, + } + imagelist.append(image_dict) + # Remove the [img] tag desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) - # Now, remove matching URLs from [URL] tags - for img in imagelist: - img_url = re.escape(img['img_url']) - desc = re.sub(rf"\[URL={img_url}\]\[/URL\]", '', desc, flags=re.IGNORECASE) - desc = re.sub(rf"\[URL={img_url}\]\[img[^\]]*\]{img_url}\[/img\]\[/URL\]", '', desc, flags=re.IGNORECASE) - # Filter out bot images from imagelist bot_image_urls = [ "https://blutopia.xyz/favicon.ico", # Example bot image URL @@ -495,28 +515,47 @@ def clean_unit3d_description(self, desc, site): \sAuto\sUploader\[\/b\]\s*\[\/center\]| \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\]| \n\[center\]\[url=https:\/\/github\.com\/edge20200\/Only-Uploader\]Powered\sby\s - Only-Uploader\[\/url\]\[\/center\] + Only-Uploader\[\/url\]\[\/center\]| + \[center\]\[url=\/torrents\?perPage=\d+&name=[^\]]*\]\[\/url\]\[\/center\] """ desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE) - desc = re.sub(r"\[center\].*Created by (L4G|Audionut)('?s)? 
Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + # Remove Aither internal signature + desc = re.sub(r"\[center\]\[b\]\[size=\d+\]🖌️\[/size\]\[/b\][\s\S]*?This is an internal release which was first released exclusively on Aither\.[\s\S]*?🍻 Cheers to all the Aither.*?\[/center\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[center\].*Created by.*Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[right\].*Created by.*Upload Assistant.*\[\/right\]", "", desc, flags=re.IGNORECASE) # Remove leftover [img] or [URL] tags in the description desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - desc = re.sub(r"\[URL=[\s\S]*?\]\[\/URL\]", "", desc, flags=re.IGNORECASE) + # desc = re.sub(r"\[URL=[\s\S]*?\]\[\/URL\]", "", desc, flags=re.IGNORECASE) # Strip trailing whitespace and newlines: desc = desc.rstrip() if desc.replace('\n', '') == '': return "", imagelist + if self.is_only_bbcode(desc): + return "", imagelist return desc, imagelist + def is_only_bbcode(self, desc): + # Remove all BBCode tags + text = re.sub(r"\[/?[a-zA-Z0-9]+(?:=[^\]]*)?\]", "", desc) + # Remove whitespace and newlines + text = text.strip() + # If nothing left, it's only BBCode + return not text + def convert_pre_to_code(self, desc): desc = desc.replace('[pre]', '[code]') desc = desc.replace('[/pre]', '[/code]') return desc + def convert_code_to_pre(self, desc): + desc = desc.replace('[code]', '[pre]') + desc = desc.replace('[/code]', '[/pre]') + return desc + def convert_hide_to_spoiler(self, desc): desc = desc.replace('[hide', '[spoiler') desc = desc.replace('[/hide]', '[/spoiler]') @@ -527,10 +566,26 @@ def convert_spoiler_to_hide(self, desc): desc = desc.replace('[/spoiler]', '[/hide]') return desc + def remove_hide(self, desc): + desc = desc.replace('[hide]', '').replace('[/hide]', '') + return desc + + def convert_named_spoiler_to_named_hide(self, desc): + ''' + Converts [spoiler=Name] to [hide=Name] + ''' + desc = re.sub(r"\[spoiler=([^]]+)]", r"[hide=\1]", desc, flags=re.IGNORECASE) + desc = desc.replace('[/spoiler]', '[/hide]') + return desc + def remove_spoiler(self, desc): desc = re.sub(r"\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) return desc + def convert_named_spoiler_to_normal_spoiler(self, desc): + desc = re.sub(r'(\[spoiler=[^]]+])', '[spoiler]', desc, flags=re.IGNORECASE) + return desc + def convert_spoiler_to_code(self, desc): desc = desc.replace('[spoiler', '[code') desc = desc.replace('[/spoiler]', '[/code]') @@ -541,6 +596,49 @@ def convert_code_to_quote(self, desc): desc = desc.replace('[/code]', '[/quote]') return desc + def remove_img_resize(self, desc): + ''' + Converts [img=number] or any other parameters to just [img] + ''' + desc = re.sub(r'\[img(?:[^\]]*)\]', '[img]', desc, flags=re.IGNORECASE) + return desc + + def remove_extra_lines(self, desc): + ''' + Removes more than 2 consecutive newlines + ''' + desc = re.sub(r'\n{3,}', '\n\n', desc) + return desc + + def convert_to_align(self, desc): + ''' + Converts [right], [left], [center] to [align=right], [align=left], [align=center] + ''' + desc = re.sub(r'\[(right|center|left)\]', lambda m: f"[align={m.group(1)}]", desc) + desc = re.sub(r'\[/(right|center|left)\]', "[/align]", desc) + return desc + + def remove_sup(self, desc): + ''' + Removes [sup] tags + ''' + desc = desc.replace('[sup]', '').replace('[/sup]', '') + return desc + + def remove_sub(self, desc): + ''' + Removes [sub] tags + ''' 
+ desc = desc.replace('[sub]', '').replace('[/sub]', '') + return desc + + def remove_list(self, desc): + ''' + Removes [list] tags + ''' + desc = desc.replace('[list]', '').replace('[/list]', '') + return desc + def convert_comparison_to_collapse(self, desc, max_width): comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: diff --git a/src/bluray_com.py b/src/bluray_com.py index f152c6a00..72aa72544 100644 --- a/src/bluray_com.py +++ b/src/bluray_com.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import httpx import random import asyncio @@ -31,7 +32,8 @@ async def search_bluray(meta): console.print(f"[yellow]Error reading cached file: {str(e)}[/yellow]") # If we're here, we need to make a request - console.print(f"[dim]Search URL: {url}[/dim]") + if meta['debug']: + console.print(f"[dim]Search URL: {url}[/dim]") headers = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", @@ -131,10 +133,9 @@ def extract_bluray_links(html_content): results = [] try: - soup = BeautifulSoup(html_content, 'html.parser') + soup = BeautifulSoup(html_content, 'lxml') movie_divs = soup.select('div.figure') if not movie_divs: - console.print("[red]No movie divs found in the search results[/red]") return None for i, movie_div in enumerate(movie_divs, 1): @@ -177,12 +178,15 @@ async def extract_bluray_release_info(html_content, meta): is_3d = meta.get('3D', '') == 'yes' resolution = meta.get('resolution', '').lower() is_4k = '2160p' in resolution or '4k' in resolution - release_type = "4K" if is_4k else "3D" if is_3d else "BD" + is_dvd = meta['is_disc'] == "DVD" + release_type = "4K" if is_4k else "3D" if is_3d else "DVD" if is_dvd else "BD" if is_3d: console.print("[blue]Looking for 3D Blu-ray releases[/blue]") elif is_4k: console.print("[blue]Looking for 4K/UHD Blu-ray releases[/blue]") + elif is_dvd: + console.print("[blue]Looking for DVD releases[/blue]") else: console.print("[blue]Looking for standard Blu-ray releases[/blue]") @@ -195,14 +199,20 @@ async def extract_bluray_release_info(html_content, meta): console.print(f"[dim]Could not save debug file: {str(e)}[/dim]") try: - soup = BeautifulSoup(html_content, 'html.parser') + dvd_sections = None + soup = BeautifulSoup(html_content, 'lxml') + if is_dvd: + dvd_sections = soup.find_all('h3', string=lambda s: s and ('DVD Editions' in s)) + selected_sections = dvd_sections + else: + bluray_sections = soup.find_all('h3', string=lambda s: s and ('Blu-ray Editions' in s or '4K Blu-ray Editions' in s or '3D Blu-ray Editions' in s)) + selected_sections = bluray_sections - bluray_sections = soup.find_all('h3', string=lambda s: s and ('Blu-ray Editions' in s or '4K Blu-ray Editions' in s or '3D Blu-ray Editions' in s)) if meta['debug']: - console.print(f"[blue]Found {len(bluray_sections)} Blu-ray section(s)[/blue]") - + release_type_debug = "DVD" if is_dvd else "Blu-ray" + console.print(f"[blue]Found {len(selected_sections)} {release_type_debug} section(s)[/blue]") filtered_sections = [] - for section in bluray_sections: + for section in selected_sections: section_title = section.text # Check if this section matches what we're looking for @@ -214,6 +224,10 @@ async def extract_bluray_release_info(html_content, meta): filtered_sections.append(section) if meta['debug']: console.print(f"[green]Including 4K section: {section_title}[/green]") + elif is_dvd and 'DVD Editions' in section_title: + 
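# Illustrative sketch of how the DVD branch above is reached (the concrete meta dict is an
# assumed example, not taken from this PR; field names are the ones used earlier in this function):
#   meta = {'is_disc': 'DVD', '3D': '', 'resolution': '480p'}
#   -> is_dvd = True, is_3d = False, is_4k = False, release_type = "DVD"
# so only "DVD Editions" headings survive into filtered_sections.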
filtered_sections.append(section) + if meta['debug']: + console.print(f"[green]Including DVD section: {section_title}[/green]") elif not is_3d and not is_4k and 'Blu-ray Editions' in section_title and '3D Blu-ray Editions' not in section_title and '4K Blu-ray Editions' not in section_title: filtered_sections.append(section) if meta['debug']: @@ -222,18 +236,18 @@ async def extract_bluray_release_info(html_content, meta): # If no sections match our filter criteria, use all sections if not filtered_sections: console.print("[yellow]No sections match exact media type, using all available sections[/yellow]") - filtered_sections = bluray_sections + filtered_sections = selected_sections for section_idx, section in enumerate(filtered_sections, 1): parent_tr = section.find_parent('tr') if not parent_tr: - console.print("[red]Could not find parent tr for Blu-ray section[/red]") + console.print(f"[red]Could not find parent tr for {release_type_debug} section[/red]") continue release_links = [] current = section.find_next() while current and (not current.name == 'h3'): - if current.name == 'a' and current.has_attr('href') and 'blu-ray.com/movies/' in current['href']: + if current.name == 'a' and current.has_attr('href') and ('blu-ray.com/movies/' in current['href'] or 'blu-ray.com/dvd/' in current['href']): release_links.append(current) current = current.find_next() @@ -248,7 +262,7 @@ async def extract_bluray_release_info(html_content, meta): publisher_tag = link.find_next('small', style=lambda s: s and 'color: #999999' in s) publisher = publisher_tag.text.strip() if publisher_tag else "Unknown" - release_id_match = re.search(r'blu-ray\.com/movies/.*?/(\d+)/', release_url) + release_id_match = re.search(r'blu-ray\.com/(movies|dvd)/.*?/(\d+)/', release_url) if release_id_match: release_id = release_id_match.group(1) if meta['debug']: @@ -305,7 +319,8 @@ async def get_bluray_releases(meta): movie_links = extract_bluray_links(html_content) if not movie_links: - console.print(f"[red]No movies found for IMDB ID: tt{meta['imdb_id']:07d}[/red]") + if meta['debug']: + console.print(f"[red]No movies found for IMDB ID: tt{meta['imdb_id']:07d}[/red]") return [] matching_releases = [] @@ -424,7 +439,7 @@ async def get_bluray_releases(meta): console.print("[yellow]===== BluRay.com search results summary =====[/yellow]") if matching_releases: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): for idx, release in enumerate(matching_releases, 1): console.print(f"[green]{idx}. 
{release['movie_title']} ({release['movie_year']}):[/green]") console.print(f" [blue]Title: {release['title']}[/blue]") @@ -433,11 +448,11 @@ async def get_bluray_releases(meta): console.print(f" [blue]Price: {release['price']}[/blue]") console.print(f" [dim]URL: {release['url']}[/dim]") - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print() - console.print("[green]Blu-ray Release Selection") + console.print("[green]Release Selection") console.print("[green]=======================================") - console.print("[dim]Please select a Blu-ray release to use for region and distributor information:") + console.print("[dim]Please select a release to use for region and distributor information:") console.print("[dim]Enter release number, 'a' for all releases, or 'n' to skip") console.print("[dim]Selecting all releases will search every release for more information...") console.print("[dim]More releases will require more time to process") @@ -512,7 +527,7 @@ async def get_bluray_releases(meta): async def parse_release_details(response_text, release, meta): try: - soup = BeautifulSoup(response_text, 'html.parser') + soup = BeautifulSoup(response_text, 'lxml') specs_td = soup.find('td', width="228px", style=lambda s: s and 'font-size: 12px' in s) if not specs_td: @@ -553,7 +568,7 @@ async def parse_release_details(response_text, release, meta): if audio_div: audio_html = str(audio_div) audio_html = re.sub(r'', '\n', audio_html) - audio_soup = BeautifulSoup(audio_html, 'html.parser') + audio_soup = BeautifulSoup(audio_html, 'lxml') raw_text = audio_soup.get_text() raw_lines = [line.strip() for line in raw_text.split('\n') if line.strip() and 'less' not in line] @@ -762,7 +777,7 @@ async def download_cover_images(meta): def extract_cover_images(html_content): cover_images = {} - soup = BeautifulSoup(html_content, 'html.parser') + soup = BeautifulSoup(html_content, 'lxml') scripts = soup.find_all('script', string=lambda s: s and "$(document).ready" in s and "append(' 0: + console.print(f"[green]{operation_name} succeeded on attempt {attempt + 1}") + return result + except asyncio.TimeoutError: + if attempt < max_retries: + console.print(f"[yellow]{operation_name} timed out after {timeout}s (attempt {attempt + 1}/{max_retries + 1}), retrying...") + await asyncio.sleep(1) # Brief pause before retry + else: + console.print(f"[bold red]{operation_name} failed after {max_retries + 1} attempts (final timeout: {timeout}s)") + raise # Re-raise the TimeoutError so caller can handle it + + async def init_qbittorrent_client(self, client): + # Creates and logs into a qbittorrent client, with caching to avoid redundant logins + # If login fails, returns None + client_key = (client['qbit_url'], client['qbit_port'], client['qbit_user']) + async with qbittorrent_locks[client_key]: + # We lock to further prevent concurrent logins for the same client. If two clients try to init at the same time, if the first one succeeds, the second one can use the cached client. 
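# Rough usage sketch (assumed config values; the real dict comes from TORRENT_CLIENTS):
#   client = {'qbit_url': 'http://localhost/', 'qbit_port': 8080,
#             'qbit_user': 'admin', 'qbit_pass': 'secret'}
#   qbt = await self.init_qbittorrent_client(client)   # first call logs in and caches
#   qbt = await self.init_qbittorrent_client(client)   # same key -> returns the cached client
# Concurrent callers that share (url, port, user) serialize on qbittorrent_locks[client_key],
# so only the first of them performs auth_log_in().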
+ potential_cached_client = qbittorrent_cached_clients.get(client_key) + if potential_cached_client is not None: + return potential_cached_client + + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) + try: + await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.auth_log_in), + "qBittorrent login" + ) + except asyncio.TimeoutError: + console.print("[bold red]Connection to qBittorrent timed out after retries") + return None + except qbittorrentapi.LoginFailed: + console.print("[bold red]Failed to login to qBittorrent - incorrect credentials") + return None + except qbittorrentapi.APIConnectionError: + console.print("[bold red]Failed to connect to qBittorrent - check host/port") + return None + else: + qbittorrent_cached_clients[client_key] = qbt_client + return qbt_client + + async def add_to_client(self, meta, tracker, cross=False): + if cross: + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}_cross].torrent" + else: + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" if meta.get('no_seed', False) is True: console.print("[bold red]--no-seed was passed, so the torrent will not be added to the client") console.print("[bold yellow]Add torrent manually to the client") @@ -35,53 +98,137 @@ async def add_to_client(self, meta, tracker): torrent = Torrent.read(torrent_path) else: return - if meta.get('client', None) is None: - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] - else: - default_torrent_client = meta['client'] - if meta.get('client', None) == 'none': + + inject_clients = [] + if meta.get('client') and meta.get('client') != 'none': + inject_clients = [meta['client']] + elif meta.get('client') == 'none': return - if default_torrent_client == "none": + else: + inject_clients_config = self.config['DEFAULT'].get('injecting_client_list') + if isinstance(inject_clients_config, str) and inject_clients_config.strip(): + inject_clients = [inject_clients_config] + elif isinstance(inject_clients_config, list): + # Filter out empty strings and whitespace-only strings + inject_clients = [c for c in inject_clients_config if c and str(c).strip()] + else: + inject_clients = [] + + if not inject_clients: + default_client = self.config['DEFAULT'].get('default_torrent_client') + if default_client and default_client != 'none': + inject_clients = [default_client] + + if not inject_clients: return - client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_client = client['torrent_client'] - local_path, remote_path = await self.remote_path_map(meta) + for client_name in inject_clients: + if client_name == "none" or not client_name: + continue - if meta['debug']: - console.print(f"[bold green]Adding to {torrent_client}") - if torrent_client.lower() == "rtorrent": - self.rtorrent(meta['path'], torrent_path, torrent, meta, local_path, remote_path, client, tracker) - elif torrent_client == "qbit": - await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta, tracker) - elif torrent_client.lower() == "deluge": - if meta['type'] == "DISC": - path = os.path.dirname(meta['path']) # noqa F841 - self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) - elif torrent_client.lower() == "transmission": - self.transmission(meta['path'], torrent, 
local_path, remote_path, client, meta) - elif torrent_client.lower() == "watch": - shutil.copy(torrent_path, client['watch_folder']) + if client_name not in self.config['TORRENT_CLIENTS']: + console.print(f"[bold red]Torrent client '{client_name}' not found in config.") + continue + + client = self.config['TORRENT_CLIENTS'][client_name] + torrent_client = client['torrent_client'] + + # Must pass client_name to remote_path_map + local_path, remote_path = await self.remote_path_map(meta, client_name) + + if meta['debug']: + console.print(f"[bold green]Adding to {client_name} ({torrent_client})") + + try: + if torrent_client.lower() == "rtorrent": + self.rtorrent(meta['path'], torrent_path, torrent, meta, local_path, remote_path, client, tracker) + elif torrent_client == "qbit": + await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta, tracker, cross) + elif torrent_client.lower() == "deluge": + if meta['type'] == "DISC": + path = os.path.dirname(meta['path']) # noqa F841 + self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) + elif torrent_client.lower() == "transmission": + self.transmission(meta['path'], torrent, local_path, remote_path, client, meta) + elif torrent_client.lower() == "watch": + shutil.copy(torrent_path, client['watch_folder']) + except Exception as e: + console.print(f"[bold red]Failed to add torrent to {client_name}: {e}") return async def find_existing_torrent(self, meta): - if meta.get('client', None) is None: - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] - else: - default_torrent_client = meta['client'] - if meta.get('client', None) == 'none' or default_torrent_client == 'none': - return None - - client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_storage_dir = client.get('torrent_storage_dir') - torrent_client = client.get('torrent_client', '').lower() + # Determine piece size preferences mtv_config = self.config['TRACKERS'].get('MTV') + piece_limit = self.config['DEFAULT'].get('prefer_max_16_torrent', False) + mtv_torrent = False if isinstance(mtv_config, dict): - prefer_small_pieces = mtv_config.get('prefer_mtv_torrent', False) + mtv_torrent = mtv_config.get('prefer_mtv_torrent', False) + prefer_small_pieces = mtv_torrent else: - prefer_small_pieces = False + if piece_limit: + prefer_small_pieces = True + else: + prefer_small_pieces = False best_match = None # Track the best match for fallback if prefer_small_pieces is enabled + default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + + if meta.get('client') and meta['client'] != 'none': + clients_to_search = [meta['client']] + else: + searching_list = self.config['DEFAULT'].get('searching_client_list', []) + + if isinstance(searching_list, list) and len(searching_list) > 0: + clients_to_search = [c for c in searching_list if c and c != 'none'] + else: + clients_to_search = [] + + if not clients_to_search: + if default_torrent_client and default_torrent_client != 'none': + clients_to_search = [default_torrent_client] + if meta['debug']: + console.print(f"[cyan]DEBUG: Falling back to default_torrent_client: {default_torrent_client}[/cyan]") + else: + console.print("[yellow]No clients configured for searching...[/yellow]") + return None + + for client_name in clients_to_search: + if client_name not in self.config['TORRENT_CLIENTS']: + console.print(f"[yellow]Client '{client_name}' not found in TORRENT_CLIENTS config, skipping...") + continue + + 
result = await self._search_single_client_for_torrent( + meta, client_name, prefer_small_pieces, mtv_torrent, piece_limit, best_match + ) + + if result: + if isinstance(result, dict): + # Got a valid torrent but not ideal piece size + best_match = result + # If prefer_small_pieces is False, we don't care about piece size optimization + # so stop searching after finding the first valid torrent + if not prefer_small_pieces: + console.print(f"[green]Found valid torrent in client '{client_name}', stopping search[/green]") + return best_match['torrent_path'] + else: + # Got a path - this means we found a torrent with ideal piece size + console.print(f"[green]Found valid torrent with preferred piece size in client '{client_name}', stopping search[/green]") + return result + + if prefer_small_pieces and best_match: + console.print(f"[yellow]Using best match torrent with hash: [bold yellow]{best_match['torrenthash']}[/bold yellow]") + return best_match['torrent_path'] + + console.print("[bold yellow]No Valid .torrent found") + return None + + async def _search_single_client_for_torrent(self, meta, client_name, prefer_small_pieces, mtv_torrent, piece_limit, best_match): + """Search a single client for an existing torrent by hash or via API search (qbit only).""" + + client = self.config['TORRENT_CLIENTS'][client_name] + torrent_client = client.get('torrent_client', '').lower() + torrent_storage_dir = client.get('torrent_storage_dir') + # Iterate through pre-specified hashes for hash_key in ['torrenthash', 'ext_torrenthash']: hash_value = meta.get(hash_key) @@ -92,21 +239,39 @@ async def find_existing_torrent(self, meta): if torrent_storage_dir: torrent_path = os.path.join(torrent_storage_dir, f"{hash_value}.torrent") else: - # Fetch from qBittorrent since we don't have torrent_storage_dir - console.print(f"[yellow]Fetching .torrent file from qBittorrent for hash: {hash_value}") + if torrent_client != 'qbit': + return None try: - qbt_client = qbittorrentapi.Client( - host=client['qbit_url'], - port=client['qbit_port'], - username=client['qbit_user'], - password=client['qbit_pass'], - VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) - ) - qbt_client.auth_log_in() + proxy_url = client.get('qui_proxy_url') + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + async with aiohttp.ClientSession() as session: + try: + async with session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': hash_value}) as response: + if response.status == 200: + torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export torrent via proxy: {response.status}") + continue + except Exception as e: + console.print(f"[red]Error exporting torrent via proxy: {e}") + continue + else: + potential_qbt_client = await self.init_qbittorrent_client(client) + if not potential_qbt_client: + continue + else: + qbt_client = potential_qbt_client - # Retrieve the .torrent file - torrent_file_content = qbt_client.torrents_export(torrent_hash=hash_value) + try: + torrent_file_content = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_export, torrent_hash=hash_value), + f"Export torrent {hash_value}" + ) + except (asyncio.TimeoutError, qbittorrentapi.APIError): + continue if not torrent_file_content: console.print(f"[bold red]qBittorrent returned an empty response for hash {hash_value}") continue # Skip to the next hash @@ -125,29 +290,124 @@ async def find_existing_torrent(self, meta): continue # Validate the .torrent file - valid, resolved_path = 
await self.is_valid_torrent(meta, torrent_path, hash_value, torrent_client, client, print_err=True) + valid, resolved_path = await self.is_valid_torrent(meta, torrent_path, hash_value, torrent_client, client_name, print_err=True) if valid: - console.print(f"[green]Found a valid torrent: [bold yellow]{hash_value}") return resolved_path # Search the client if no pre-specified hash matches if torrent_client == 'qbit' and client.get('enable_search'): try: - found_hash = await self.search_qbit_for_torrent(meta, client) + qbt_client, qbt_session, proxy_url = None, None, None + + proxy_url = client.get('qui_proxy_url') + + if proxy_url: + qbt_session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10), + connector=aiohttp.TCPConnector(verify_ssl=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + ) + else: + qbt_client = await self.init_qbittorrent_client(client) + + found_hash = await self.search_qbit_for_torrent(meta, client, qbt_client, qbt_session, proxy_url) + + # Clean up session if we created one + if qbt_session: + await qbt_session.close() + except KeyboardInterrupt: console.print("[bold red]Search cancelled by user") found_hash = None + if qbt_session: + await qbt_session.close() + except asyncio.TimeoutError: + if qbt_session: + await qbt_session.close() + raise except Exception as e: console.print(f"[bold red]Error searching qBittorrent: {e}") found_hash = None + if qbt_session: + await qbt_session.close() if found_hash: extracted_torrent_dir = os.path.join(meta.get('base_dir', ''), "tmp", meta.get('uuid', '')) - found_torrent_path = os.path.join(torrent_storage_dir, f"{found_hash}.torrent") if torrent_storage_dir else os.path.join(extracted_torrent_dir, f"{found_hash}.torrent") - valid, resolved_path = await self.is_valid_torrent( - meta, found_torrent_path, found_hash, torrent_client, client, print_err=False - ) + if torrent_storage_dir: + found_torrent_path = os.path.join(torrent_storage_dir, f"{found_hash}.torrent") + else: + found_torrent_path = os.path.join(extracted_torrent_dir, f"{found_hash}.torrent") + + if not os.path.exists(found_torrent_path): + console.print(f"[yellow]Exporting .torrent file from qBittorrent for hash: {found_hash}[/yellow]") + + try: + proxy_url = client.get('qui_proxy_url') + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + async with aiohttp.ClientSession() as session: + try: + async with session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': found_hash}) as response: + if response.status == 200: + torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export torrent via proxy: {response.status}") + found_hash = None + except Exception as e: + console.print(f"[red]Error exporting torrent via proxy: {e}") + found_hash = None + else: + # Reuse or create qbt_client if needed + if qbt_client is None: + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) + try: + await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.auth_log_in), + "qBittorrent login" + ) + except (asyncio.TimeoutError, qbittorrentapi.LoginFailed, qbittorrentapi.APIConnectionError) as e: + console.print(f"[bold red]Failed to connect to qBittorrent for export: {e}") + found_hash = None + + if found_hash: # Only proceed if we still have a hash + try: + torrent_file_content = await self.retry_qbt_operation( + lambda: 
asyncio.to_thread(qbt_client.torrents_export, torrent_hash=found_hash), + f"Export torrent {found_hash}" + ) + except (asyncio.TimeoutError, qbittorrentapi.APIError) as e: + console.print(f"[red]Error exporting torrent: {e}") + + if found_hash: # Only proceed if export succeeded + if not torrent_file_content: + found_hash = None + else: + os.makedirs(extracted_torrent_dir, exist_ok=True) + with open(found_torrent_path, "wb") as f: + f.write(torrent_file_content) + console.print(f"[green]Successfully saved .torrent file: {found_torrent_path}") + except Exception as e: + console.print(f"[bold red]Unexpected error fetching .torrent from qBittorrent: {e}") + console.print("[cyan]DEBUG: Skipping found_hash due to unexpected error[/cyan]") + found_hash = None + else: + console.print(f"[cyan]DEBUG: .torrent file already exists at {found_torrent_path}[/cyan]") + + # Only validate if we still have a hash (export succeeded or file already existed) + if found_hash: + valid, resolved_path = await self.is_valid_torrent( + meta, found_torrent_path, found_hash, torrent_client, client_name, print_err=False + ) + else: + valid = False + console.print("[cyan]DEBUG: Skipping validation because found_hash is None[/cyan]") if valid: torrent = Torrent.read(resolved_path) @@ -159,21 +419,19 @@ async def find_existing_torrent(self, meta): return resolved_path # Track best match for small pieces - if piece_size <= 8388608: + if piece_size <= 8388608 and mtv_torrent: console.print(f"[green]Found a valid torrent with preferred piece size from client search: [bold yellow]{found_hash}") return resolved_path + if piece_size < 16777216 and piece_limit: # 16 MiB + console.print(f"[green]Found a valid torrent with piece size under 16 MiB from client search: [bold yellow]{found_hash}") + return resolved_path + if best_match is None or piece_size < best_match['piece_size']: best_match = {'torrenthash': found_hash, 'torrent_path': resolved_path, 'piece_size': piece_size} console.print(f"[yellow]Storing valid torrent from client search as best match: [bold yellow]{found_hash}") - # Use best match if no preferred torrent found - if prefer_small_pieces and best_match: - console.print(f"[yellow]Using best match torrent with hash: [bold yellow]{best_match['torrenthash']}[/bold yellow]") - return best_match['torrent_path'] - - console.print("[bold yellow]No Valid .torrent found") - return None + return best_match async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, client, print_err=False): valid = False @@ -224,7 +482,7 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client elif len(torrent.files) == len(meta['filelist']): torrent_filepath = os.path.commonpath(torrent.files) actual_filepath = os.path.commonpath(meta['filelist']) - local_path, remote_path = await self.remote_path_map(meta) + local_path, remote_path = await self.remote_path_map(meta, client) if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): actual_filepath = actual_filepath.replace(local_path, remote_path).replace(os.sep, '/') @@ -254,15 +512,15 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.log(f"Checking piece size, count and size: pieces={reuse_torrent.pieces}, piece_size={piece_in_mib} MiB, .torrent size={torrent_file_size_kib} KiB") # Piece size and count validations - if not meta.get('prefer_small_pieces', False): - if reuse_torrent.pieces >= 8000 and reuse_torrent.piece_size < 8488608: - if meta['debug']: - 
console.print("[bold red]Torrent needs to have less than 8000 pieces with a 8 MiB piece size") - valid = False - elif reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4294304: - if meta['debug']: - console.print("[bold red]Torrent needs to have less than 5000 pieces with a 4 MiB piece size") - valid = False + max_piece_size = meta.get('max_piece_size') + if reuse_torrent.pieces >= 5000 and reuse_torrent.piece_size < 4294304 and (max_piece_size is None or max_piece_size >= 4): + if meta['debug']: + console.print("[bold red]Torrent needs to have less than 5000 pieces with a 4 MiB piece size") + valid = False + elif reuse_torrent.pieces >= 8000 and reuse_torrent.piece_size < 8488608 and (max_piece_size is None or max_piece_size >= 8) and not meta.get('prefer_small_pieces', False): + if meta['debug']: + console.print("[bold red]Torrent needs to have less than 8000 pieces with a 8 MiB piece size") + valid = False elif 'max_piece_size' not in meta and reuse_torrent.pieces >= 12000: if meta['debug']: console.print("[bold red]Torrent needs to have less than 12000 pieces to be valid") @@ -280,7 +538,8 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.log("[bold red]Provided .torrent has files that were not expected") valid = False else: - console.print(f"[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}") + if meta['debug']: + console.log(f"[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}") except Exception as e: console.print(f'[bold red]Error checking reuse torrent: {e}') valid = False @@ -293,7 +552,7 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client return valid, torrent_path - async def search_qbit_for_torrent(self, meta, client): + async def search_qbit_for_torrent(self, meta, client, qbt_client=None, qbt_session=None, proxy_url=None): mtv_config = self.config['TRACKERS'].get('MTV') if isinstance(mtv_config, dict): prefer_small_pieces = mtv_config.get('prefer_mtv_torrent', False) @@ -309,14 +568,16 @@ async def search_qbit_for_torrent(self, meta, client): return None try: - qbt_client = qbittorrentapi.Client( - host=client['qbit_url'], - port=client['qbit_port'], - username=client['qbit_user'], - password=client['qbit_pass'], - VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) - ) - qbt_client.auth_log_in() + if qbt_client is None and proxy_url is None: + potential_qbt_client = await self.init_qbittorrent_client(client) + if potential_qbt_client is None: + return None + qbt_client = potential_qbt_client + elif proxy_url and qbt_session is None: + qbt_session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10), + connector=aiohttp.TCPConnector(verify_ssl=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + ) except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") @@ -332,17 +593,54 @@ async def search_qbit_for_torrent(self, meta, client): best_match = None matching_torrents = [] - for torrent in qbt_client.torrents.info(): + try: + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + async with qbt_session.get(f"{qbt_proxy_url}/api/v2/torrents/info") as response: + if response.status == 200: + torrents_data = await response.json() + + class MockTorrent: + def __init__(self, data): + for key, value in data.items(): + setattr(self, key, value) + # For proxy API, we need to fetch files separately or use num_files from torrents/info + # The torrents/info endpoint doesn't include files array but has 
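Editor's note: the validation rework above rejects candidate .torrents whose piece count is excessive for their piece size (≥5000 pieces with sub-4 MiB pieces, ≥8000 with sub-8 MiB pieces unless small pieces are explicitly preferred, and ≥12000 outright when no `max_piece_size` override is set). A standalone sketch of those thresholds, using exact 4/8 MiB constants rather than the literals in the diff:

```python
MIB = 1024 * 1024


def torrent_pieces_acceptable(pieces: int, piece_size: int,
                              max_piece_size=None, prefer_small_pieces=False) -> bool:
    """Reject torrents whose piece count is too high for their piece size."""
    if pieces >= 5000 and piece_size < 4 * MIB and (max_piece_size is None or max_piece_size >= 4):
        return False          # needs fewer than 5000 pieces at a 4 MiB piece size
    if (pieces >= 8000 and piece_size < 8 * MIB
            and (max_piece_size is None or max_piece_size >= 8)
            and not prefer_small_pieces):
        return False          # needs fewer than 8000 pieces at an 8 MiB piece size
    if max_piece_size is None and pieces >= 12000:
        return False          # hard cap when no max_piece_size override is set
    return True


assert torrent_pieces_acceptable(3000, 8 * MIB)
assert not torrent_pieces_acceptable(6000, 2 * MIB)
```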
'num_files' field + if not hasattr(self, 'tracker'): + self.tracker = '' + if not hasattr(self, 'comment'): + self.comment = '' + # Create a files list based on num_files to make len() work + if hasattr(self, 'num_files'): + self.files = [None] * self.num_files # Dummy list with correct length + elif not hasattr(self, 'files'): + self.files = [] + torrents = [MockTorrent(torrent) for torrent in torrents_data] + else: + console.print(f"[bold red]Failed to get torrents list via proxy: {response.status}") + return None + else: + torrents = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_info), + "Get torrents list", + initial_timeout=14.0 + ) + except asyncio.TimeoutError: + console.print("[bold red]Getting torrents list timed out after retries") + return None + except Exception as e: + console.print(f"[bold red]Error getting torrents list: {e}") + return None + + torrent_count = 0 + for torrent in torrents: try: torrent_path = torrent.name + torrent_count += 1 except AttributeError: continue # Ignore torrents with missing attributes - if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: - if torrent_path != meta['uuid'] or len(torrent.files) != len(meta['filelist']): - continue - - elif meta['uuid'] != torrent_path: + if meta['uuid'].lower() != torrent_path.lower(): continue if meta['debug']: @@ -353,6 +651,7 @@ async def search_qbit_for_torrent(self, meta, client): matching_torrents.append({'hash': torrent.hash, 'name': torrent.name}) + console.print(f"[cyan]DEBUG: Checked {torrent_count} total torrents in qBittorrent[/cyan]") if not matching_torrents: console.print("[yellow]No matching torrents found in qBittorrent.") return None @@ -385,17 +684,33 @@ async def search_qbit_for_torrent(self, meta, client): if meta['debug']: console.print(f"[cyan]Exporting .torrent file for {torrent_hash}") - try: - torrent_file_content = qbt_client.torrents_export(torrent_hash=torrent_hash) + torrent_file_content = None + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + try: + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': torrent_hash}) as response: + if response.status == 200: + torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export torrent via proxy: {response.status}") + except Exception as e: + console.print(f"[red]Error exporting torrent via proxy: {e}") + else: + torrent_file_content = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_export, torrent_hash=torrent_hash), + f"Export torrent {torrent_hash}" + ) + + if torrent_file_content is not None: torrent_file_path = os.path.join(extracted_torrent_dir, f"{torrent_hash}.torrent") with open(torrent_file_path, "wb") as f: f.write(torrent_file_content) if meta['debug']: console.print(f"[green]Successfully saved .torrent file: {torrent_file_path}") - - except qbittorrentapi.APIError as e: - console.print(f"[bold red]Failed to export .torrent for {torrent_hash}: {e}") + else: + console.print(f"[bold red]Failed to export .torrent for {torrent_hash} after retries") continue # Skip this torrent if unable to fetch # **Validate the .torrent file** @@ -407,7 +722,6 @@ async def search_qbit_for_torrent(self, meta, client): torrent_path = None if valid: - console.print("prefersmallpieces", prefer_small_pieces) if prefer_small_pieces: # **Track best match based on piece size** try: @@ -435,10 +749,15 @@ async def search_qbit_for_torrent(self, meta, client): # **Return the best match if `prefer_small_pieces` 
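Editor's note: `retry_qbt_operation` is called throughout the new code but its implementation is not part of this hunk. The call sites suggest it takes an awaitable-returning callable, a description, and optional `max_retries`/`initial_timeout` arguments, and re-raises `asyncio.TimeoutError` once retries are exhausted. A plausible sketch under those assumptions — not the project's actual implementation:

```python
import asyncio


async def retry_qbt_operation(operation, description,
                              max_retries: int = 2, initial_timeout: float = 7.0):
    """Run `operation()` under a timeout, doubling the timeout on each retry.

    Sketch only: signature and back-off policy are inferred from the call sites
    in this diff (e.g. initial_timeout=14.0 for listing torrents).
    """
    timeout = initial_timeout
    last_exc = None
    for attempt in range(1, max_retries + 2):          # initial try + retries
        try:
            return await asyncio.wait_for(operation(), timeout=timeout)
        except asyncio.TimeoutError as exc:
            last_exc = exc
            print(f"{description} timed out (attempt {attempt}, {timeout:.0f}s)")
            timeout *= 2                                # simple exponential back-off
    raise last_exc
```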
is enabled** if best_match: console.print(f"[green]Using best match torrent with hash: {best_match['hash']}") - return best_match['hash'] + result = best_match['hash'] + else: + console.print("[yellow]No valid torrents found.") + result = None - console.print("[yellow]No valid torrents found.") - return None + if qbt_session and proxy_url: + await qbt_session.close() + + return result def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, client, tracker): # Get the appropriate source path (same as in qbittorrent method) @@ -644,9 +963,14 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c rtorrent = xmlrpc.client.Server(client['rtorrent_url'], context=ssl._create_stdlib_context()) metainfo = bencode.bread(torrent_path) + if meta['debug']: + print(f"{rtorrent}: {redact_private_info(rtorrent)}") + print(f"{metainfo}: {redact_private_info(metainfo)}") try: # Use dst path if linking was successful, otherwise use original path resume_path = dst if (use_symlink or use_hardlink) and os.path.exists(dst) else path + if meta['debug']: + console.print(f"[cyan]Using resume path: {resume_path}") fast_resume = self.add_fast_resume(metainfo, resume_path, torrent) except EnvironmentError as exc: console.print("[red]Error making fast-resume data (%s)" % (exc,)) @@ -655,7 +979,8 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c new_meta = bencode.bencode(fast_resume) if new_meta != metainfo: fr_file = torrent_path.replace('.torrent', '-resume.torrent') - console.print("Creating fast resume") + if meta['debug']: + console.print("Creating fast resume file:", fr_file) bencode.bwrite(fast_resume, fr_file) # Use dst path if linking was successful, otherwise use original path @@ -671,26 +996,38 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c shutil.copy(fr_file, f"{path_dir}/fr.torrent") fr_file = f"{os.path.dirname(path)}/fr.torrent" modified_fr = True + if meta['debug']: + console.print(f"[cyan]Modified fast resume file path because path mapping: {fr_file}") if isdir is False: path = os.path.dirname(path) + if meta['debug']: + console.print(f"[cyan]Final path for rTorrent: {path}") console.print("[bold yellow]Adding and starting torrent") rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") + if meta['debug']: + console.print(f"[green]rTorrent load start for {fr_file} with d.directory_base.set={path}") time.sleep(1) # Add labels if client.get('rtorrent_label', None) is not None: + if meta['debug']: + console.print(f"[cyan]Setting rTorrent label: {client['rtorrent_label']}") rtorrent.d.custom1.set(torrent.infohash, client['rtorrent_label']) if meta.get('rtorrent_label') is not None: rtorrent.d.custom1.set(torrent.infohash, meta['rtorrent_label']) + if meta['debug']: + console.print(f"[cyan]Setting rTorrent label from meta: {meta['rtorrent_label']}") # Delete modified fr_file location if modified_fr: + if meta['debug']: + console.print(f"[cyan]Removing modified fast resume file: {fr_file}") os.remove(f"{path_dir}/fr.torrent") if meta.get('debug', False): console.print(f"[cyan]Path: {path}") return - async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta, tracker): + async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta, tracker, cross=False): if meta.get('keep_folder'): path = os.path.dirname(path) else: @@ -750,7 +1087,6 @@ async def qbittorrent(self, path, torrent, local_path, 
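Editor's note: the rTorrent path above talks XML-RPC: it loads the fast-resume-augmented .torrent with `load.start_verbose`, points it at the payload via `d.directory_base.set`, and then labels it with `d.custom1.set`. A minimal standalone sketch of that sequence; the URL, paths, infohash and label below are placeholders:

```python
import ssl
import time
import xmlrpc.client

# Placeholder endpoint; real deployments usually expose SCGI through httprpc or similar.
rtorrent = xmlrpc.client.Server(
    "https://user:pass@seedbox.example/rutorrent/plugins/httprpc/action.php",
    context=ssl._create_stdlib_context(),
)

fr_file = "/downloads/fr.torrent"               # fast-resume torrent written earlier
base_dir = "/downloads/Some.Release.2160p"      # directory holding the existing data

# Load the torrent, start it, and point it at the existing data so no recheck is needed.
rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={base_dir}")
time.sleep(1)  # give rTorrent a moment to register the item

# custom1 is what ruTorrent displays as the label.
infohash = "ABCDEF0123456789ABCDEF0123456789ABCDEF01"
rtorrent.d.custom1.set(infohash, "upload-assistant")
```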
remote_path, client, is_d mounted_volumes.append(mount_point) else: # Fall back to mount command if /proc/mounts doesn't exist - import subprocess output = subprocess.check_output(['mount'], text=True) for line in output.splitlines(): parts = line.split() @@ -828,128 +1164,61 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d console.print(f"[bold red]{error_msg}") raise ValueError(error_msg) - # Create tracker-specific directory inside linked folder + tracker_dir = None if use_symlink or use_hardlink: - # allow overridden folder name with link_dir_name config var tracker_cfg = self.config["TRACKERS"].get(tracker.upper(), {}) link_dir_name = str(tracker_cfg.get("link_dir_name", "")).strip() tracker_dir = os.path.join(link_target, link_dir_name or tracker) - os.makedirs(tracker_dir, exist_ok=True) - - if meta['debug']: - console.print(f"[bold yellow]Linking to tracker directory: {tracker_dir}") - console.print(f"[cyan]Source path: {src}") - - # Extract only the folder or file name from `src` - src_name = os.path.basename(src.rstrip(os.sep)) # Ensure we get just the name - dst = os.path.join(tracker_dir, src_name) # Destination inside linked folder - - # path magic - if os.path.exists(dst) or os.path.islink(dst): - if meta['debug']: - console.print(f"[yellow]Skipping linking, path already exists: {dst}") + await asyncio.to_thread(os.makedirs, tracker_dir, exist_ok=True) + + if cross: + linking_success = await create_cross_seed_links( + meta=meta, + torrent=torrent, + tracker_dir=tracker_dir, + use_hardlink=use_hardlink + ) else: - allow_fallback = self.config['TRACKERS'].get('allow_fallback', True) - fallback_to_original = False - if use_hardlink: - try: - # Check if we're linking a file or directory - if os.path.isfile(src): - # For a single file, create a hardlink directly - try: - os.link(src, dst) - if meta['debug']: - console.print(f"[green]Hard link created: {dst} -> {src}") - except OSError as e: - console.print(f"[yellow]Hard link failed: {e}") - if allow_fallback: - console.print(f"[yellow]Using original path without linking: {src}") - use_hardlink = False - fallback_to_original = True - else: - # For directories, we need to link each file inside - console.print("[yellow]Cannot hardlink directories directly. Creating directory structure...") - os.makedirs(dst, exist_ok=True) - - for root, _, files in os.walk(src): - # Get the relative path from source - rel_path = os.path.relpath(root, src) - - # Create corresponding directory in destination - if rel_path != '.': - dst_dir = os.path.join(dst, rel_path) - os.makedirs(dst_dir, exist_ok=True) - - # Create hardlinks for each file - for file in files: - src_file = os.path.join(root, file) - dst_file = os.path.join(dst if rel_path == '.' 
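Editor's note: before linking, the code above enumerates mount points (from `/proc/mounts`, falling back to the `mount` command) so it can choose a link target on the same filesystem as the source. A self-contained sketch of that enumeration and of picking the owning mount point for a path:

```python
import os
import subprocess


def list_mount_points():
    """Return mount points, preferring /proc/mounts and falling back to `mount`."""
    mounts = []
    if os.path.exists('/proc/mounts'):
        with open('/proc/mounts', 'r', encoding='utf-8') as fh:
            for line in fh:
                parts = line.split()
                if len(parts) >= 2:
                    mounts.append(parts[1])          # second field is the mount point
    else:
        output = subprocess.check_output(['mount'], text=True)
        for line in output.splitlines():
            parts = line.split()
            if len(parts) >= 3 and parts[1] == 'on':
                mounts.append(parts[2])              # "… on /mnt/media type …"
    return mounts


def mount_point_for(path, mounts):
    """Longest mount point that contains `path` (same-filesystem check for hardlinks)."""
    p = os.path.abspath(path)
    for mp in sorted(mounts, key=len, reverse=True):
        if p == mp or p.startswith(mp.rstrip('/') + '/'):
            return mp
    return '/'
```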
else dst_dir, file) - try: - os.link(src_file, dst_file) - if meta['debug'] and files.index(file) == 0: - console.print(f"[green]Hard link created for file: {dst_file} -> {src_file}") - except OSError as e: - console.print(f"[yellow]Hard link failed for file {file}: {e}") - if allow_fallback: - console.print(f"[yellow]Using original path without linking: {src}") - fallback_to_original = True - break - - if fallback_to_original: - use_hardlink = False - link_target = None - # Clean up the partially created directory - try: - shutil.rmtree(dst) - except Exception as cleanup_error: - console.print(f"[red]Warning: Failed to clean up partial directory {dst}: {cleanup_error}") - - except OSError as e: - # Global exception handler for any linking operation - error_msg = f"Failed to create hard link: {e}" - console.print(f"[bold red]{error_msg}") - if allow_fallback: - console.print(f"[yellow]Using original path without linking: {src}") - use_hardlink = False - if meta['debug']: - console.print(f"[yellow]Source: {src} (exists: {os.path.exists(src)})") - console.print(f"[yellow]Destination: {dst}") + src_name = os.path.basename(src.rstrip(os.sep)) + dst = os.path.join(tracker_dir, src_name) + linking_success = await async_link_directory( + src=src, + dst=dst, + use_hardlink=use_hardlink, + debug=meta.get('debug', False) + ) - elif use_symlink: - try: - if platform.system() == "Windows": - os.symlink(src, dst, target_is_directory=os.path.isdir(src)) - else: - os.symlink(src, dst) + allow_fallback = client.get('allow_fallback', True) + if not linking_success and allow_fallback: + console.print(f"[yellow]Using original path without linking: {src}") + use_hardlink = False + use_symlink = False + elif not linking_success: + console.print("[bold red]Linking failed and fallback is disabled; aborting qBittorrent add") + return + elif cross: + console.print("[yellow]Cross seed requested, but no linking method is configured. 
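Editor's note: `create_cross_seed_links` and `async_link_directory` replace the long inline linking block that used to live here; their implementations are imported from elsewhere and are not part of this hunk. A rough synchronous sketch of what a helper of this shape typically does (assumed behaviour inferred from the removed code, not the project's actual helpers):

```python
import os
import shutil


def link_path(src: str, dst: str, use_hardlink: bool = True) -> bool:
    """Hardlink (or symlink) `src` into `dst`; return False and clean up on failure."""
    try:
        if not use_hardlink:
            os.symlink(src, dst, target_is_directory=os.path.isdir(src))
            return True
        if os.path.isfile(src):
            os.link(src, dst)
            return True
        # Directories cannot be hardlinked, so mirror the tree and link file by file.
        for root, _, files in os.walk(src):
            rel = os.path.relpath(root, src)
            target_dir = dst if rel == '.' else os.path.join(dst, rel)
            os.makedirs(target_dir, exist_ok=True)
            for name in files:
                os.link(os.path.join(root, name), os.path.join(target_dir, name))
        return True
    except OSError as exc:
        print(f"Linking failed: {exc}")
        shutil.rmtree(dst, ignore_errors=True)  # drop any partially built tree
        return False
```

A `False` return here corresponds to the `linking_success` flag above, which either falls back to the original path or aborts the add depending on `allow_fallback`.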
Proceeding with original path naming.") - if meta['debug']: - console.print(f"[green]Symbolic link created: {dst} -> {src}") + proxy_url = client.get('qui_proxy_url') + qbt_client = None + qbt_session = None - except OSError as e: - error_msg = f"Failed to create symlink: {e}" - console.print(f"[bold red]{error_msg}") - if allow_fallback: - console.print(f"[yellow]Using original path without linking: {src}") - use_symlink = False - - # Initialize qBittorrent client - qbt_client = qbittorrentapi.Client( - host=client['qbit_url'], - port=client['qbit_port'], - username=client['qbit_user'], - password=client['qbit_pass'], - VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) - ) + if proxy_url: + qbt_session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10), + connector=aiohttp.TCPConnector(verify_ssl=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + ) + qbt_proxy_url = proxy_url.rstrip('/') + else: + potential_qbt_client = await self.init_qbittorrent_client(client) + if not potential_qbt_client: + return + else: + qbt_client = potential_qbt_client if meta['debug']: console.print("[bold yellow]Adding and rechecking torrent") - try: - qbt_client.auth_log_in() - except qbittorrentapi.LoginFailed: - console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") - return - # Apply remote pathing to `tracker_dir` before assigning `save_path` if use_symlink or use_hardlink: save_path = tracker_dir # Default to linked directory @@ -998,52 +1267,185 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": auto_management = True - qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') + if cross and client.get('qbit_cross_cat'): + qbt_category = client['qbit_cross_cat'] + else: + qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') content_layout = client.get('content_layout', 'Original') if meta['debug']: console.print("qbt_category:", qbt_category) console.print(f"Content Layout: {content_layout}") console.print(f"[bold yellow]qBittorrent save path: {save_path}") + if cross: + skip_checking = True + paused_on_add = True + else: + skip_checking = True + paused_on_add = False + tag = None + if cross and client.get('qbit_cross_tag'): + tag = client['qbit_cross_tag'] + else: + if meta.get('qbit_tag'): + tag = meta['qbit_tag'] + elif client.get("use_tracker_as_tag", False) and tracker: + tag = tracker + elif client.get('qbit_tag'): + tag = client['qbit_tag'] + try: - qbt_client.torrents_add( - torrent_files=torrent.dump(), - save_path=save_path, - use_auto_torrent_management=auto_management, - is_skip_checking=True, - content_layout=content_layout, - category=qbt_category - ) - except qbittorrentapi.APIConnectionError as e: - console.print(f"[red]Failed to add torrent: {e}") + if proxy_url: + # Create FormData for multipart/form-data request + data = aiohttp.FormData() + data.add_field('savepath', save_path) + data.add_field('autoTMM', str(auto_management).lower()) + data.add_field('skip_checking', str(skip_checking).lower()) + data.add_field('paused', str(paused_on_add).lower()) + data.add_field('contentLayout', content_layout) + if qbt_category: + data.add_field('category', qbt_category) + if tag: + data.add_field('tags', tag) + data.add_field('torrents', torrent.dump(), filename='torrent.torrent', content_type='application/x-bittorrent') + if meta['debug']: + 
console.print(f"[cyan]POSTing to {redact_private_info(qbt_proxy_url)}/api/v2/torrents/add with data: savepath={save_path}, autoTMM={auto_management}, skip_checking={skip_checking}, paused={paused_on_add}, contentLayout={content_layout}, category={qbt_category}, tags={tag}") + + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/add", + data=data) as response: + if response.status != 200: + console.print(f"[bold red]Failed to add torrent via proxy: {response.status}") + return + else: + await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_add, + torrent_files=torrent.dump(), + save_path=save_path, + use_auto_torrent_management=auto_management, + is_skip_checking=skip_checking, + paused=paused_on_add, + content_layout=content_layout, + category=qbt_category, + tags=tag), + "Add torrent to qBittorrent", + initial_timeout=14.0 + ) + except (asyncio.TimeoutError, qbittorrentapi.APIConnectionError): + console.print("[bold red]Failed to add torrent to qBittorrent") + if qbt_session: + await qbt_session.close() + return + except Exception as e: + console.print(f"[bold red]Error adding torrent: {e}") + if qbt_session: + await qbt_session.close() return # Wait for torrent to be added timeout = 30 for _ in range(timeout): - if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0: - break + try: + if proxy_url: + async with qbt_session.get(f"{qbt_proxy_url}/api/v2/torrents/info", + params={'hashes': torrent.infohash}) as response: + if response.status == 200: + torrents_info = await response.json() + if len(torrents_info) > 0: + break + else: + pass # Continue waiting + else: + torrents_info = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_info, torrent_hashes=torrent.infohash), + "Check torrent addition", + max_retries=1, + initial_timeout=10.0 + ) + if len(torrents_info) > 0: + break + except asyncio.TimeoutError: + pass # Continue waiting + except Exception: + pass # Continue waiting await asyncio.sleep(1) else: console.print("[red]Torrent addition timed out.") + if qbt_session: + await qbt_session.close() return - # Resume and tag torrent - qbt_client.torrents_resume(torrent.infohash) - if client.get("use_tracker_as_tag", False) and tracker: - qbt_client.torrents_add_tags(tags=tracker, torrent_hashes=torrent.infohash) - if client.get('qbit_tag'): - qbt_client.torrents_add_tags(tags=client['qbit_tag'], torrent_hashes=torrent.infohash) - if meta and meta.get('qbit_tag'): - qbt_client.torrents_add_tags(tags=meta['qbit_tag'], torrent_hashes=torrent.infohash) + if not cross: + try: + if proxy_url: + console.print("[yellow]No qui proxy resume support....") + # async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/resume", + # data={'hashes': torrent.infohash}) as response: + # if response.status != 200: + # console.print(f"[yellow]Failed to resume torrent via proxy: {response.status}") + else: + await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_resume, torrent.infohash), + "Resume torrent" + ) + except asyncio.TimeoutError: + console.print("[yellow]Failed to resume torrent after retries") + except Exception as e: + console.print(f"[yellow]Error resuming torrent: {e}") + + if tracker in client.get("super_seed_trackers", []) and not cross: + try: + if meta['debug']: + console.print(f"{tracker}: Setting super-seed mode.") + if proxy_url: + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/setSuperSeeding", + data={'hashes': torrent.infohash, "value": "true"}) as response: + 
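Editor's note: when a qui proxy is configured, the add goes through a plain multipart POST rather than `qbittorrentapi.Client.torrents_add`. A trimmed, standalone sketch of that request, assuming the proxy forwards the stock `/api/v2/torrents/add` endpoint; the URL and paths are placeholders:

```python
import asyncio
from typing import Optional

import aiohttp


async def add_torrent_via_proxy(proxy_url: str, torrent_bytes: bytes, save_path: str,
                                category: Optional[str] = None, paused: bool = False) -> bool:
    """POST a .torrent to <proxy>/api/v2/torrents/add and report success."""
    data = aiohttp.FormData()
    data.add_field('savepath', save_path)
    data.add_field('skip_checking', 'true')
    data.add_field('paused', str(paused).lower())
    if category:
        data.add_field('category', category)
    data.add_field('torrents', torrent_bytes,
                   filename='upload.torrent', content_type='application/x-bittorrent')

    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10)) as session:
        async with session.post(f"{proxy_url.rstrip('/')}/api/v2/torrents/add", data=data) as resp:
            return resp.status == 200


# Usage sketch (placeholder URL and file):
# ok = asyncio.run(add_torrent_via_proxy("http://localhost:7476/",
#                                        open("upload.torrent", "rb").read(),
#                                        "/downloads/Some.Release"))
```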
if response.status != 200: + console.print(f"{tracker}: Failed to set super-seed via proxy: {response.status}") + else: + await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_set_super_seeding, torrent_hashes=torrent.infohash), + "Set super-seed mode", + initial_timeout=10.0 + ) + except asyncio.TimeoutError: + console.print(f"{tracker}: Super-seed request timed out") + except Exception as e: + console.print(f"{tracker}: Super-seed error: {e}") if meta['debug']: - info = qbt_client.torrents_info(torrent_hashes=torrent.infohash) - console.print(f"[cyan]Actual qBittorrent save path: {info[0].save_path}") + try: + if proxy_url: + async with qbt_session.get(f"{qbt_proxy_url}/api/v2/torrents/info", + params={'hashes': torrent.infohash}) as response: + if response.status == 200: + info = await response.json() + if info: + console.print(f"[cyan]Actual qBittorrent save path: {info[0].get('save_path', 'Unknown')}") + else: + console.print("[yellow]No torrent info returned from proxy") + else: + console.print(f"[yellow]Failed to get torrent info via proxy: {response.status}") + else: + info = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_info, torrent_hashes=torrent.infohash), + "Get torrent info for debug", + initial_timeout=10.0 + ) + if info: + console.print(f"[cyan]Actual qBittorrent save path: {info[0].save_path}") + else: + console.print("[yellow]No torrent info returned from qBittorrent") + except asyncio.TimeoutError: + console.print("[yellow]Failed to get torrent info for debug after retries") + except Exception as e: + console.print(f"[yellow]Error getting torrent info for debug: {e}") if meta['debug']: console.print(f"Added to: {save_path}") + if qbt_session: + await qbt_session.close() + def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta): client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass']) # client = LocalDelugeRPCClient() @@ -1148,13 +1550,19 @@ def add_fast_resume(self, metainfo, datapath, torrent): return metainfo - async def remote_path_map(self, meta): - if meta.get('client', None) is None: - torrent_client = self.config['DEFAULT']['default_torrent_client'] + async def remote_path_map(self, meta, torrent_client_name=None): + if isinstance(torrent_client_name, dict): + client_config = torrent_client_name + elif isinstance(torrent_client_name, str) and torrent_client_name: + try: + client_config = self.config['TORRENT_CLIENTS'][torrent_client_name] + except KeyError as exc: + raise KeyError(f"Torrent client '{torrent_client_name}' not found in TORRENT_CLIENTS") from exc else: - torrent_client = meta['client'] - local_paths = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path', ['/LocalPath']) - remote_paths = self.config['TORRENT_CLIENTS'][torrent_client].get('remote_path', ['/RemotePath']) + raise ValueError("torrent_client_name must be a client name or client config dict") + + local_paths = client_config.get('local_path', ['/LocalPath']) + remote_paths = client_config.get('remote_path', ['/RemotePath']) if not isinstance(local_paths, list): local_paths = [local_paths] @@ -1185,31 +1593,75 @@ async def get_ptp_from_hash(self, meta, pathed=False): await self.get_ptp_from_hash_rtorrent(meta, pathed) return meta elif torrent_client == 'qbit': - qbt_client = qbittorrentapi.Client( - host=client['qbit_url'], - port=client['qbit_port'], - username=client['qbit_user'], - password=client['qbit_pass'], - 
VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True), - REQUESTS_ARGS={'timeout': 10} - ) - - try: - await asyncio.wait_for( - asyncio.to_thread(qbt_client.auth_log_in), - timeout=10.0 + proxy_url = client.get('qui_proxy_url') + qbt_client = None + qbt_session = None + + if proxy_url: + qbt_session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10), + connector=aiohttp.TCPConnector(verify_ssl=client.get('VERIFY_WEBUI_CERTIFICATE', True)) ) - except asyncio.TimeoutError: - console.print("[bold red]Login attempt to qBittorrent timed out after 10 seconds") - return None - except qbittorrentapi.LoginFailed as e: - console.print(f"[bold red]Login failed while trying to get info hash: {e}") - exit(1) + qbt_proxy_url = proxy_url.rstrip('/') + else: + potential_qbt_client = await self.init_qbittorrent_client(client) + if not potential_qbt_client: + return meta + else: + qbt_client = potential_qbt_client info_hash_v1 = meta.get('infohash') if meta['debug']: console.print(f"[cyan]Searching for infohash: {info_hash_v1}") - torrents = qbt_client.torrents_info() + + class TorrentInfo: + def __init__(self, properties_data): + self.hash = properties_data.get('hash', info_hash_v1) + self.infohash_v1 = properties_data.get('infohash_v1', info_hash_v1) + self.name = properties_data.get('name', '') + self.comment = properties_data.get('comment', '') + self.tracker = '' + self.files = [] + + try: + if proxy_url: + async with qbt_session.get(f"{qbt_proxy_url}/api/v2/torrents/properties", + params={'hash': info_hash_v1}) as response: + if response.status == 200: + torrent_properties = await response.json() + if meta['debug']: + console.print(f"[cyan]Retrieved torrent properties via proxy for hash: {info_hash_v1}") + + torrents = [TorrentInfo(torrent_properties)] + else: + console.print(f"[bold red]Failed to get torrent properties via proxy: {response.status}") + if qbt_session: + await qbt_session.close() + return meta + else: + try: + torrent_properties = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_properties, torrent_hash=info_hash_v1), + f"Get torrent properties for hash {info_hash_v1}", + initial_timeout=14.0 + ) + if meta['debug']: + console.print(f"[cyan]Retrieved torrent properties via client for hash: {info_hash_v1}") + + torrents = [TorrentInfo(torrent_properties)] + except Exception as e: + console.print(f"[yellow]Failed to get properties: {e}") + return meta + except asyncio.TimeoutError: + console.print("[bold red]Getting torrents list timed out after retries") + if qbt_session: + await qbt_session.close() + return meta + except Exception as e: + console.print(f"[bold red]Error getting torrents list: {e}") + if qbt_session: + await qbt_session.close() + return meta found = False folder_id = os.path.basename(meta['path']) @@ -1220,106 +1672,128 @@ async def get_ptp_from_hash(self, meta, pathed=False): os.makedirs(extracted_torrent_dir, exist_ok=True) for torrent in torrents: - if torrent.get('infohash_v1') == info_hash_v1: - comment = torrent.get('comment', "") - match = None + try: + if getattr(torrent, 'infohash_v1', '') == info_hash_v1: + comment = getattr(torrent, 'comment', "") + match = None - if 'torrent_comments' not in meta: - meta['torrent_comments'] = [] + if 'torrent_comments' not in meta: + meta['torrent_comments'] = [] - comment_data = { - 'hash': torrent.get('infohash_v1', ''), - 'name': torrent.get('name', ''), - 'comment': comment, - } - meta['torrent_comments'].append(comment_data) + comment_data = { + 'hash': 
getattr(torrent, 'infohash_v1', ''), + 'name': getattr(torrent, 'name', ''), + 'comment': comment, + } + meta['torrent_comments'].append(comment_data) - if meta.get('debug', False): - console.print(f"[cyan]Stored comment for torrent: {comment[:100]}...") + if meta.get('debug', False): + console.print(f"[cyan]Stored comment for torrent: {comment[:100]}...") + + if "passthepopcorn.me" in comment: + match = re.search(r'torrentid=(\d+)', comment) + if match: + meta['ptp'] = match.group(1) + elif "https://aither.cc/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['aither'] = match.group(1) + elif "https://lst.gg/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['lst'] = match.group(1) + elif "https://onlyencodes.cc/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['oe'] = match.group(1) + elif "https://blutopia.cc/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['blu'] = match.group(1) + elif "https://upload.cx/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['ulcx'] = match.group(1) + elif "https://hdbits.org/" in comment: + match = re.search(r'id=(\d+)', comment) + if match: + meta['hdb'] = match.group(1) + elif "https://broadcasthe.net/" in comment: + match = re.search(r'id=(\d+)', comment) + if match: + meta['btn'] = match.group(1) + elif "https://beyond-hd.me/" in comment: + match = re.search(r'details/(\d+)', comment) + if match: + meta['bhd'] = match.group(1) + elif "/torrents/" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['huno'] = match.group(1) - if "passthepopcorn.me" in comment: - match = re.search(r'torrentid=(\d+)', comment) - if match: - meta['ptp'] = match.group(1) - elif "https://aither.cc/" in comment: - match = re.search(r'/(\d+)$', comment) - if match: - meta['aither'] = match.group(1) - elif "https://lst.gg/" in comment: - match = re.search(r'/(\d+)$', comment) - if match: - meta['lst'] = match.group(1) - elif "https://onlyencodes.cc/" in comment: - match = re.search(r'/(\d+)$', comment) - if match: - meta['oe'] = match.group(1) - elif "https://blutopia.cc/" in comment: - match = re.search(r'/(\d+)$', comment) - if match: - meta['blu'] = match.group(1) - elif "https://upload.cx/" in comment: - match = re.search(r'/(\d+)$', comment) if match: - meta['ulcx'] = match.group(1) - elif "https://hdbits.org/" in comment: - match = re.search(r'id=(\d+)', comment) - if match: - meta['hdb'] = match.group(1) - elif "https://broadcasthe.net/" in comment: - match = re.search(r'id=(\d+)', comment) - if match: - meta['btn'] = match.group(1) - elif "https://beyond-hd.me/" in comment: - match = re.search(r'details/(\d+)', comment) - if match: - meta['bhd'] = match.group(1) - elif "/torrents/" in comment: - match = re.search(r'/(\d+)$', comment) - if match: - meta['huno'] = match.group(1) - - if match: - for tracker in ['ptp', 'bhd', 'btn', 'huno', 'blu', 'aither', 'ulcx', 'lst', 'oe', 'hdb']: - if meta.get(tracker): - console.print(f"[bold cyan]meta updated with {tracker.upper()} ID: {meta[tracker]}") - - if meta.get('torrent_comments') and meta['debug']: - console.print(f"[green]Stored {len(meta['torrent_comments'])} torrent comments for later use") - - if not pathed: - torrent_storage_dir = client.get('torrent_storage_dir') - if not torrent_storage_dir: - # Export .torrent file - torrent_hash = 
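Editor's note: the long if/elif chain above pulls site IDs out of the torrent's comment field with per-tracker regexes. The same mapping can be expressed as a lookup table, which is how the later `find_qbit_torrents_by_path` hunk handles it; a small sketch of that idea, with patterns copied from the diff (subset of trackers only):

```python
import re

TRACKER_PATTERNS = {
    'ptp':    ("passthepopcorn.me", r'torrentid=(\d+)'),
    'aither': ("aither.cc",         r'/(\d+)$'),
    'lst':    ("lst.gg",            r'/(\d+)$'),
    'blu':    ("blutopia.cc",       r'/(\d+)$'),
    'hdb':    ("hdbits.org",        r'id=(\d+)'),
    'btn':    ("broadcasthe.net",   r'id=(\d+)'),
    'bhd':    ("beyond-hd.me",      r'details/(\d+)'),
}


def tracker_ids_from_comment(comment: str) -> dict:
    """Return {tracker_key: id} for every known tracker URL found in the comment."""
    found = {}
    for key, (needle, pattern) in TRACKER_PATTERNS.items():
        if needle in comment:
            match = re.search(pattern, comment)
            if match:
                found[key] = match.group(1)
    return found


assert tracker_ids_from_comment("https://passthepopcorn.me/torrents.php?torrentid=12345") == {'ptp': '12345'}
```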
torrent.get('infohash_v1') - if meta.get('debug', False): - console.print(f"[cyan]Exporting .torrent file for hash: {torrent_hash}") - - try: - torrent_file_content = qbt_client.torrents_export(torrent_hash=torrent_hash) - torrent_file_path = os.path.join(extracted_torrent_dir, f"{torrent_hash}.torrent") + for tracker in ['ptp', 'bhd', 'btn', 'huno', 'blu', 'aither', 'ulcx', 'lst', 'oe', 'hdb']: + if meta.get(tracker): + console.print(f"[bold cyan]meta updated with {tracker.upper()} ID: {meta[tracker]}") + + if meta.get('torrent_comments') and meta['debug']: + console.print(f"[green]Stored {len(meta['torrent_comments'])} torrent comments for later use") + + if not pathed: + torrent_storage_dir = client.get('torrent_storage_dir') + if not torrent_storage_dir: + # Export .torrent file + torrent_hash = getattr(torrent, 'infohash_v1', '') + if meta.get('debug', False): + console.print(f"[cyan]Exporting .torrent file for hash: {torrent_hash}") - with open(torrent_file_path, "wb") as f: - f.write(torrent_file_content) + try: + if proxy_url: + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': torrent_hash}) as response: + if response.status == 200: + torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export torrent via proxy: {response.status}") + continue + else: + torrent_file_content = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_export, torrent_hash=torrent_hash), + f"Export torrent {torrent_hash}" + ) + torrent_file_path = os.path.join(extracted_torrent_dir, f"{torrent_hash}.torrent") - # Validate the .torrent file before saving as BASE.torrent - valid, torrent_path = await self.is_valid_torrent(meta, torrent_file_path, torrent_hash, 'qbit', client, print_err=False) - if not valid: - if meta['debug']: - console.print(f"[bold red]Validation failed for {torrent_file_path}") - os.remove(torrent_file_path) # Remove invalid file - else: - from src.torrentcreate import create_base_from_existing_torrent - await create_base_from_existing_torrent(torrent_file_path, meta['base_dir'], meta['uuid']) + with open(torrent_file_path, "wb") as f: + f.write(torrent_file_content) - except qbittorrentapi.APIError as e: - console.print(f"[bold red]Failed to export .torrent for {torrent_hash}: {e}") + # Validate the .torrent file before saving as BASE.torrent + valid, torrent_path = await self.is_valid_torrent(meta, torrent_file_path, torrent_hash, 'qbit', client, print_err=False) + if not valid: + if meta['debug']: + console.print(f"[bold red]Validation failed for {torrent_file_path}") + os.remove(torrent_file_path) # Remove invalid file + else: + await create_base_from_existing_torrent(torrent_file_path, meta['base_dir'], meta['uuid']) + except asyncio.TimeoutError: + console.print(f"[bold red]Failed to export .torrent for {torrent_hash} after retries") - found = True - break + found = True + break + except Exception as e: + if qbt_session: + await qbt_session.close() + console.print(f"[bold red]Error processing torrent {getattr(torrent, 'name', 'Unknown')}: {e}") + if meta.get('debug', False): + import traceback + console.print(f"[bold red]Traceback: {traceback.format_exc()}") + continue if not found: console.print("[bold red]Matching site torrent with the specified infohash_v1 not found.") + if qbt_session: + await qbt_session.close() + return meta else: return meta @@ -1441,7 +1915,6 @@ async def get_ptp_from_hash_rtorrent(self, meta, pathed=False): base_torrent_path = 
os.path.join(extracted_torrent_dir, "BASE.torrent") try: - from src.torrentcreate import create_base_from_existing_torrent await create_base_from_existing_torrent(resolved_path, meta['base_dir'], meta['uuid']) if meta['debug']: console.print("[green]Created BASE.torrent from existing torrent") @@ -1455,7 +1928,6 @@ async def get_ptp_from_hash_rtorrent(self, meta, pathed=False): except Exception as e: console.print(f"[bold red]Error reading torrent file: {e}") - import traceback console.print(f"[dim]{traceback.format_exc()}[/dim]") return meta @@ -1476,29 +1948,142 @@ async def get_pathed_torrents(self, path, meta): if meta['debug']: console.print("[yellow]No matching torrents for the path found in qBittorrent[/yellow]") + except asyncio.TimeoutError: + raise except Exception as e: console.print(f"[red]Error searching for torrents: {str(e)}[/red]") - import traceback console.print(f"[dim]{traceback.format_exc()}[/dim]") async def find_qbit_torrents_by_path(self, content_path, meta): if meta.get('debug'): console.print(f"[yellow]Searching for torrents in qBittorrent for path: {content_path}[/yellow]") try: - if meta.get('client', None) is None: - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + mtv_config = self.config['TRACKERS'].get('MTV') + piece_limit = self.config['DEFAULT'].get('prefer_max_16_torrent', False) + mtv_torrent = False + if isinstance(mtv_config, dict): + mtv_torrent = mtv_config.get('prefer_mtv_torrent', False) + # MTV preference takes priority as it's more restrictive (8 MiB vs 16 MiB) + if mtv_torrent: + piece_size_constraints_enabled = 'MTV' + elif piece_limit: + piece_size_constraints_enabled = '16MiB' + else: + piece_size_constraints_enabled = False else: - default_torrent_client = meta['client'] - if meta.get('client', None) == 'none': - return - if default_torrent_client == "none": - return - client_config = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_client = client_config['torrent_client'] + piece_size_constraints_enabled = '16MiB' if piece_limit else False + + meta['piece_size_constraints_enabled'] = piece_size_constraints_enabled - if torrent_client != 'qbit': + # Determine which clients to search + clients_to_search = [] + + if meta.get('client') and meta['client'] != 'none': + # Only search the explicitly requested client + clients_to_search = [meta['client']] + else: + # Use searching_client_list if available, otherwise default client + searching_list = self.config['DEFAULT'].get('searching_client_list', []) + if searching_list and isinstance(searching_list, list) and len(searching_list) > 0: + # Filter out empty strings and 'none' values + clients_to_search = [c for c in searching_list if c and c != 'none'] + + if not clients_to_search: + default_client = self.config['DEFAULT'].get('default_torrent_client') + if default_client and default_client != 'none': + clients_to_search = [default_client] + + if not clients_to_search: + if meta.get('debug'): + console.print("[yellow]No clients configured for searching") return [] + all_matching_torrents = [] + for client_name in clients_to_search: + client_config = self.config['TORRENT_CLIENTS'].get(client_name) + if not client_config: + if meta['debug']: + console.print(f"[yellow]Client '{client_name}' not found in TORRENT_CLIENTS config") + continue + + torrent_client_type = client_config.get('torrent_client') + + if torrent_client_type != 'qbit': + if meta['debug']: + console.print(f"[yellow]Skipping non-qBit client: {client_name}") + continue + + if meta['debug']: + 
console.print(f"[cyan]Searching qBittorrent client: {client_name}") + + torrents = await self._search_single_qbit_client(client_config, content_path, meta, client_name) + + if torrents: + # Found matching torrents in this client + all_matching_torrents.extend(torrents) + + # Check if we should stop searching additional clients + found_piece_size = meta.get('found_preferred_piece_size', False) + constraints_enabled = meta.get('piece_size_constraints_enabled', False) + + should_stop = False + + if not constraints_enabled: + # No constraints, stop after finding any torrent + should_stop = True + if meta['debug']: + console.print(f"[green]Found {len(torrents)} matching torrent(s) in client '{client_name}' (no piece size constraints), stopping search[/green]") + elif found_piece_size == 'no_constraints': + # Found valid torrent and no constraints were set + should_stop = True + if meta['debug']: + console.print(f"[green]Found {len(torrents)} matching torrent(s) in client '{client_name}', stopping search[/green]") + elif found_piece_size == 'MTV': + # MTV constraint is always satisfied since it's most restrictive + should_stop = True + if meta['debug']: + console.print(f"[green]Found torrent with MTV preferred piece size (≤8 MiB) in client '{client_name}', stopping search[/green]") + elif found_piece_size == '16MiB' and constraints_enabled == '16MiB': + # 16MiB constraint satisfied (and MTV not required) + should_stop = True + if meta['debug']: + console.print(f"[green]Found torrent with 16 MiB piece size in client '{client_name}', stopping search[/green]") + else: + # Constraints enabled but not met, continue searching + if meta['debug']: + constraint_name = "MTV (≤8 MiB)" if constraints_enabled == 'MTV' else "16 MiB" + console.print(f"[yellow]Found {len(torrents)} torrent(s) in client '{client_name}' but no {constraint_name} piece size match, continuing search[/yellow]") + + if should_stop: + break + else: + if meta['debug']: + console.print(f"[yellow]No matching torrents found in client '{client_name}', continuing to next client[/yellow]") + + # Deduplicate by hash (in case same torrent exists in multiple clients) + seen_hashes = set() + unique_torrents = [] + for torrent in all_matching_torrents: + if torrent['hash'] not in seen_hashes: + seen_hashes.add(torrent['hash']) + unique_torrents.append(torrent) + + if meta['debug'] and len(all_matching_torrents) != len(unique_torrents): + console.print(f"[cyan]Deduplicated {len(all_matching_torrents)} torrents to {len(unique_torrents)} unique torrents") + + return unique_torrents + + except asyncio.TimeoutError: + raise + except Exception as e: + console.print(f"[bold red]Error finding torrents: {str(e)}") + if meta['debug']: + console.print(traceback.format_exc()) + return [] + + async def _search_single_qbit_client(self, client_config, content_path, meta, client_name): + """Search a single qBittorrent client for matching torrents.""" + try: tracker_patterns = { 'ptp': {"url": "passthepopcorn.me", "pattern": r'torrentid=(\d+)'}, 'aither': {"url": "https://aither.cc/", "pattern": r'/(\d+)$'}, @@ -1510,40 +2095,135 @@ async def find_qbit_torrents_by_path(self, content_path, meta): 'bhd': {"url": "https://beyond-hd.me/", "pattern": r'details/(\d+)'}, 'huno': {"url": "https://hawke.uno/", "pattern": r'/(\d+)$'}, 'ulcx': {"url": "https://upload.cx/", "pattern": r'/(\d+)$'}, + 'rf': {"url": "https://reelflix.xyz/", "pattern": r'/(\d+)$'}, + 'otw': {"url": "https://oldtoons.world/", "pattern": 
r'/(\d+)$'}, + 'yus': {"url": "https://yu-scene.net/", "pattern": r'/(\d+)$'}, + 'dp': {"url": "https://darkpeers.org/", "pattern": r'/(\d+)$'}, + 'sp': {"url": "https://seedpool.org/", "pattern": r'/(\d+)$'}, } - tracker_priority = ['aither', 'ulcx', 'lst', 'blu', 'oe', 'btn', 'bhd', 'huno', 'hdb', 'ptp'] - - try: - qbt_client = qbittorrentapi.Client( - host=client_config['qbit_url'], - port=int(client_config['qbit_port']), - username=client_config['qbit_user'], - password=client_config['qbit_pass'], - VERIFY_WEBUI_CERTIFICATE=client_config.get('VERIFY_WEBUI_CERTIFICATE', True), - REQUESTS_ARGS={'timeout': 10} - ) + tracker_priority = ['aither', 'ulcx', 'lst', 'blu', 'oe', 'btn', 'bhd', 'huno', 'hdb', 'rf', 'otw', 'yus', 'dp', 'sp', 'ptp'] + proxy_url = client_config.get('qui_proxy_url', '').strip() + if proxy_url: try: - await asyncio.wait_for( - asyncio.to_thread(qbt_client.auth_log_in), - timeout=10.0 + session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10), + connector=aiohttp.TCPConnector(verify_ssl=client_config.get('VERIFY_WEBUI_CERTIFICATE', True)) ) - except asyncio.TimeoutError: - console.print("[bold red]Connection to qBittorrent timed out after 10 seconds") + + # Store session and URL for later API calls + qbt_session = session + qbt_proxy_url = proxy_url + + except Exception as e: + console.print(f"[bold red]Failed to connect to qBittorrent proxy: {e}") + if 'session' in locals(): + await session.close() + return [] + else: + potential_qbt_client = await self.init_qbittorrent_client(client_config) + if not potential_qbt_client: return [] + else: + qbt_client = potential_qbt_client - except qbittorrentapi.LoginFailed: - console.print("[bold red]Failed to login to qBittorrent - incorrect credentials") - return [] + search_term = meta['uuid'] + try: + if proxy_url: + # Build qui's enhanced filter options with expression support + qui_filters = { + "status": [], # Empty = all statuses, or specify like ["downloading","seeding"] + "excludeStatus": ["unregistered", "tracker_down"], + "categories": [], + "excludeCategories": [], + "tags": [], + "excludeTags": [], + "trackers": [], + "excludeTrackers": [], + } - except qbittorrentapi.APIConnectionError: - console.print("[bold red]Failed to connect to qBittorrent - check host/port") - return [] + # Build URL query string with standard qBittorrent API parameters + query_parts = [ + f"search={urllib.parse.quote(search_term)}", + "sort=added_on", + "reverse=true", + "limit=100" + ] + + # Add status parameters if they exist + if qui_filters.get('excludeStatus'): + # Join multiple excludeStatus filters with comma (qBittorrent style) + filter_value = ','.join(qui_filters['excludeStatus']) + query_parts.append(f"filter={urllib.parse.quote(filter_value)}") + + if qui_filters.get('categories'): + # Join multiple categories with comma + category_value = ','.join(qui_filters['categories']) + query_parts.append(f"category={urllib.parse.quote(category_value)}") + + if qui_filters.get('tags'): + # Join multiple tags with comma + tag_value = ','.join(qui_filters['tags']) + query_parts.append(f"tag={urllib.parse.quote(tag_value)}") + + query_string = "&".join(query_parts) + url = f"{qbt_proxy_url}/api/v2/torrents/search?{query_string}" - torrents = await asyncio.to_thread(qbt_client.torrents_info) - if meta['debug']: - console.print(f"[cyan]Found {len(torrents)} torrents in qBittorrent") + if meta['debug']: + console.print(f"[cyan]Searching qBittorrent via proxy: {redact_private_info(url)}...") + + async 
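Editor's note: instead of pulling the full torrent list, the proxy path above builds a filtered `/api/v2/torrents/search` query with URL-encoded search, filter, category and tag parameters; the endpoint name and the `{'torrents': [...]}` response envelope are taken from the diff (a qui extension), while the parameter names match the stock WebUI API. A small sketch of that query construction, with placeholder filter values:

```python
import urllib.parse


def build_qui_search_url(https://github.com/proxy_url:%20str,%20search_term:%20str,
                         exclude_status=('unregistered', 'tracker_down'),
                         categories=(), tags=(), limit=100) -> str:
    """Compose the search URL the way the diff does: quote each value and join with &."""
    parts = [
        f"search={urllib.parse.quote(search_term)}",
        "sort=added_on",
        "reverse=true",
        f"limit={limit}",
    ]
    if exclude_status:
        parts.append(f"filter={urllib.parse.quote(','.join(exclude_status))}")
    if categories:
        parts.append(f"category={urllib.parse.quote(','.join(categories))}")
    if tags:
        parts.append(f"tag={urllib.parse.quote(','.join(tags))}")
    return f"{proxy_url.rstrip('/')}/api/v2/torrents/search?{'&'.join(parts)}"


print(build_qui_search_url("http://localhost:7476/", "Some Release 2160p"))
# http://localhost:7476/api/v2/torrents/search?search=Some%20Release%202160p&sort=added_on&reverse=true&limit=100&filter=unregistered%2Ctracker_down
```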
with qbt_session.get(url) as response: + if response.status == 200: + response_data = await response.json() + + # The qui proxy returns {'torrents': [...]} while standard API returns [...] + if isinstance(response_data, dict) and 'torrents' in response_data: + torrents_data = response_data['torrents'] + else: + torrents_data = response_data + + if meta['debug']: + console.print(f"[cyan]Retrieved {len(torrents_data)} torrents via proxy search for '{search_term}'") + # Convert to objects that match qbittorrentapi structure + + class MockTorrent: + def __init__(self, data): + for key, value in data.items(): + setattr(self, key, value) + if not hasattr(self, 'files'): + self.files = [] + if not hasattr(self, 'tracker'): + self.tracker = '' + if not hasattr(self, 'comment'): + self.comment = '' + torrents = [MockTorrent(torrent) for torrent in torrents_data] + else: + if response.status == 404: + if meta['debug']: + console.print(f"[yellow]No torrents found via proxy search for '[green]{search_term}' [yellow]Maybe tracker errors?") + else: + if meta['debug']: + console.print(f"[bold red]Failed to get torrents list via proxy: {response.status}") + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() + return [] + else: + torrents = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_info), + "Get torrents list", + initial_timeout=14.0 + ) + except asyncio.TimeoutError: + console.print("[bold red]Getting torrents list timed out after retries") + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() + return [] + except Exception as e: + console.print(f"[bold red]Error getting torrents list: {e}") + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() + return [] matching_torrents = [] @@ -1564,7 +2244,7 @@ async def find_qbit_torrents_by_path(self, content_path, meta): if is_disc in ("", None) and len(meta.get('filelist', [])) == 1: file_name = os.path.basename(meta['filelist'][0]) - if (torrent_name == file_name) and len(torrent.files) == 1: + if torrent_name == file_name: is_match = True elif torrent_name == meta['uuid']: is_match = True @@ -1575,22 +2255,55 @@ async def find_qbit_torrents_by_path(self, content_path, meta): if not is_match: continue - has_working_tracker = False + torrent_properties = [] if is_match: + url = torrent.tracker if torrent.tracker else [] try: - torrent_trackers = await asyncio.to_thread(qbt_client.torrents_trackers, torrent_hash=torrent.hash) - display_trackers = [] - - # Filter out DHT, PEX, LSD "trackers" - for tracker in torrent_trackers: - if tracker.get('url', []).startswith(('** [DHT]', '** [PeX]', '** [LSD]')): - continue - display_trackers.append(tracker) + if proxy_url and not torrent.comment: + if meta['debug']: + console.print(f"[cyan]Fetching torrent properties via proxy for torrent: {torrent.name}") + async with qbt_session.get(f"{qbt_proxy_url}/api/v2/torrents/properties", + params={'hash': torrent.hash}) as response: + if response.status == 200: + torrent_properties = await response.json() + torrent.comment = torrent_properties.get('comment', '') + else: + if meta['debug']: + console.print(f"[yellow]Failed to get properties for torrent {torrent.name} via proxy: {response.status}") + continue + elif not proxy_url: + torrent_trackers = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_trackers, torrent_hash=torrent.hash), + f"Get trackers for torrent {torrent.name}" + ) + except (asyncio.TimeoutError, qbittorrentapi.APIError): + if 
meta['debug']: + console.print(f"[yellow]Failed to get trackers for torrent {torrent.name} after retries") + continue + except Exception as e: + if meta['debug']: + console.print(f"[yellow]Error getting trackers for torrent {torrent.name}: {e}") + continue - for tracker in display_trackers: - url = tracker.get('url', 'Unknown URL') - status_code = tracker.get('status', 0) + if proxy_url: + torrent_trackers = getattr(torrent, 'trackers', []) or [] + has_working_tracker = True + else: + try: + display_trackers = [] + + # Filter out DHT, PEX, LSD "trackers" + for tracker in torrent_trackers or []: + if tracker.get('url', '').startswith(('** [DHT]', '** [PeX]', '** [LSD]')): + continue + display_trackers.append(tracker) + + # Now process the filtered trackers + has_working_tracker = False + for display_tracker in display_trackers: + url = display_tracker.get('url', 'Unknown URL') + status_code = display_tracker.get('status', 0) status_text = { 0: "Disabled", 1: "Not contacted", @@ -1603,19 +2316,20 @@ async def find_qbit_torrents_by_path(self, content_path, meta): has_working_tracker = True if meta['debug']: console.print(f"[green]Tracker working: {url[:15]} - {status_text}") - - elif meta['debug']: - msg = tracker.get('msg', '') + else: + msg = display_tracker.get('msg', '') console.print(f"[yellow]Tracker not working: {url[:15]} - {status_text}{f' - {msg}' if msg else ''}") - except qbittorrentapi.APIError as e: - if meta['debug']: - console.print(f"[red]Error fetching trackers for torrent {torrent.name}: {e}") - continue + except qbittorrentapi.APIError as e: + if meta['debug']: + console.print(f"[red]Error fetching trackers for torrent {torrent.name}: {e}") + continue if 'torrent_comments' not in meta: meta['torrent_comments'] = [] + await match_tracker_url(https://github.com/[url],%20meta) + match_info = { 'hash': torrent.hash, 'name': torrent.name, @@ -1638,7 +2352,7 @@ async def find_qbit_torrents_by_path(self, content_path, meta): if not tracker_info: continue - if tracker_info["url"] in torrent.comment: + if tracker_info["url"] in torrent.comment and has_working_tracker: match = re.search(tracker_info["pattern"], torrent.comment) if match: tracker_id_value = match.group(1) @@ -1651,7 +2365,7 @@ async def find_qbit_torrents_by_path(self, content_path, meta): if torrent.tracker and 'hawke.uno' in torrent.tracker: # Try to extract torrent ID from the comment first - if not has_working_tracker: + if has_working_tracker: huno_id = None if "/torrents/" in torrent.comment: match = re.search(r'/torrents/(\d+)', torrent.comment) @@ -1667,6 +2381,16 @@ async def find_qbit_torrents_by_path(self, content_path, meta): meta['huno'] = huno_id tracker_found = True + if torrent.tracker and 'tracker.anthelion.me' in torrent.tracker: + ant_id = 1 + if has_working_tracker: + tracker_urls.append({ + 'id': 'ant', + 'tracker_id': ant_id, + }) + meta['ant'] = ant_id + tracker_found = True + match_info['tracker_urls'] = tracker_urls match_info['has_tracker'] = tracker_found @@ -1723,16 +2447,23 @@ def get_priority_score(torrent): console.print(f"[bold cyan]Found {tracker['id'].upper()} ID: {tracker['tracker_id']} in torrent comment") if not meta.get('base_torrent_created'): - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] - client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_client = client['torrent_client'] - torrent_storage_dir = client.get('torrent_storage_dir') + torrent_storage_dir = client_config.get('torrent_storage_dir') extracted_torrent_dir = 
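Editor's note: the tracker check above first drops the `** [DHT]`, `** [PeX]` and `** [LSD]` pseudo-entries and then inspects each real tracker's numeric status; in the qBittorrent WebUI API, status 2 means the tracker is working. A standalone sketch of that filter:

```python
QBT_TRACKER_STATUS = {
    0: "Disabled",
    1: "Not contacted",
    2: "Working",
    3: "Updating",
    4: "Not working",
}


def has_working_tracker(trackers) -> bool:
    """True if any real tracker (not a DHT/PeX/LSD pseudo-entry) reports status 2."""
    for tracker in trackers or []:
        url = tracker.get('url', '')
        if url.startswith(('** [DHT]', '** [PeX]', '** [LSD]')):
            continue
        status = tracker.get('status', 0)
        print(f"{url[:40]}: {QBT_TRACKER_STATUS.get(status, 'Unknown')}")
        if status == 2:
            return True
    return False


assert has_working_tracker([{'url': '** [DHT]', 'status': 2},
                            {'url': 'https://tracker.example/announce', 'status': 2}])
```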
os.path.join(meta.get('base_dir', ''), "tmp", meta.get('uuid', '')) os.makedirs(extracted_torrent_dir, exist_ok=True) - # Try the best match first - torrent_hash = best_match['hash'] + # Set up piece size preference logic + mtv_config = self.config.get('TRACKERS', {}).get('MTV', {}) + prefer_small_pieces = mtv_config.get('prefer_mtv_torrent', False) + piece_limit = self.config['DEFAULT'].get('prefer_max_16_torrent', False) + + # Use piece preference if MTV preference is true, otherwise use general piece limit + use_piece_preference = prefer_small_pieces or piece_limit + piece_size_best_match = None # Track the best match for fallback if piece preference is enabled + + # Try the best match first (from the sorted matching torrents) + best_torrent_match = matching_torrents[0] + torrent_hash = best_torrent_match['hash'] torrent_file_path = None if torrent_storage_dir: @@ -1746,8 +2477,24 @@ def get_priority_score(torrent): if meta.get('debug', False): console.print(f"[cyan]Exporting .torrent file for hash: {torrent_hash}") - try: - torrent_file_content = qbt_client.torrents_export(torrent_hash=torrent_hash) + torrent_file_content = None + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + try: + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': torrent_hash}) as response: + if response.status == 200: + torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export torrent via proxy: {response.status}") + except Exception as e: + console.print(f"[red]Error exporting torrent via proxy: {e}") + else: + torrent_file_content = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_export, torrent_hash=torrent_hash), + f"Export torrent {torrent_hash}" + ) + if torrent_file_content is not None: torrent_file_path = os.path.join(extracted_torrent_dir, f"{torrent_hash}.torrent") with open(torrent_file_path, "wb") as f: @@ -1755,103 +2502,564 @@ def get_priority_score(torrent): if meta.get('debug', False): console.print(f"[green]Exported .torrent file to: {torrent_file_path}") - - except qbittorrentapi.APIError as e: - console.print(f"[bold red]Failed to export .torrent for {torrent_hash}: {e}") + else: + console.print(f"[bold red]Failed to export .torrent for {torrent_hash} after retries") if torrent_file_path: - valid, torrent_path = await self.is_valid_torrent(meta, torrent_file_path, torrent_hash, 'qbit', client, print_err=False) + valid, torrent_path = await self.is_valid_torrent(meta, torrent_file_path, torrent_hash, 'qbit', client_config, print_err=False) if valid: - try: - from src.torrentcreate import create_base_from_existing_torrent - await create_base_from_existing_torrent(torrent_file_path, meta['base_dir'], meta['uuid']) - if meta['debug']: - console.print("[green]Created BASE.torrent from existing torrent") - meta['base_torrent_created'] = True - found_valid_torrent = True - except Exception as e: - console.print(f"[bold red]Error creating BASE.torrent: {e}") + if use_piece_preference: + # **Track best match based on piece size** + try: + torrent_data = Torrent.read(torrent_file_path) + piece_size = torrent_data.piece_size + # For prefer_small_pieces: prefer smallest pieces + # For piece_limit: prefer torrents with piece size <= 16 MiB (16777216 bytes) + is_better_match = False + if prefer_small_pieces: + # MTV preference: always prefer smaller pieces + is_better_match = piece_size_best_match is None or piece_size < piece_size_best_match['piece_size'] + elif piece_limit: + # General preference: 
prefer <= 16 MiB pieces, then smaller within that range + if piece_size <= 16777216: # 16 MiB + is_better_match = (piece_size_best_match is None or + piece_size_best_match['piece_size'] > 16777216 or + piece_size < piece_size_best_match['piece_size']) + + if is_better_match: + piece_size_best_match = { + 'hash': torrent_hash, + 'torrent_path': torrent_path if torrent_path else torrent_file_path, + 'piece_size': piece_size + } + if meta['debug']: + console.print(f"[green]Updated best match: {piece_size_best_match}") + except Exception as e: + console.print(f"[bold red]Error reading torrent data for {torrent_hash}: {e}") + if os.path.exists(torrent_file_path) and torrent_file_path.startswith(extracted_torrent_dir): + os.remove(torrent_file_path) + else: + # If piece preference is disabled, return first valid torrent + try: + await create_base_from_existing_torrent(torrent_file_path, meta['base_dir'], meta['uuid']) + if meta['debug']: + console.print(f"[green]Created BASE.torrent from first valid torrent: {torrent_hash}") + meta['base_torrent_created'] = True + meta['hash_used'] = torrent_hash + found_valid_torrent = True + except Exception as e: + console.print(f"[bold red]Error creating BASE.torrent: {e}") else: if meta['debug']: - console.print(f"[bold red]Validation failed for best match torrent {torrent_file_path}") + console.print(f"[bold red]{torrent_hash} failed validation") if os.path.exists(torrent_file_path) and torrent_file_path.startswith(extracted_torrent_dir): os.remove(torrent_file_path) - # Try other matches if the best match isn't valid - if meta['debug']: - console.print("[yellow]Trying other torrent matches...") - for torrent_match in matching_torrents[1:]: # Skip the first one since we already tried it - alt_torrent_hash = torrent_match['hash'] - alt_torrent_file_path = None - - if meta.get('debug', False): - console.print(f"[cyan]Trying alternative torrent: {alt_torrent_hash}") - - # Check if alternative torrent file exists in storage directory - if torrent_storage_dir: - alt_potential_path = os.path.join(torrent_storage_dir, f"{alt_torrent_hash}.torrent") - if os.path.exists(alt_potential_path): - alt_torrent_file_path = alt_potential_path - if meta.get('debug', False): - console.print(f"[cyan]Found existing alternative .torrent file: {alt_torrent_file_path}") - - # If not found in storage directory, export from qBittorrent - if not alt_torrent_file_path: - try: - alt_torrent_file_content = qbt_client.torrents_export(torrent_hash=alt_torrent_hash) - alt_torrent_file_path = os.path.join(extracted_torrent_dir, f"{alt_torrent_hash}.torrent") - - with open(alt_torrent_file_path, "wb") as f: - f.write(alt_torrent_file_content) - - if meta.get('debug', False): - console.print(f"[green]Exported alternative .torrent file to: {alt_torrent_file_path}") + # If first torrent fails validation, continue to try other matches + if not found_valid_torrent: + if meta['debug']: + console.print("[yellow]First torrent failed validation, trying other torrent matches...") - except qbittorrentapi.APIError as e: - console.print(f"[bold red]Failed to export alternative .torrent for {alt_torrent_hash}: {e}") - continue + # Try other matches if the best match isn't valid or if we need to find all valid torrents for piece preference + if not found_valid_torrent or (use_piece_preference and not piece_size_best_match): + if meta['debug']: + console.print("[yellow]Trying other torrent matches...") + for torrent_match in matching_torrents[1:]: # Skip the first one since we already tried it + 
alt_torrent_hash = torrent_match['hash'] + alt_torrent_file_path = None - # Validate the alternative torrent - if alt_torrent_file_path: - alt_valid, alt_torrent_path = await self.is_valid_torrent( - meta, alt_torrent_file_path, alt_torrent_hash, 'qbit', client, print_err=False + if meta.get('debug', False): + console.print(f"[cyan]Trying alternative torrent: {alt_torrent_hash}") + + # Check if alternative torrent file exists in storage directory + if torrent_storage_dir: + alt_potential_path = os.path.join(torrent_storage_dir, f"{alt_torrent_hash}.torrent") + if os.path.exists(alt_potential_path): + alt_torrent_file_path = alt_potential_path + if meta.get('debug', False): + console.print(f"[cyan]Found existing alternative .torrent file: {alt_torrent_file_path}") + + # If not found in storage directory, export from qBittorrent + if not alt_torrent_file_path: + alt_torrent_file_content = None + if proxy_url: + qbt_proxy_url = proxy_url.rstrip('/') + try: + async with qbt_session.post(f"{qbt_proxy_url}/api/v2/torrents/export", + data={'hash': alt_torrent_hash}) as response: + if response.status == 200: + alt_torrent_file_content = await response.read() + else: + console.print(f"[red]Failed to export alternative torrent via proxy: {response.status}") + except Exception as e: + console.print(f"[red]Error exporting alternative torrent via proxy: {e}") + else: + alt_torrent_file_content = await self.retry_qbt_operation( + lambda: asyncio.to_thread(qbt_client.torrents_export, torrent_hash=alt_torrent_hash), + f"Export alternative torrent {alt_torrent_hash}" ) - - if alt_valid: + if alt_torrent_file_content is not None: + alt_torrent_file_path = os.path.join(extracted_torrent_dir, f"{alt_torrent_hash}.torrent") + + with open(alt_torrent_file_path, "wb") as f: + f.write(alt_torrent_file_content) + + if meta.get('debug', False): + console.print(f"[green]Exported alternative .torrent file to: {alt_torrent_file_path}") + else: + console.print(f"[bold red]Failed to export alternative .torrent for {alt_torrent_hash} after retries") + continue + + # Validate the alternative torrent + if alt_torrent_file_path: + alt_valid, alt_torrent_path = await self.is_valid_torrent( + meta, alt_torrent_file_path, alt_torrent_hash, 'qbit', client_config, print_err=False + ) + + if alt_valid: + if use_piece_preference: + # **Track best match based on piece size** + try: + torrent_data = Torrent.read(alt_torrent_file_path) + piece_size = torrent_data.piece_size + # For prefer_small_pieces: prefer smallest pieces + # For piece_limit: prefer torrents with piece size <= 16 MiB (16777216 bytes) + is_better_match = False + if prefer_small_pieces: + # MTV preference: always prefer smaller pieces + is_better_match = piece_size_best_match is None or piece_size < piece_size_best_match['piece_size'] + elif piece_limit: + # General preference: prefer <= 16 MiB pieces, then smaller within that range + if piece_size <= 16777216: # 16 MiB + is_better_match = (piece_size_best_match is None or + piece_size_best_match['piece_size'] > 16777216 or + piece_size < piece_size_best_match['piece_size']) + + if is_better_match: + piece_size_best_match = { + 'hash': alt_torrent_hash, + 'torrent_path': alt_torrent_path if alt_torrent_path else alt_torrent_file_path, + 'piece_size': piece_size + } + if meta['debug']: + console.print(f"[green]Updated best match: {piece_size_best_match}") + except Exception as e: + console.print(f"[bold red]Error reading torrent data for {alt_torrent_hash}: {e}") + else: + # If piece preference is disabled, return 
first valid torrent try: - from src.torrentcreate import create_base_from_existing_torrent await create_base_from_existing_torrent(alt_torrent_file_path, meta['base_dir'], meta['uuid']) if meta['debug']: console.print(f"[green]Created BASE.torrent from alternative torrent {alt_torrent_hash}") - meta['infohash'] = alt_torrent_hash # Update infohash to use the valid torrent + meta['infohash'] = alt_torrent_hash meta['base_torrent_created'] = True + meta['hash_used'] = alt_torrent_hash found_valid_torrent = True break except Exception as e: console.print(f"[bold red]Error creating BASE.torrent for alternative: {e}") - else: - console.print(f"[yellow]Alternative torrent {alt_torrent_hash} also invalid") - if os.path.exists(alt_torrent_file_path) and alt_torrent_file_path.startswith(extracted_torrent_dir): - os.remove(alt_torrent_file_path) + else: + if meta['debug']: + console.print(f"[bold red]{alt_torrent_hash} failed validation") + if os.path.exists(alt_torrent_file_path) and alt_torrent_file_path.startswith(extracted_torrent_dir): + os.remove(alt_torrent_file_path) - if not found_valid_torrent: - if meta['debug']: - console.print("[bold red]No valid torrents found after checking all matches") - meta['we_checked_them_all'] = True + if not found_valid_torrent: + if meta['debug']: + console.print("[bold red]No valid torrents found after checking all matches, falling back to a best match if preference is set") + meta['we_checked_them_all'] = True + + # **Return the best match if piece preference is enabled** + if use_piece_preference and piece_size_best_match and not found_valid_torrent: + try: + preference_type = "MTV preference" if prefer_small_pieces else "16 MiB piece limit" + console.print(f"[green]Using best match torrent ({preference_type}) with hash: {piece_size_best_match['hash']}") + await create_base_from_existing_torrent(piece_size_best_match['torrent_path'], meta['base_dir'], meta['uuid']) + if meta['debug']: + piece_size_mib = piece_size_best_match['piece_size'] / 1024 / 1024 + console.print(f"[green]Created BASE.torrent from best match torrent: {piece_size_best_match['hash']} (piece size: {piece_size_mib:.1f} MiB)") + meta['infohash'] = piece_size_best_match['hash'] + meta['base_torrent_created'] = True + meta['hash_used'] = piece_size_best_match['hash'] + found_valid_torrent = True + + # Check if the best match actually meets the piece size constraint + piece_size = piece_size_best_match['piece_size'] + if prefer_small_pieces and piece_size <= 8388608: # 8 MiB + meta['found_preferred_piece_size'] = 'MTV' + elif piece_limit and piece_size <= 16777216: # 16 MiB + meta['found_preferred_piece_size'] = '16MiB' + else: + # Found a torrent but it doesn't meet the constraint + meta['found_preferred_piece_size'] = False + except Exception as e: + console.print(f"[bold red]Error creating BASE.torrent from best match: {e}") + elif use_piece_preference and not piece_size_best_match: + console.print("[yellow]No preferred torrents found matching piece size preferences.") + meta['we_checked_them_all'] = True + meta['found_preferred_piece_size'] = False + + # If piece preference is not enabled, set flag to indicate we can stop searching + if not use_piece_preference and found_valid_torrent: + meta['found_preferred_piece_size'] = 'no_constraints' # Display results summary if meta['debug']: if matching_torrents: - console.print(f"[green]Found {len(matching_torrents)} matching torrents") + console.print(f"[green]Found {len(matching_torrents)} matching torrents in {client_name}") 
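As a side note for readers of this hunk: the piece-size preference applied above can be summarised as a small standalone predicate. The sketch below is illustrative only and is not part of the patch; the helper name is hypothetical, sizes are in bytes, and the thresholds mirror the values used above (16 MiB = 16777216).

# Hypothetical helper mirroring the is_better_match logic above (not part of the diff).
SIXTEEN_MIB = 16 * 1024 * 1024  # 16777216 bytes


def is_better_piece_match(piece_size, current_best_size, prefer_small_pieces, piece_limit):
    """Return True if a candidate piece size should replace the current best match."""
    if prefer_small_pieces:
        # MTV preference: always favour the smallest piece size seen so far.
        return current_best_size is None or piece_size < current_best_size
    if piece_limit and piece_size <= SIXTEEN_MIB:
        # General preference: only candidates at or below 16 MiB qualify,
        # and smaller pieces win within that range.
        return (current_best_size is None
                or current_best_size > SIXTEEN_MIB
                or piece_size < current_best_size)
    return False


# Example: an 8 MiB candidate beats an empty best match under the 16 MiB limit.
assert is_better_piece_match(8 * 1024 * 1024, None, False, True) is True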
console.print(f"[green]Torrents with working trackers: {sum(1 for t in matching_torrents if t.get('has_working_tracker', False))}") else: - console.print(f"[yellow]No matching torrents found for {torrent_name}") + console.print(f"[yellow]No matching torrents found in {client_name}") + + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() return matching_torrents + except asyncio.TimeoutError: + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() + raise except Exception as e: - console.print(f"[bold red]Error finding torrents: {str(e)}") + console.print(f"[bold red]Error finding torrents in {client_name}: {str(e)}") if meta['debug']: - import traceback console.print(traceback.format_exc()) + if proxy_url and 'qbt_session' in locals(): + await qbt_session.close() return [] + + +async def create_cross_seed_links(meta, torrent, tracker_dir, use_hardlink): + debug = meta.get('debug', False) + metainfo = getattr(torrent, 'metainfo', {}) + if not isinstance(metainfo, dict): + metainfo = {} + info_raw = metainfo.get('info') + info = info_raw if isinstance(info_raw, dict) else {} + torrent_name = info.get('name.utf-8') or info.get('name') or getattr(torrent, 'name', None) + if not torrent_name: + console.print("[bold red]Cross-seed torrent is missing an info name; cannot build link structure") + return False + + multi_file = bool(info.get('files')) + torrent_files = [] + + def decode_component(value): + if isinstance(value, bytes): + return value.decode('utf-8', errors='ignore') + return str(value) + + if multi_file: + for file_entry in info.get('files', []): + raw_path = file_entry.get('path.utf-8') or file_entry.get('path') or [] + if isinstance(raw_path, (list, tuple)): + components = [decode_component(part) for part in raw_path] + rel_path = os.path.join(*components) if components else '' + else: + rel_path = decode_component(raw_path) + rel_path = rel_path.replace('/', os.sep) + rel_path = rel_path.replace('\\', os.sep) + rel_path = os.path.normpath(rel_path) + if rel_path.startswith('..'): + rel_path = rel_path.lstrip('.\\/') + torrent_files.append({ + 'relative_path': rel_path, + 'length': file_entry.get('length') + }) + else: + torrent_files.append({ + 'relative_path': torrent_name, + 'length': info.get('length') + }) + + destination_root = os.path.join(tracker_dir, torrent_name) if multi_file else tracker_dir + if multi_file: + await asyncio.to_thread(os.makedirs, destination_root, exist_ok=True) + else: + await asyncio.to_thread(os.makedirs, tracker_dir, exist_ok=True) + + release_root = meta.get('path') + candidate_paths = [] + if release_root and os.path.isdir(release_root): + for root, _, files in os.walk(release_root): + for file in files: + candidate_paths.append(os.path.join(root, file)) + else: + candidate_paths.extend(meta.get('filelist', [])) + parent_guess = os.path.dirname(meta['filelist'][0]) if meta.get('filelist') else os.path.dirname(release_root or '') + if parent_guess and os.path.isdir(parent_guess): + for root, _, files in os.walk(parent_guess): + for file in files: + candidate_paths.append(os.path.join(root, file)) + + unique_candidates = [] + seen = set() + tracker_abs = os.path.abspath(tracker_dir) if tracker_dir else None + for candidate in candidate_paths: + if not candidate: + continue + abs_candidate = os.path.abspath(candidate) + if abs_candidate in seen: + continue + seen.add(abs_candidate) + if not os.path.isfile(abs_candidate): + continue + if tracker_abs: + try: + if os.path.commonpath([abs_candidate, tracker_abs]) == 
tracker_abs: + continue + except ValueError: + pass + try: + size = os.path.getsize(abs_candidate) + except OSError: + size = None + unique_candidates.append({ + 'path': abs_candidate, + 'name': os.path.basename(abs_candidate).lower(), + 'size': size, + 'used': False + }) + + if not unique_candidates: + console.print("[bold red]Unable to find source files for cross-seed linking") + return False + + def pick_candidate(filename, length): + lower_name = (filename or '').lower() + + if lower_name: + for entry in unique_candidates: + if entry['used']: + continue + if entry['name'] == lower_name and length is not None and entry['size'] == length: + entry['used'] = True + return entry['path'], 'name_size' + + if lower_name: + for entry in unique_candidates: + if entry['used']: + continue + if entry['name'] == lower_name: + entry['used'] = True + return entry['path'], 'name_only' + + if length is not None: + for entry in unique_candidates: + if entry['used']: + continue + if entry['size'] == length: + entry['used'] = True + return entry['path'], 'size_only' + + for entry in unique_candidates: + if entry['used']: + continue + entry['used'] = True + return entry['path'], 'fallback' + + return None, None + + for torrent_file in torrent_files: + relative_path = torrent_file['relative_path'] + dest_file_path = os.path.join(tracker_dir, torrent_name, relative_path) if multi_file else os.path.join(tracker_dir, torrent_name) + dest_file_path = os.path.normpath(dest_file_path) + tracker_root = os.path.abspath(tracker_dir) + try: + if os.path.commonpath([tracker_root, os.path.abspath(dest_file_path)]) != tracker_root: + console.print(f"[bold red]Refusing to create link outside tracker directory: {dest_file_path}") + return False + except ValueError: + console.print(f"[bold red]Refusing to create link outside tracker directory: {dest_file_path}") + return False + + source_file, match_reason = pick_candidate(os.path.basename(relative_path), torrent_file.get('length')) + if not source_file: + console.print(f"[bold red]Failed to map cross-seed file: {relative_path}") + return False + if match_reason == 'fallback' and debug: + console.print(f"[yellow]Cross-seed mapping fallback used for: {relative_path}") + + dest_parent = os.path.dirname(dest_file_path) + if dest_parent: + await asyncio.to_thread(os.makedirs, dest_parent, exist_ok=True) + if await asyncio.to_thread(os.path.exists, dest_file_path): + if debug: + console.print(f"[yellow]Cross-seed link already exists, keeping: {dest_file_path}") + continue + + linked = await async_link_directory(source_file, dest_file_path, use_hardlink=use_hardlink, debug=debug) + if not linked: + console.print(f"[bold red]Linking failed for cross-seed file: {relative_path}") + return False + + if debug: + console.print(f"[green]Prepared cross-seed link tree at {os.path.join(tracker_dir, torrent_name) if multi_file else tracker_dir}") + return True + + +async def async_link_directory(src, dst, use_hardlink=True, debug=False): + try: + # Create destination directory + await asyncio.to_thread(os.makedirs, os.path.dirname(dst), exist_ok=True) + + # Check if destination already exists + if await asyncio.to_thread(os.path.exists, dst): + if debug: + console.print(f"[yellow]Skipping linking, path already exists: {dst}") + return True + + # Handle file linking + if await asyncio.to_thread(os.path.isfile, src): + if use_hardlink: + try: + await asyncio.to_thread(os.link, src, dst) + if debug: + console.print(f"[green]Hard link created: {dst} -> {src}") + return True + except OSError as 
e: + console.print(f"[yellow]Hard link failed: {e}") + return False + else: # Use symlink + try: + if platform.system() == "Windows": + await asyncio.to_thread(os.symlink, src, dst, target_is_directory=False) + else: + await asyncio.to_thread(os.symlink, src, dst) + + if debug: + console.print(f"[green]Symbolic link created: {dst} -> {src}") + return True + except OSError as e: + console.print(f"[yellow]Symlink failed: {e}") + return False + + # Handle directory linking + else: + if use_hardlink: + # For hardlinks, we need to recreate the directory structure + await asyncio.to_thread(os.makedirs, dst, exist_ok=True) + + # Get all files in the source directory + all_items = [] + for root, dirs, files in await asyncio.to_thread(os.walk, src): + for file in files: + src_path = os.path.join(root, file) + rel_path = os.path.relpath(src_path, src) + all_items.append((src_path, os.path.join(dst, rel_path), rel_path)) + + # Create subdirectories first (to avoid race conditions) + subdirs = set() + for _, dst_path, _ in all_items: + subdir = os.path.dirname(dst_path) + if subdir and subdir not in subdirs: + subdirs.add(subdir) + await asyncio.to_thread(os.makedirs, subdir, exist_ok=True) + + # Create hardlinks for all files + success = True + for src_path, dst_path, rel_path in all_items: + try: + await asyncio.to_thread(os.link, src_path, dst_path) + if debug and rel_path == os.path.relpath(all_items[0][0], src): + console.print(f"[green]Hard link created for file: {dst_path} -> {src_path}") + except OSError as e: + console.print(f"[yellow]Hard link failed for file {rel_path}: {e}") + success = False + break + + return success + else: + # For symlinks, just link the directory itself + try: + if platform.system() == "Windows": + await asyncio.to_thread(os.symlink, src, dst, target_is_directory=True) + else: + await asyncio.to_thread(os.symlink, src, dst) + + if debug: + console.print(f"[green]Symbolic link created: {dst} -> {src}") + return True + except OSError as e: + console.print(f"[yellow]Symlink failed: {e}") + return False + + except Exception as e: + console.print(f"[bold red]Error during linking: {e}") + return False + + +async def match_tracker_url(https://github.com/tracker_urls,%20meta): + tracker_url_patterns = { + 'acm': ["https://eiga.moi/"], + 'aither': ["https://aither.cc/"], + 'ant': ["tracker.anthelion.me"], + 'ar': ["tracker.alpharatio"], + 'asc': ["amigos-share.club"], + 'az': ["tracker.avistaz.to"], + 'bhd': ["https://beyond-hd.me/", "tracker.beyond-hd.me"], + 'bjs': ["tracker.bj-share.info"], + 'blu': ["https://blutopia.cc/"], + 'bt': ["t.brasiltracker.org"], + 'btn': ["https://broadcasthe.net/"], + 'cbr': ["capybarabr.com"], + 'cz': ["tracker.cinemaz.to"], + 'dc': ["tracker.digitalcore.club", "trackerprxy.digitalcore.club"], + 'dp': ["https://darkpeers.org/"], + 'ff': ["tracker.funfile.org"], + 'fl': ["reactor.filelist", "reactor.thefl.org"], + 'fnp': ["https://fearnopeer.com/"], + 'gpw': ["https://tracker.greatposterwall.com/"], + 'hdb': ["https://tracker.hdbits.org/"], + 'hds': ["hd-space.pw"], + 'hdt': ["https://hdts-announce.ru/"], + 'hhd': ["https://homiehelpdesk.net/"], + 'huno': ["https://hawke.uno/"], + 'ihd': ["https://infinityhd.net/"], + 'is': ["https://immortalseed.me/"], + 'itt': ["https://itatorrents.xyz/"], + 'lcd': ["locadora.cc"], + 'ldu': ["theldu.to"], + 'lst': ["https://lst.gg/"], + 'lt': 
["https://lat-team.com/"], + 'mtv': ["tracker.morethantv"], + 'nbl': ["tracker.nebulance"], + 'oe': ["https://onlyencodes.cc/"], + 'otw': ["https://oldtoons.world/"], + 'phd': ["tracker.privatehd"], + 'pt': ["https://portugas.org/"], + 'ptp': ["passthepopcorn.me"], + 'pts': ["https://tracker.ptskit.com/"], + 'ras': ["https://rastastugan.org/"], + 'rf': ["https://reelflix.xyz/", "https://reelflix.cc/"], + 'rtf': ["peer.retroflix"], + 'sam': ["https://samaritano.cc/"], + 'sp': ["https://seedpool.org/"], + 'spd': ["ramjet.speedapp.io", "ramjet.speedapp.to", "ramjet.speedappio.org"], + 'stc': ["https://skipthecommercials.xyz/"], + 'thr': ["torrenthr"], + 'tl': ["tracker.tleechreload", "tracker.torrentleech"], + 'tlz': ["https://tlzdigital.com/"], + 'ttr': ["https://torrenteros.org/"], + 'tvc': ["https://tvchaosuk.com/"], + 'ulcx': ["https://upload.cx/"], + 'yoink': ["yoinked.org"], + 'yus': ["https://yu-scene.net/"], + } + found_ids = set() + for tracker in tracker_urls: + for tracker_id, patterns in tracker_url_patterns.items(): + for pattern in patterns: + if pattern in tracker: + found_ids.add(tracker_id.upper()) + if meta.get('debug'): + console.print(f"[bold cyan]Matched {tracker_id.upper()} in tracker URL: {redact_private_info(tracker)}") + + if "remove_trackers" not in meta or not isinstance(meta["remove_trackers"], list): + meta["remove_trackers"] = [] + + for tracker_id in found_ids: + if tracker_id not in meta["remove_trackers"]: + meta["remove_trackers"].append(tracker_id) + if meta.get('debug'): + console.print(f"[bold cyan]Storing matched tracker IDs for later removal: {meta['remove_trackers']}") diff --git a/src/console.py b/src/console.py index 223c51181..23cee0006 100644 --- a/src/console.py +++ b/src/console.py @@ -1,2 +1,3 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from rich.console import Console console = Console() diff --git a/src/cookie_auth.py b/src/cookie_auth.py new file mode 100644 index 000000000..7b7b6979d --- /dev/null +++ b/src/cookie_auth.py @@ -0,0 +1,561 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import http.cookiejar +import httpx +import os +import re +import importlib +import traceback +from bs4 import BeautifulSoup +from src.console import console +from src.trackers.COMMON import COMMON +from rich.panel import Panel +from rich.table import Table + + +class CookieValidator: + def __init__(self, config): + self.config = config + pass + + async def load_session_cookies(self, meta, tracker): + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{tracker}.txt") + cookie_jar = http.cookiejar.MozillaCookieJar(cookie_file) + + try: + cookie_jar.load(ignore_discard=True, ignore_expires=True) + except http.cookiejar.LoadError as e: + console.print(f"{tracker}: Failed to load the cookie file: {e}") + console.print(f"{tracker}: Please ensure the cookie file is in the correct format (Netscape).") + return False + except FileNotFoundError: + # Attempt automatic login for AR tracker + if tracker == 'AR': + console.print(f"{tracker}: [yellow]Cookie file not found. 
Attempting automatic login...[/yellow]") + if await self.ar_login(meta, tracker, cookie_file): + # Try loading the newly created cookie file + try: + cookie_jar.load(ignore_discard=True, ignore_expires=True) + return cookie_jar + except Exception as e: + console.print(f"{tracker}: Failed to load cookies after login: {e}") + return False + else: + console.print(f"{tracker}: Automatic login failed.") + return False + + console.print( + f"{tracker}: [red]Cookie file not found.[/red]\n" + f"{tracker}: You must first log in through your usual browser and export the cookies to: [yellow]{cookie_file}[/yellow]\n" + f'{tracker}: Cookies can be exported using browser extensions like "cookies.txt" (Firefox) or "Get cookies.txt LOCALLY" (Chrome).' + ) + return False + + return cookie_jar + + async def save_session_cookies(self, tracker, cookie_jar): + """Save updated cookies after a successful validation.""" + if not cookie_jar: + console.print(f"{tracker}: Cookie jar not initialized, cannot save cookies.") + return + + try: + cookie_jar.save(ignore_discard=True, ignore_expires=True) + except Exception as e: + console.print(f"{tracker}: Failed to update the cookie file: {e}") + + async def get_ar_auth_key(self, meta, tracker): + """Retrieve the saved auth key for AR tracker.""" + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{tracker}.txt") + auth_file = cookie_file.replace('.txt', '_auth.txt') + + if os.path.exists(auth_file): + try: + async with aiofiles.open(auth_file, 'r', encoding='utf-8') as f: + auth_key = await f.read() + auth_key = auth_key.strip() + if auth_key: + return auth_key + except Exception as e: + console.print(f"{tracker}: Error reading auth key: {e}") + + return None + + async def ar_login(self, meta, tracker, cookie_file): + """Perform automatic login to AR and save cookies in Netscape format.""" + username = self.config['TRACKERS'][tracker].get('username', '').strip() + password = self.config['TRACKERS'][tracker].get('password', '').strip() + + if not username or not password: + console.print(f"{tracker}: Username or password not configured in config.") + return False + + base_url = 'https://alpharatio.cc/' + login_url = f'{base_url}/login.php' + + headers = { + "User-Agent": f"Upload Assistant {meta.get('current_version', 'github.com/Audionut/Upload-Assistant')}" + } + + try: + async with httpx.AsyncClient(headers=headers, timeout=30.0, follow_redirects=True) as client: + # Perform login + login_data = { + "username": username, + "password": password, + "keeplogged": "1", + "login": "Login", + } + + response = await client.post(login_url, data=login_data) + + if response.status_code != 200: + console.print(f"{tracker}: Login failed with status code {response.status_code}") + return False + + # Check for login success by looking for error indicators + if 'login.php?act=recover' in response.text or 'Forgot your password' in response.text: + console.print(f"{tracker}: [red]Login failed. 
Please check your username and password.[/red]") + if meta.get('debug', False): + failure_path = f"{meta['base_dir']}/tmp/{meta.get('uuid', 'debug')}/[{tracker}]Failed_Login.html" + os.makedirs(os.path.dirname(failure_path), exist_ok=True) + async with aiofiles.open(failure_path, "w", encoding="utf-8") as f: + await f.write(response.text) + console.print(f"Login response saved to [yellow]{failure_path}[/yellow] for debugging.") + return False + + # Validate we're logged in by checking the torrents page + test_response = await client.get(f'{base_url}/torrents.php') + if test_response.status_code == 200: + if 'login.php?act=recover' not in test_response.text: + console.print(f"{tracker}: [green]Login successful![/green]") + + # Extract auth key from the response page + auth_key = None + soup = BeautifulSoup(test_response.text, 'html.parser') + logout_link = soup.find('a', href=True, text='Logout') + if logout_link: + href = logout_link['href'] + auth_match = re.search(r'auth=([^&]+)', href) + if auth_match: + auth_key = auth_match.group(1) + console.print(f"{tracker}: [green]Auth key extracted successfully[/green]") + + # Save cookies in Netscape format + os.makedirs(os.path.dirname(cookie_file), exist_ok=True) + cookie_jar = http.cookiejar.MozillaCookieJar(cookie_file) + + # Convert httpx cookies to MozillaCookieJar format + for cookie_name, cookie_value in client.cookies.items(): + # Get the cookie object for additional attributes + for cookie in client.cookies.jar: + if cookie.name == cookie_name: + ck = http.cookiejar.Cookie( + version=0, + name=cookie.name, + value=cookie.value, + port=None, + port_specified=False, + domain=cookie.domain if cookie.domain else '.alpharatio.cc', + domain_specified=True, + domain_initial_dot=(cookie.domain or '.alpharatio.cc').startswith('.'), + path=cookie.path if cookie.path else '/', + path_specified=True, + secure=bool(cookie._rest.get('secure')) if hasattr(cookie, '_rest') else True, + expires=None, + discard=False, + comment=None, + comment_url=None, + rest={}, + rfc2109=False + ) + cookie_jar.set_cookie(ck) + break + + cookie_jar.save(ignore_discard=True, ignore_expires=True) + console.print(f"{tracker}: [green]Cookies saved to {cookie_file}[/green]") + + # Save auth key to a separate file if found + if auth_key: + auth_file = cookie_file.replace('.txt', '_auth.txt') + async with aiofiles.open(auth_file, 'w', encoding='utf-8') as f: + await f.write(auth_key) + console.print(f"{tracker}: [green]Auth key saved to {auth_file}[/green]") + + return True + + console.print(f"{tracker}: [red]Login validation failed.[/red]") + return False + + except httpx.TimeoutException: + console.print(f"{tracker}: Connection timed out. The site may be down or unreachable.") + return False + except httpx.ConnectError: + console.print(f"{tracker}: Failed to connect. The site may be down or your connection is blocked.") + return False + except Exception as e: + console.print(f"{tracker}: Login error: {e}") + if meta.get('debug', False): + console.print(traceback.format_exc()) + return False + + async def cookie_validation( + self, + meta, + tracker, + test_url="", + status_code="", + error_text="", + success_text="", + token_pattern="", + ): + """ + Validate login cookies for a tracker by checking specific indicators on a test page. + Return False to skip the upload if credentials are invalid. 
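To illustrate the contract described in this docstring, here is a hedged usage sketch. The tracker key, test URL and success indicator below are placeholders invented for the example; they are not taken from any tracker module in this patch.

# Hypothetical caller of CookieValidator.cookie_validation; all literals are illustrative.
from src.cookie_auth import CookieValidator


async def check_xyz_session(config, meta):
    validator = CookieValidator(config)
    # A single indicator is usually enough; success_text is the most precise option,
    # as the comments further below note.
    return await validator.cookie_validation(
        meta,
        'XYZ',                                       # hypothetical tracker key
        test_url='https://example.org/torrents.php',   # placeholder logged-in page
        success_text='logout.php',                   # text expected only when logged in
    )

# When awaited, this returns True only if the cookies saved in data/cookies/XYZ.txt
# still authenticate against the test page; otherwise the failure HTML is written to tmp/.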
+ """ + cookie_jar = await self.load_session_cookies(meta, tracker) + if not cookie_jar: + return False + + headers = { + "User-Agent": f"Upload Assistant {meta.get('current_version', 'github.com/Audionut/Upload-Assistant')}" + } + + try: + async with httpx.AsyncClient(headers=headers, timeout=20.0, cookies=cookie_jar) as session: + response = await session.get(test_url) + text = response.text + # if meta.get('debug', False): + # console.print(text) + + # Check for key indicators of successful login + # This is the most precise method if you can find a unique string that only appears when logged in + if success_text and success_text not in text: + await self.handle_validation_failure(meta, tracker, text) + return False + + # Check for key indicators of failed login + # For example, “Forgot your password” <- this indicates that you are on the login page + if error_text and error_text in text: + await self.handle_validation_failure(meta, tracker, text) + return False + + # Check for status code + # This is often not very accurate, as websites may use the same status code for successful uploads and failures + if status_code and response.status_code != int(status_code): + await self.handle_validation_failure(meta, tracker, text) + return False + + # Find the auth token if it is needed + if token_pattern: + match = re.search(token_pattern, text) + if not match: + await self.handle_validation_failure(meta, tracker, text) + return False + # Dynamically set a class attribute to store the token + cls = getattr( + importlib.import_module(f'src.trackers.{tracker}'), + tracker + ) + setattr( + cls, + "secret_token", + str(match.group(1)) + ) + + # Save cookies only after a confirmed valid login + await self.save_session_cookies(tracker, cookie_jar) + return True + + except httpx.ConnectTimeout: + console.print(f"{tracker}: Connection timeout. Server took too long to respond.") + except httpx.ReadTimeout: + console.print(f"{tracker}: Read timeout. Data transfer stopped prematurely.") + except httpx.ConnectError: + console.print(f"{tracker}: Connection failed. Check URL, port, and network status.") + except httpx.ProxyError: + console.print(f"{tracker}: Proxy error. Failed to connect via proxy.") + except httpx.DecodingError: + console.print( + f"{tracker}: Decoding failed. Response content is not valid (e.g., unexpected encoding)." + ) + except httpx.TooManyRedirects: + console.print(f"{tracker}: Too many redirects. Request exceeded the maximum redirect limit.") + except httpx.HTTPStatusError as e: + status_code = e.response.status_code + reason = e.response.reason_phrase if e.response.reason_phrase else "Unknown Reason" + url = e.request.url + console.print(f"{tracker}: HTTP status error {status_code}: {reason} for {url}") + except httpx.RequestError as e: + console.print(f"{tracker}: General request error: {e}") + except Exception as e: + console.print(f"{tracker}: Unexpected validation error: {e}") + + return False + + async def handle_validation_failure(self, meta, tracker, text): + console.print( + f"{tracker}: Validation failed. The cookie appears to be expired or invalid.\n" + f"{tracker}: Please log in through your usual browser and export the cookies again." 
+ ) + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]Failed_Login.html" + os.makedirs(os.path.dirname(failure_path), exist_ok=True) + async with aiofiles.open(failure_path, "w", encoding="utf-8") as f: + await f.write(text) + console.print( + f"The web page has been saved to [yellow]{failure_path}[/yellow] for analysis.\n" + "[red]Do not share this file publicly[/red], as it may contain confidential information such as passkeys, IP address, e-mail, etc.\n" + "You can open this file in a web browser to see what went wrong.\n" + ) + + return + + async def find_html_token(self, tracker, token_pattern, response): + """Find the auth token in a web page using a regular expression pattern.""" + auth_match = re.search(token_pattern, response) + if not auth_match: + console.print( + f"{tracker}: The required token could not be found in the page's HTML. Pattern used: {token_pattern}\n" + f"{tracker}: This can happen if the site HTML has changed or if the login failed silently." + ) + return False + else: + return str(auth_match.group(1)) + + +class CookieAuthUploader: + def __init__(self, config): + self.config = config + self.common = COMMON(config) + pass + + async def handle_upload( + self, + meta, + tracker, + source_flag, + torrent_url, + data, + torrent_field_name, + upload_cookies, + upload_url, + default_announce="", + torrent_name="", + id_pattern="", + success_status_code="", + error_text="", + success_text="", + additional_files={}, + hash_is_id=False, + ): + """ + Upload a torrent to a tracker using cookies for authentication. + Return True if the upload is successful, False otherwise. + + 1. Create the [tracker].torrent file and set the source flag. + Uses default_announce if provided as some trackers require it. + + 2. Load the torrent file into memory. + 3. Post the torrent file and form data to the provided upload URL using the provided cookies. + 4. Check the response for success indicators. + 5. Handle success or failure accordingly. + + A successful upload will create a torrent entry with the announce URL and torrent ID (if applicable). + A failed upload will save the response HTML for analysis and also create a torrent entry with the announce URL, + as the upload may have partially succeeded. + """ + values = [success_status_code, error_text, success_text] + count = sum(bool(v) for v in values) + + if count == 0 or count > 1: + if count == 0: + error = "You must provide at least one of: success_status_code, error_text, or success_text." + else: + error = "Only one of success_status_code, error_text, or success_text should be provided." 
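For orientation, a hedged sketch of how a tracker module might drive this method follows. Every literal (tracker key, URLs, form fields, field name, id pattern) is a placeholder invented for the example and does not come from any tracker implementation in this patch; exactly one success indicator is passed, as the check above requires.

# Hypothetical caller of CookieAuthUploader.handle_upload; all literals are illustrative.
from src.cookie_auth import CookieAuthUploader


async def upload_to_xyz(config, meta, cookie_jar):
    # meta is assumed to already carry the usual Upload Assistant fields,
    # including meta["tracker_status"][tracker] for status reporting.
    uploader = CookieAuthUploader(config)
    data = {
        'title': meta.get('name', ''),         # placeholder form fields
        'description': meta.get('overview', ''),
    }
    return await uploader.handle_upload(
        meta,
        tracker='XYZ',                          # hypothetical tracker key
        source_flag='XYZ',
        torrent_url='https://example.org/torrents/',  # torrent_id is appended on success
        data=data,
        torrent_field_name='torrent_file',      # placeholder form field name
        upload_cookies=cookie_jar,
        upload_url='https://example.org/upload.php',
        id_pattern=r'torrents/(\d+)',           # placeholder pattern for the new torrent id
        success_text='download.php?id=',        # exactly one success indicator
    )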
+ meta["tracker_status"][tracker]["status_message"] = error + return False + + user_announce_url = self.config["TRACKERS"][tracker]["announce_url"] + + files = await self.load_torrent_file( + meta, + tracker, + torrent_field_name, + torrent_name, + source_flag, + default_announce, + ) + if additional_files: + files.update(additional_files) + + headers = { + "User-Agent": f"Upload Assistant {meta.get('current_version', 'github.com/Audionut/Upload-Assistant')}" + } + + if meta.get("debug", False): + self.upload_debug(tracker, data) + meta["tracker_status"][tracker]["status_message"] = "Debug mode enabled, not uploading" + + else: + success = False + try: + async with httpx.AsyncClient(headers=headers, timeout=30.0, cookies=upload_cookies, follow_redirects=True) as session: + response = await session.post(upload_url, data=data, files=files) + + if success_text and success_text in response.text: + success = True + + elif success_status_code: + valid_codes = { + int(code.strip()) + for code in str(success_status_code).split(",") + if code.strip().isdigit() + } + + if int(response.status_code) in valid_codes: + success = True + + elif error_text and error_text not in response.text: + success = True + + if success: + return await self.handle_successful_upload( + meta, + tracker, + response, + id_pattern, + hash_is_id, + source_flag, + user_announce_url, + torrent_url, + ) + else: + await self.handle_failed_upload( + meta, + tracker, + success_status_code, + success_text, + error_text, + response, + ) + + except httpx.ConnectTimeout: + meta["tracker_status"][tracker]["status_message"] = "Connection timed out" + except httpx.ReadTimeout: + meta["tracker_status"][tracker]["status_message"] = "Read timed out" + except httpx.ConnectError: + meta["tracker_status"][tracker]["status_message"] = "Failed to connect to the server" + except httpx.ProxyError: + meta["tracker_status"][tracker]["status_message"] = "Proxy connection failed" + except httpx.DecodingError: + meta["tracker_status"][tracker]["status_message"] = "Response decoding failed" + except httpx.TooManyRedirects: + meta["tracker_status"][tracker]["status_message"] = "Too many redirects" + except httpx.HTTPStatusError as e: + meta["tracker_status"][tracker]["status_message"] = f"HTTP error {e.response.status_code}: {e}" + except httpx.RequestError as e: + meta["tracker_status"][tracker]["status_message"] = f"Request error: {e}" + except Exception as e: + meta["tracker_status"][tracker]["status_message"] = f"Unexpected upload error: {e}" + + await self.common.add_tracker_torrent(meta, tracker, source_flag, user_announce_url, torrent_url) + return False + + def upload_debug(self, tracker, data): + try: + if isinstance(data, dict): + sensitive_keywords = ['password', 'passkey', 'auth', 'csrf', 'token'] + + table_data = Table( + title=f"{tracker}: Form Data", show_header=True, header_style="bold cyan" + ) + table_data.add_column("Key", style="cyan") + table_data.add_column("Value", style="magenta") + + for k, v in data.items(): + if any(keyword in k.lower() for keyword in sensitive_keywords): + table_data.add_row(k, "[REDACTED]") + else: + table_data.add_row(k, str(v)) + + console.print(table_data, justify="center", markup=False) + else: + data_panel = Panel(str(data), title=f"{tracker}: Form Data - DO NOT SHARE THIS", border_style="blue") + console.print(data_panel, justify="center") + except Exception as e: + console.print(f"Error displaying form data: {e}") + raise + + async def load_torrent_file( + self, meta, tracker, torrent_field_name, 
torrent_name, source_flag, default_announce + ): + """Load the torrent file into memory.""" + await self.common.edit_torrent(meta, tracker, source_flag, announce_url=default_announce) + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" + async with aiofiles.open(torrent_path, "rb") as f: + file_bytes = await f.read() + + name = torrent_name if torrent_name else f"{tracker}.{meta.get('infohash', '')}.placeholder" + + return { + torrent_field_name: ( + f"{name}.torrent", + file_bytes, + "application/x-bittorrent", + ) + } + + async def handle_successful_upload( + self, meta, tracker, response, id_pattern, hash_is_id, source_flag, user_announce_url, torrent_url + ): + torrent_id = "" + if id_pattern: + # First try to match the pattern in the response URL (for redirects) + url_match = re.search(id_pattern, str(response.url)) + if url_match: + torrent_id = url_match.group(1) + meta["tracker_status"][tracker]["torrent_id"] = torrent_id + else: + # Fall back to searching in response text + text_match = re.search(id_pattern, response.text) + if text_match: + torrent_id = text_match.group(1) + meta["tracker_status"][tracker]["torrent_id"] = torrent_id + + torrent_hash = await self.common.add_tracker_torrent( + meta, tracker, source_flag, user_announce_url, torrent_url + torrent_id, hash_is_id=hash_is_id + ) + + if hash_is_id and torrent_hash is not None: + meta["tracker_status"][tracker]["torrent_id"] = torrent_hash + + meta["tracker_status"][tracker]["status_message"] = "Torrent uploaded successfully." + + return True + + async def handle_failed_upload( + self, meta, tracker, success_status_code, success_text, error_text, response + ): + message = ["data error: The upload appears to have failed. It may have uploaded, go check."] + if success_text: + message.append(f"Could not find the success text '{success_text}' in the response.") + elif error_text: + message.append(f"Found the error text '{error_text}' in the response.") + elif success_status_code: + message.append(f"Expected status code '{success_status_code}', got '{response.status_code}'.") + else: + message.append("Unknown upload error.") + + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]Failed_Upload.html" + os.makedirs(os.path.dirname(failure_path), exist_ok=True) + async with aiofiles.open(failure_path, "w", encoding="utf-8") as f: + await f.write(response.text) + + message.append( + f"The web page has been saved to [yellow]{failure_path}[/yellow] for analysis.\n" + "[red]Do not share this file publicly[/red], as it may contain confidential information such as passkeys, IP address, e-mail, etc.\n" + "You can open this file in a web browser to see what went wrong.\n" + ) + + meta["tracker_status"][tracker]["status_message"] = "\n".join(message) + return False diff --git a/src/disc_menus.py b/src/disc_menus.py new file mode 100644 index 000000000..7f760edce --- /dev/null +++ b/src/disc_menus.py @@ -0,0 +1,82 @@ +import os +import json +from src.console import console +from src.uploadscreens import upload_screens + + +class DiscMenus: + """ + Handles the processing and uploading of disc menu images. + """ + + def __init__(self, meta, config): + self.config = config + self.path_to_menu_screenshots = meta.get('path_to_menu_screenshots', '') + + async def get_disc_menu_images(self, meta): + """ + Processes disc menu images from a local directory and uploads them. 
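As a usage note for this new module: the entry point is the process_disc_menus coroutine defined at the end of the file. A minimal sketch follows; the paths and the empty config are placeholders, and meta is assumed to already carry the base_dir and uuid keys the module writes under.

# Hypothetical invocation; paths are placeholders and config handling is simplified.
import asyncio

from src.disc_menus import process_disc_menus

meta = {
    'base_dir': '/opt/Upload-Assistant',             # placeholder working directory
    'uuid': 'example-uuid',
    'path_to_menu_screenshots': '/data/menu_shots',  # folder of .png/.jpg/.jpeg/.webp images
}

# Uploads any menu screenshots found in the folder and writes
# tmp/<uuid>/menu_images.json with the resulting image list.
asyncio.run(process_disc_menus(meta, config={}))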
+ """ + if not self.path_to_menu_screenshots: + return + + if os.path.isdir(self.path_to_menu_screenshots): + await self.get_local_images(meta) + else: + console.print(f"[red]Invalid disc menus path: {self.path_to_menu_screenshots}[/red]") + + async def get_local_images(self, meta): + """ + Uploads disc menu images from a local directory. + """ + image_paths = [] + for file in os.listdir(self.path_to_menu_screenshots): + if file.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')): + image_paths.append(os.path.join(self.path_to_menu_screenshots, file)) + + if not image_paths: + console.print("[yellow]No local menu images found to upload.[/yellow]") + return + + uploaded_images, _ = await upload_screens( + meta, + screens=len(image_paths), + img_host_num=1, + i=0, + total_screens=len(image_paths), + custom_img_list=image_paths, + return_dict={}, + retry_mode=False + ) + + meta['menu_images'] = uploaded_images + + await self.save_images_to_json(meta, uploaded_images) + + async def save_images_to_json(self, meta, image_list): + """ + Saves the uploaded disc menu images to a JSON file. + """ + if not image_list: + console.print("[yellow]No menu images found.[/yellow]") + return + + menu_images = { + "menu_images": image_list + } + + json_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'menu_images.json') + os.makedirs(os.path.dirname(json_path), exist_ok=True) + + with open(json_path, 'w') as f: + json.dump(menu_images, f, indent=4) + + console.print(f"[green]Saved {len(image_list)} menu images to {json_path}[/green]") + + +async def process_disc_menus(meta, config): + """ + Main function to process disc menu images. + """ + disc_menus = DiscMenus(meta, config) + await disc_menus.get_disc_menu_images(meta) diff --git a/src/discparse.py b/src/discparse.py index d76931081..0ccec3ee3 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import sys import asyncio @@ -12,25 +13,39 @@ import re from langcodes import Language from collections import defaultdict -import platform from src.console import console from data.config import config +from src.exportmi import setup_mediainfo_library class DiscParse(): def __init__(self): self.config = config + self.mediainfo_config = None pass + def setup_mediainfo_for_dvd(self, base_dir, debug=False): + """Setup MediaInfo binary for DVD processing using the complete setup from exportmi""" + if self.mediainfo_config is None: + self.mediainfo_config = setup_mediainfo_library(base_dir, debug) + + if self.mediainfo_config and self.mediainfo_config['cli']: + return self.mediainfo_config['cli'] + return None + """ Get and parse bdinfo """ + async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): use_largest = int(self.config['DEFAULT'].get('use_largest_playlist', False)) save_dir = f"{base_dir}/tmp/{folder_id}" if not os.path.exists(save_dir): os.mkdir(save_dir) + if meta.get('emby', False): + return discs, meta_discs + for i in range(len(discs)): bdinfo_text = None path = os.path.abspath(discs[i]['path']) @@ -143,7 +158,7 @@ async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): selected_playlists = [max(valid_playlists, key=lambda p: sum(item['size'] for item in p['items']))] else: # Allow user to select playlists - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): if 
len(valid_playlists) == 1: console.print("[yellow]Only one valid playlist found. Automatically selecting.") selected_playlists = valid_playlists @@ -187,28 +202,47 @@ async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): bdinfo_text = playlist_report_path else: try: - # Scanning playlist block (as before) + bdinfo_executable = None if sys.platform.startswith('linux') or sys.platform.startswith('darwin'): - proc = await asyncio.create_subprocess_exec( - 'mono', f"{base_dir}/bin/BDInfo/BDInfo.exe", path, '-m', playlist['file'], save_dir - ) + bdinfo_exe_path = f"{base_dir}/bin/BDInfo/BDInfo.exe" + if shutil.which("mono") and os.path.exists(bdinfo_exe_path): + bdinfo_executable = ['mono', bdinfo_exe_path, path, '-m', playlist['file'], save_dir] + elif shutil.which("bdinfo"): + bdinfo_executable = ["bdinfo", path, '-m', playlist['file'], save_dir] + elif shutil.which("BDInfo"): + bdinfo_executable = ["BDInfo", path, '-m', playlist['file'], save_dir] + else: + console.print(f"[bold red]BDInfo not found. Please install mono and place BDInfo.exe in {base_dir}/bin/BDInfo/ or install native bdinfo[/bold red]") + continue elif sys.platform.startswith('win32'): - proc = await asyncio.create_subprocess_exec( - f"{base_dir}/bin/BDInfo/BDInfo.exe", '-m', playlist['file'], path, save_dir - ) + bdinfo_exe_path = f"{base_dir}/bin/BDInfo/BDInfo.exe" + if os.path.exists(bdinfo_exe_path): + bdinfo_executable = [bdinfo_exe_path, '-m', playlist['file'], path, save_dir] + else: + console.print(f"[bold red]BDInfo.exe not found at {bdinfo_exe_path}[/bold red]") + console.print(f"[yellow]Please download BDInfo and place BDInfo.exe in {base_dir}/bin/BDInfo/[/yellow]") + continue else: console.print("[red]Unsupported platform for BDInfo.") continue - await proc.wait() - - # Rename the output to playlist_report_path - for file in os.listdir(save_dir): - if file.startswith("BDINFO") and file.endswith(".txt"): - bdinfo_text = os.path.join(save_dir, file) - shutil.move(bdinfo_text, playlist_report_path) - bdinfo_text = playlist_report_path # Update bdinfo_text to the renamed file - break + if bdinfo_executable: + proc = await asyncio.create_subprocess_exec( + *bdinfo_executable + ) + await proc.wait() + + if proc.returncode != 0: + console.print(f"[bold red]BDInfo failed with return code {proc.returncode}[/bold red]") + continue + + # Rename the output to playlist_report_path + for file in os.listdir(save_dir): + if file.startswith("BDINFO") and file.endswith(".txt"): + bdinfo_text = os.path.join(save_dir, file) + shutil.move(bdinfo_text, playlist_report_path) + bdinfo_text = playlist_report_path # Update bdinfo_text to the renamed file + break except Exception as e: console.print(f"[bold red]Error scanning playlist {playlist['file']}: {e}") continue @@ -243,19 +277,23 @@ async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): summary_file = f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}_{idx}.txt" extended_summary_file = f"{save_dir}/BD_SUMMARY_EXT_{str(i).zfill(2)}_{idx}.txt" + # Strip multiple spaces to single spaces before saving + bd_summary_cleaned = re.sub(r' +', ' ', bd_summary.strip()) + ext_bd_summary_cleaned = re.sub(r' +', ' ', ext_bd_summary.strip()) + with open(summary_file, 'w', encoding="utf-8", errors="replace") as f: - f.write(bd_summary.strip()) + f.write(bd_summary_cleaned) with open(extended_summary_file, 'w', encoding="utf-8", errors="replace") as f: - f.write(ext_bd_summary.strip()) + f.write(ext_bd_summary_cleaned) - bdinfo = self.parse_bdinfo(bd_summary, files[1], 
path) + bdinfo = self.parse_bdinfo(bd_summary_cleaned, files[1], path) # Prompt user for custom edition if conditions are met if len(selected_playlists) > 1: current_label = bdinfo.get('label', f"Playlist {idx}") console.print(f"[bold yellow]Current label for playlist {playlist['file']}: {current_label}") - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print("[bold green]You can create a custom Edition for this playlist.") user_input = input(f"Enter a new Edition title for playlist {playlist['file']} (or press Enter to keep the current label): ").strip() if user_input: @@ -267,10 +305,10 @@ async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): # Save to discs array if idx == 0: - discs[i]['summary'] = bd_summary.strip() + discs[i]['summary'] = bd_summary_cleaned discs[i]['bdinfo'] = bdinfo discs[i]['playlists'] = selected_playlists - if valid_playlists and meta['unattended'] and not meta.get('unattended-confirm', False): + if valid_playlists and meta['unattended'] and not meta.get('unattended_confirm', False): simplified_playlists = [{"file": p["file"], "duration": p["duration"]} for p in valid_playlists] duration_map = {} @@ -292,7 +330,7 @@ async def get_bdinfo(self, meta, discs, folder_id, base_dir, meta_discs): if meta['debug']: console.print(f"[cyan]Stored {len(simplified_playlists)} unique playlists by duration (from {len(valid_playlists)} total)") else: - discs[i][f'summary_{idx}'] = bd_summary.strip() + discs[i][f'summary_{idx}'] = bd_summary_cleaned discs[i][f'bdinfo_{idx}'] = bdinfo except Exception: @@ -450,7 +488,10 @@ def parse_bdinfo(self, bdinfo_input, files, path): """ Parse VIDEO_TS and get mediainfos """ - async def get_dvdinfo(self, discs, base_dir=None): + + async def get_dvdinfo(self, discs, base_dir=None, debug=False): + mediainfo_binary = self.setup_mediainfo_for_dvd(base_dir, debug=debug) + for each in discs: path = each.get('path') os.chdir(path) @@ -464,14 +505,13 @@ async def get_dvdinfo(self, discs, base_dir=None): filesdict[trimmed[:2]] = [] filesdict[trimmed[:2]].append(trimmed) main_set_duration = 0 - mediainfo_binary = os.path.join(base_dir, "bin", "MI", "windows", "MediaInfo.exe") for vob_set in filesdict.values(): try: ifo_file = f"VTS_{vob_set[0][:2]}_0.IFO" try: - if platform.system() == "Windows": + if mediainfo_binary: process = await asyncio.create_subprocess_exec( mediainfo_binary, "--Output=JSON", ifo_file, stdout=asyncio.subprocess.PIPE, @@ -479,15 +519,18 @@ async def get_dvdinfo(self, discs, base_dir=None): ) stdout, stderr = await process.communicate() - if process and process.returncode == 0: + if process.returncode == 0 and stdout: vob_set_mi = stdout.decode() else: + console.print(f"[yellow]Specialized MediaInfo failed for {ifo_file}, falling back to standard[/yellow]") + if stderr: + console.print(f"[red]MediaInfo stderr: {stderr.decode()}[/red]") vob_set_mi = MediaInfo.parse(ifo_file, output='JSON') else: vob_set_mi = MediaInfo.parse(ifo_file, output='JSON') except Exception as e: - console.print(f"[yellow]Error with DVD MediaInfo binary: {str(e)}") + console.print(f"[yellow]Error with DVD MediaInfo binary for JSON: {str(e)}") # Fall back to standard MediaInfo vob_set_mi = MediaInfo.parse(ifo_file, output='JSON') @@ -520,92 +563,73 @@ async def get_dvdinfo(self, discs, base_dir=None): each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" each['ifo'] = ifo = 
f"{path}/VTS_{set}_0.IFO" - try: - mediainfo_binary = os.path.join(base_dir, "bin", "MI", "windows", "MediaInfo.exe") + # Use basenames for mediainfo processing to avoid full paths in output + vob_basename = os.path.basename(vob) + ifo_basename = os.path.basename(ifo) + try: + # Process VOB file try: - if platform.system() == "Windows": + if mediainfo_binary: process = await asyncio.create_subprocess_exec( - mediainfo_binary, os.path.basename(vob), + mediainfo_binary, vob_basename, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE ) stdout, stderr = await process.communicate() - if process and process.returncode == 0: - each['vob_mi'] = stdout.decode().replace('\r\n', '\n') + if process.returncode == 0 and stdout: + vob_mi_output = stdout.decode().replace('\r\n', '\n') else: - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False).replace('\r\n', '\n') + console.print("[yellow]Specialized MediaInfo failed for VOB, falling back[/yellow]") + if stderr: + console.print(f"[red]MediaInfo stderr: {stderr.decode()}[/red]") + vob_mi_output = MediaInfo.parse(vob_basename, output='STRING', full=False).replace('\r\n', '\n') else: - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False).replace('\r\n', '\n') + vob_mi_output = MediaInfo.parse(vob_basename, output='STRING', full=False).replace('\r\n', '\n') except Exception as e: console.print(f"[yellow]Error with DVD MediaInfo binary for VOB: {str(e)}") - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False).replace('\r\n', '\n') - - try: - if platform.system() == "Windows": - process = await asyncio.create_subprocess_exec( - mediainfo_binary, os.path.basename(ifo), - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE - ) - stdout, stderr = await process.communicate() + vob_mi_output = MediaInfo.parse(vob_basename, output='STRING', full=False).replace('\r\n', '\n') - if process and process.returncode == 0: - each['ifo_mi'] = stdout.decode().replace('\r\n', '\n') - else: - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False).replace('\r\n', '\n') - else: - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False).replace('\r\n', '\n') - except Exception as e: - console.print(f"[yellow]Error with DVD MediaInfo binary for IFO: {str(e)}") - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False).replace('\r\n', '\n') + # Store VOB mediainfo (same output for both keys) + each['vob_mi'] = vob_mi_output + each['vob_mi_full'] = vob_mi_output + # Process IFO file try: - if platform.system() == "Windows": + if mediainfo_binary: process = await asyncio.create_subprocess_exec( - mediainfo_binary, vob, + mediainfo_binary, ifo_basename, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE ) stdout, stderr = await process.communicate() - if process and process.returncode == 0: - each['vob_mi_full'] = stdout.decode().replace('\r\n', '\n') + if process.returncode == 0 and stdout: + ifo_mi_output = stdout.decode().replace('\r\n', '\n') else: - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False).replace('\r\n', '\n') + console.print("[yellow]Specialized MediaInfo failed for IFO, falling back[/yellow]") + if stderr: + console.print(f"[red]MediaInfo stderr: {stderr.decode()}[/red]") + ifo_mi_output = MediaInfo.parse(ifo_basename, output='STRING', full=False).replace('\r\n', '\n') else: - each['vob_mi_full'] = MediaInfo.parse(vob, 
output='STRING', full=False).replace('\r\n', '\n') + ifo_mi_output = MediaInfo.parse(ifo_basename, output='STRING', full=False).replace('\r\n', '\n') except Exception as e: - console.print(f"[yellow]Error with DVD MediaInfo binary for full VOB: {str(e)}") - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False).replace('\r\n', '\n') - - try: - if platform.system() == "Windows": - process = await asyncio.create_subprocess_exec( - mediainfo_binary, ifo, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE - ) - stdout, stderr = await process.communicate() + console.print(f"[yellow]Error with DVD MediaInfo binary for IFO: {str(e)}") + ifo_mi_output = MediaInfo.parse(ifo_basename, output='STRING', full=False).replace('\r\n', '\n') - if process and process.returncode == 0: - each['ifo_mi_full'] = stdout.decode().replace('\r\n', '\n') - else: - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False).replace('\r\n', '\n') - else: - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False).replace('\r\n', '\n') - except Exception as e: - console.print(f"[yellow]Error with DVD MediaInfo binary for full IFO: {str(e)}") - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False).replace('\r\n', '\n') + each['ifo_mi'] = ifo_mi_output + each['ifo_mi_full'] = ifo_mi_output except Exception as e: console.print(f"[yellow]Error using DVD MediaInfo binary, falling back to standard: {e}") - # Fallback to standard MediaInfo - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False).replace('\r\n', '\n') - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False).replace('\r\n', '\n') - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False).replace('\r\n', '\n') - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False).replace('\r\n', '\n') + # Fallback to standard MediaInfo using basenames + vob_mi_output = MediaInfo.parse(vob_basename, output='STRING', full=False).replace('\r\n', '\n') + ifo_mi_output = MediaInfo.parse(ifo_basename, output='STRING', full=False).replace('\r\n', '\n') + each['vob_mi'] = vob_mi_output + each['ifo_mi'] = ifo_mi_output + each['vob_mi_full'] = vob_mi_output + each['ifo_mi_full'] = ifo_mi_output size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)) / float(1 << 30) each['disc_size'] = round(size, 2) @@ -659,7 +683,7 @@ async def get_hddvd_info(self, discs, meta): key=lambda p: p["totalSize"] ) ] - elif meta['unattended'] and not meta.get('unattended-confirm', False): + elif meta['unattended'] and not meta.get('unattended_confirm', False): console.print("[yellow]Unattended mode: Auto-selecting the largest playlist.") selected_playlists = [ max( diff --git a/src/dupe_checking.py b/src/dupe_checking.py index 97ef04c9e..7d16b1991 100644 --- a/src/dupe_checking.py +++ b/src/dupe_checking.py @@ -1,5 +1,11 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import os import re + +from cogs.redaction import redact_private_info +from data.config import config from src.console import console +from src.trackers.HUNO import HUNO async def filter_dupes(dupes, meta, tracker_name): @@ -9,12 +15,53 @@ async def filter_dupes(dupes, meta, tracker_name): """ if meta['debug']: console.log(f"[cyan]Pre-filtered dupes from {tracker_name}") - console.log(dupes) - - processed_dupes = [ - {'name': d, 'size': None} if isinstance(d, str) else {'name': d['name'], 'size': d['size']} - for d in dupes - 
] + # Limit dupe output for readability + if len(dupes) > 0: + dupes_to_print = [] + for dupe in dupes: + if isinstance(dupe, dict) and 'files' in dupe and isinstance(dupe['files'], list): + # Limit files list to first 10 items + limited_dupe = redact_private_info(dupe).copy() + if len(limited_dupe['files']) > 10: + limited_dupe['files'] = limited_dupe['files'][:10] + [f"... and {len(dupe['files']) - 10} more files"] + dupes_to_print.append(limited_dupe) + else: + dupes_to_print.append(redact_private_info(dupe)) + console.log(dupes_to_print) + else: + console.log(dupes) + meta['trumpable'] = False + processed_dupes = [] + for d in dupes: + if isinstance(d, str): + # Case 1: Simple string (just name) + processed_dupes.append({'name': d, 'size': None, 'files': [], 'file_count': 0, 'trumpable': False, 'link': None, 'download': None}) + elif isinstance(d, dict): + # Create a base entry with default values + entry = { + 'name': d.get('name', ''), + 'size': d.get('size'), + 'files': [], + 'file_count': 0, + 'trumpable': d.get('trumpable', False), + 'link': d.get('link', None), + 'download': d.get('download', None) + } + + # Case 3: Dict with files and file_count + if 'files' in d: + if isinstance(d['files'], list): + entry['files'] = d['files'] + elif isinstance(d['files'], str) and d['files']: + entry['files'] = [d['files']] + entry['file_count'] = len(entry['files']) + if 'file_count' in d: + try: + entry['file_count'] = int(d['file_count']) + except (ValueError, TypeError): + entry['file_count'] = 0 + + processed_dupes.append(entry) new_dupes = [] @@ -40,14 +87,17 @@ async def filter_dupes(dupes, meta, tracker_name): is_hdtv = meta.get('type') == "HDTV" target_source = meta.get("source") is_sd = meta.get('sd') + if not meta['is_disc']: + filenames = [] + if meta.get('filelist'): + for file_path in meta.get('filelist', []): + # Extract just the filename without the path + filename = os.path.basename(file_path) + filenames.append(filename) + if meta['debug']: + console.log(f"dupe checking filenames: {filenames[:10]}{'...' if len(filenames) > 10 else ''}") attribute_checks = [ - { - "key": "repack", - "uuid_flag": has_repack_in_uuid, - "condition": lambda each: meta['tag'].lower() in each and has_repack_in_uuid and "repack" not in each.lower(), - "exclude_msg": lambda each: f"Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}" - }, { "key": "remux", "uuid_flag": "remux" in meta.get('name', '').lower(), @@ -71,8 +121,14 @@ async def process_exclusion(entry): Determine if an entry should be excluded. Returns True if the entry should be excluded, otherwise allowed as dupe. 
""" - each = entry['name'] - sized = entry['size'] + each = entry.get('name', '') + sized = entry.get('size') # This may come as a string, such as "1.5 GB" + files = entry.get('files', []) + # Handle case where files might be comma-separated strings in a list + if files and isinstance(files, list) and len(files) == 1 and ',' in str(files[0]): + # Split comma-separated string into individual filenames + files = [f.strip() for f in str(files[0]).split(',')] + file_count = entry.get('file_count', 0) normalized = await normalize_filename(each) file_hdr = await refine_hdr_terms(normalized) @@ -91,6 +147,142 @@ async def process_exclusion(entry): console.log(f" 'repack' in each.lower(): {'repack' in each.lower()}") console.log(f"[debug] meta['uuid']: {meta.get('uuid', '')}") console.log(f"[debug] normalized encoder: {normalized_encoder}") + console.log(f"[debug] link: {entry.get('link', None)}") + console.log(f"[debug] files: {files[:10]}{'...' if len(files) > 10 else ''}") + console.log(f"[debug] file_count: {file_count}") + + def remember_match(reason): + """Persist details about the dupe that triggered a match for later use.""" + matched_name_key = f"{tracker_name}_matched_name" + matched_link_key = f"{tracker_name}_matched_link" + matched_download_key = f"{tracker_name}_matched_download" + matched_reason_key = f"{tracker_name}_matched_reason" + matched_count_key = f"{tracker_name}_matched_file_count" + + meta[matched_name_key] = entry.get('name') + if entry.get('link'): + meta[matched_link_key] = entry.get('link') + if entry.get('download'): + meta[matched_download_key] = entry.get('download') + meta[matched_reason_key] = reason + if file_count: + meta[matched_count_key] = file_count + + if not meta.get('is_disc'): + for file in filenames: + if tracker_name in ["MTV", "AR", "RTF"]: + # MTV: check if any dupe file is a substring of our file (ignoring extension) + if any(f.lower() in file.lower() for f in files): + meta['filename_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + remember_match('filename') + if file_count and file_count > 0 and file_count == len(meta.get('filelist', [])): + meta['file_count_match'] = file_count + remember_match('file_count') + return False + entry_size = entry.get('size') + source_size = meta.get('source_size') + if entry_size is not None and source_size is not None: + try: + if int(entry_size) == int(source_size): + meta['size_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + remember_match('size') + return False + except ValueError: + if meta['debug']: + console.log(f"[debug] Size comparison failed due to ValueError: entry_size={entry_size}, source_size={source_size}") + else: + if meta['debug']: + console.log(f"[debug] Comparing file: {file} against dupe files list.") + console.log(f"[debug] Dupe files list: {files[:10]}{'...' 
if len(files) > 10 else files}") + if any(file.lower() == f.lower() for f in files): + meta['filename_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + if meta['debug']: + console.log(f"[debug] Filename match found: {meta['filename_match']}") + remember_match('filename') + if file_count and file_count > 0 and file_count == len(meta.get('filelist', [])): + meta['file_count_match'] = file_count + if meta['debug']: + console.log(f"[debug] File count match found: {meta['file_count_match']}") + remember_match('file_count') + return False + if tracker_name in ["BHD"]: + # BHD: compare sizes + entry_size = entry.get('size') + source_size = meta.get('source_size') + if entry_size is not None and source_size is not None: + if meta['debug']: + console.log(f"[debug] Comparing sizes: Entry size {entry_size} vs Source size {source_size}") + try: + if int(entry_size) == int(source_size): + meta['size_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + remember_match('size') + return False + except ValueError: + if meta['debug']: + console.log(f"[debug] Size comparison failed due to ValueError: entry_size={entry_size}, source_size={source_size}") + + else: + entry_size = entry.get('size') + source_size = meta.get('source_size') + if entry_size is not None and source_size is not None: + if meta['debug']: + console.log(f"[debug] Comparing sizes: Entry size {entry_size} vs Source size {source_size}") + try: + if int(entry_size) == int(source_size): + meta['size_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + remember_match('size') + return False + except ValueError: + if meta['debug']: + console.log(f"[debug] Size comparison failed due to ValueError: entry_size={entry_size}, source_size={source_size}") + + if meta['is_disc'] and file_count and file_count < 2: + await log_exclusion("file count less than 2 for disc upload", each) + return True + + if has_repack_in_uuid and "repack" not in normalized and meta.get('tag', '').lower() in normalized: + await log_exclusion('repack release', each) + return True + + if tracker_name == "MTV": + target_name = meta.get('name').replace(' ', '.').replace('DD+', 'DDP') + dupe_name = str(entry.get('name')) + + def normalize_mtv_name(name): + # Handle audio format variations: DDP.5.1 <-> DDP5.1 + name = re.sub(r'\.DDP\.(\d)', r'.DDP\1', name) + name = re.sub(r'\.DD\.(\d)', r'.DD\1', name) + name = re.sub(r'\.AC3\.(\d)', r'.AC3\1', name) + name = re.sub(r'\.DTS\.(\d)', r'.DTS\1', name) + return name + normalized_target = normalize_mtv_name(target_name) + if normalized_target == dupe_name: + meta['filename_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + return False + + if tracker_name == "BHD": + target_name = meta.get('name').replace('DD+', 'DDP') + if str(entry.get('name')) == target_name: + meta['filename_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + return False + + if tracker_name == "HUNO": + huno = HUNO(config=config) + huno_name_result = await huno.get_name(meta) + if isinstance(huno_name_result, dict) and 'name' in huno_name_result: + huno_name = huno_name_result['name'] + else: + huno_name = str(huno_name_result) + if str(entry.get('name')) == huno_name: + meta['filename_match'] = f"{entry.get('name')} = {entry.get('link', None)}" + return False + + if tracker_name == "AITHER" and entry.get('trumpable', False): + meta['trumpable'] = entry.get('link', None) + + if tracker_name in ["BHD", "MTV", "RTF", "AR"]: + if ('2160p' in target_resolution and '2160p' in each) and ('framestor' in each.lower() 
or 'framestor' in meta['uuid'].lower()): + return False if has_is_disc and each.lower().endswith(".m2ts"): return False @@ -100,7 +292,7 @@ async def process_exclusion(entry): return True if meta.get('is_disc') == "BDMV" and tracker_name in ["AITHER", "LST", "HDB", "BHD"]: - if len(each) > 1 and tag == "": + if len(each) >= 1 and tag == "": return False if tag and tag.strip() and tag.strip() in normalized: return False @@ -150,9 +342,21 @@ async def process_exclusion(entry): if tag and tag in normalized: await log_exclusion("missing 'repack'", each) return True - elif check["uuid_flag"] != check["condition"](each): - await log_exclusion(f"{check['key']} mismatch", each) - return True + elif check["key"] == "remux": + # Bidirectional check: if your upload is a REMUX, dupe must be REMUX + # If your upload is NOT a REMUX (i.e., an encode), dupe must NOT be a REMUX + uuid_has_remux = check["uuid_flag"] + dupe_has_remux = check["condition"](normalized) + + if meta['debug']: + console.log(f"[debug] Remux check: uuid_has_remux={uuid_has_remux}, dupe_has_remux={dupe_has_remux}") + + if uuid_has_remux and not dupe_has_remux: + await log_exclusion("missing 'remux'", each) + return True + elif not uuid_has_remux and dupe_has_remux: + await log_exclusion("dupe is remux but upload is not", each) + return True if meta.get('category') == "TV": season_episode_match = await is_season_episode_match(normalized, target_season, target_episode) @@ -195,7 +399,18 @@ async def process_exclusion(entry): new_dupes.append(each) if new_dupes and not meta.get('unattended', False) and meta['debug']: - console.print(f"[cyan]Final dupes on {tracker_name}: {new_dupes}") + # Limit filtered dupe output for readability + filtered_dupes_to_print = [] + for dupe in new_dupes: + if isinstance(dupe, dict) and 'files' in dupe and isinstance(dupe['files'], list): + # Limit files list to first 10 items + limited_dupe = redact_private_info(dupe).copy() + if len(limited_dupe['files']) > 10: + limited_dupe['files'] = limited_dupe['files'][:10] + [f"... and {len(dupe['files']) - 10} more files"] + filtered_dupes_to_print.append(limited_dupe) + else: + filtered_dupes_to_print.append(redact_private_info(dupe)) + console.log(f"[yellow]Filtered dupes on {tracker_name}: {filtered_dupes_to_print}") return new_dupes @@ -270,7 +485,7 @@ def simplify_hdr(hdr_set, tracker=None): simplified = set() if any(h in hdr_set for h in {"HDR", "HDR10", "HDR10+"}): simplified.add("HDR") - if "DV" in hdr_set or "DOVI" in hdr_set: + if ".DV." 
in hdr_set or " DV " in hdr_set or "DOVI" in hdr_set: simplified.add("DV") if 'web' not in meta['type'].lower(): simplified.add("HDR") diff --git a/src/edition.py b/src/edition.py index a975656f7..d3fe1be00 100644 --- a/src/edition.py +++ b/src/edition.py @@ -1,7 +1,9 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from guessit import guessit import os import re from src.console import console +from src.region import get_distributor async def get_edition(video, bdinfo, filelist, manual_edition, meta): @@ -50,7 +52,7 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): console.print("[yellow]Edition without attributes are theatrical editions and skipped[/yellow]") if len(matching_editions) > 1: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print(f"[yellow]Media file duration {formatted_duration} matches multiple editions:[/yellow]") for i, ed in enumerate(matching_editions): diff_formatted = format_duration(ed['difference']) @@ -101,7 +103,7 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): all_playlists = [] for disc in meta['discs']: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): if disc.get('playlists'): all_playlists.extend(disc['playlists']) else: @@ -152,7 +154,7 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): # If multiple editions match this playlist, ask the user if len(matching_editions) > 1: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print(f"[yellow]Playlist edition [green]{playlist_edition} [yellow]using file [green]{playlist_file} [yellow]with duration [green]{formatted_duration} [yellow]matches multiple editions:[/yellow]") for i, ed in enumerate(matching_editions): console.print(f"[yellow]{i+1}. 
[green]{ed['name']} ({ed['display_name']}, diff: {ed['difference']:.2f} seconds)") @@ -200,7 +202,8 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): # If just one edition matches, add it directly elif len(matching_editions) == 1: edition_info = matching_editions[0] - console.print(f"[green]Playlist {playlist_edition} matches edition: {edition_info['display_name']} {edition_name}[/green]") + if meta['debug']: + console.print(f"[green]Playlist {playlist_edition} matches edition: {edition_info['display_name']} {edition_name}[/green]") if edition_info['has_attributes']: if edition_info['name'] not in matched_editions_with_attributes: @@ -323,12 +326,11 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): # Handle distributor info if edition: - from src.region import get_distributor distributors = await get_distributor(edition) bad = ['internal', 'limited', 'retail', 'version', 'remastered'] - if distributors: + if distributors and meta['is_disc']: bad.append(distributors.lower()) meta['distributor'] = distributors @@ -338,9 +340,8 @@ async def get_edition(video, bdinfo, filelist, manual_edition, meta): while ' ' in edition: edition = edition.replace(' ', ' ') - if edition != "": - if meta['debug']: - console.print(f"Final Edition: {edition}") + if edition != "" and meta['debug']: + console.print(f"Final Edition: {edition}") return edition, repack, hybrid diff --git a/src/exceptions.py b/src/exceptions.py index e5de6f944..d724a7e71 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 class LoginException(Exception): def __init__(self, *args, **kwargs): default_message = 'An error occured while logging in' diff --git a/src/exportmi.py b/src/exportmi.py index 50dcdb265..a55b5c133 100644 --- a/src/exportmi.py +++ b/src/exportmi.py @@ -1,8 +1,63 @@ -from src.console import console -from pymediainfo import MediaInfo +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles import json import os import platform +import subprocess +from pymediainfo import MediaInfo +from src.console import console + + +def setup_mediainfo_library(base_dir, debug=False): + system = platform.system().lower() + + if system == 'windows': + cli_path = os.path.join(base_dir, "bin", "MI", "windows", "MediaInfo.exe") + if os.path.exists(cli_path): + if debug: + console.print(f"[blue]Windows MediaInfo CLI: {cli_path} (found)[/blue]") + return { + 'cli': cli_path, + 'lib': None, # Windows uses CLI only + 'lib_dir': None + } + else: + if debug: + console.print(f"[yellow]Windows MediaInfo CLI: {cli_path} (not found)[/yellow]") + return None + + elif system == 'linux': + if base_dir.endswith("bin/MI") or base_dir.endswith("bin\\MI"): + lib_dir = os.path.join(base_dir, "linux") + else: + lib_dir = os.path.join(base_dir, "bin", "MI", "linux") + + mediainfo_lib = os.path.join(lib_dir, "libmediainfo.so.0") + mediainfo_cli = os.path.join(lib_dir, "mediainfo") + cli_available = os.path.exists(mediainfo_cli) + lib_available = os.path.exists(mediainfo_lib) + + if debug: + console.print(f"[blue]MediaInfo CLI binary: {mediainfo_cli} ({'found' if cli_available else 'not found'})[/blue]") + console.print(f"[blue]MediaInfo library: {mediainfo_lib} ({'found' if lib_available else 'not found'})[/blue]") + + if lib_available: + # Set library directory for LD_LIBRARY_PATH + current_ld_path = os.environ.get('LD_LIBRARY_PATH', '') + if lib_dir not in current_ld_path: + if 
current_ld_path: + os.environ['LD_LIBRARY_PATH'] = f"{lib_dir}:{current_ld_path}" + else: + os.environ['LD_LIBRARY_PATH'] = lib_dir + if debug: + console.print(f"[blue]Updated LD_LIBRARY_PATH to include: {lib_dir}[/blue]") + + return { + 'cli': mediainfo_cli if cli_available else None, + 'lib': mediainfo_lib if lib_available else None, + 'lib_dir': lib_dir + } + return None async def mi_resolution(res, guess, width, scan, height, actual_height): @@ -80,6 +135,7 @@ async def mi_resolution(res, guess, width, scan, height, actual_height): async def exportInfo(video, isdir, folder_id, base_dir, export_text, is_dvd=False, debug=False): + def filter_mediainfo(data): filtered = { "creatingLibrary": data.get("creatingLibrary"), @@ -185,6 +241,8 @@ def filter_mediainfo(data): "MaxFALL": track.get("MaxFALL", {}), "MaxFALL_Source": track.get("MaxFALL_Source", {}), "Encoded_Library_Settings": track.get("Encoded_Library_Settings", {}), + "Encoded_Library": track.get("Encoded_Library", {}), + "Encoded_Library_Name": track.get("Encoded_Library_Name", {}), }) elif track["@type"] == "Audio": filtered["media"]["track"].append({ @@ -253,116 +311,207 @@ def filter_mediainfo(data): return filtered mediainfo_cmd = None + mediainfo_config = None + if is_dvd: if debug: - console.print("[bold yellow]DVD detected, using specialized MediaInfo binary...") - mediainfo_binary = os.path.join(base_dir, "bin", "MI", "windows", "MediaInfo.exe") + console.print("[bold yellow]DVD detected, using specialized MediaInfo...") - if platform.system() == "windows" and os.path.exists(mediainfo_binary): - mediainfo_cmd = mediainfo_binary + current_platform = platform.system().lower() - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: - if debug: - console.print("[bold yellow]Exporting MediaInfo...") - if not isdir: - os.chdir(os.path.dirname(video)) + if current_platform in ["linux", "windows"]: + mediainfo_config = setup_mediainfo_library(base_dir, debug=debug) + if mediainfo_config: + if mediainfo_config['cli']: + mediainfo_cmd = mediainfo_config['cli'] - if mediainfo_cmd: - import subprocess - try: - # Handle both string and list command formats - if isinstance(mediainfo_cmd, list): - result = subprocess.run(mediainfo_cmd + [video], capture_output=True, text=True) + # Configure library if available (Linux only) + if mediainfo_config['lib']: + try: + if hasattr(MediaInfo, '_library_file'): + MediaInfo._library_file = mediainfo_config['lib'] + + test_parse = MediaInfo.can_parse() + if debug: + console.print(f"[green]Configured specialized MediaInfo library (can_parse: {test_parse})[/green]") + + if not test_parse: + if debug: + console.print("[yellow]Library test failed, may fall back to system MediaInfo[/yellow]") + + except Exception as e: + if debug: + console.print(f"[yellow]Could not configure specialized library: {e}[/yellow]") else: - result = subprocess.run([mediainfo_cmd, video], capture_output=True, text=True) - media_info = result.stdout - except Exception as e: - console.print(f"[bold red]Error using specialized MediaInfo binary: {e}") - console.print("[bold yellow]Falling back to standard MediaInfo...") - media_info = MediaInfo.parse(video, output="STRING", full=False) + if debug: + console.print("[yellow]MediaInfo library not available[/yellow]") + else: + if debug: + console.print("[yellow]No specialized MediaInfo components found, using system MediaInfo[/yellow]") else: + if debug: + console.print(f"[yellow]DVD processing on {current_platform} not supported with specialized 
MediaInfo[/yellow]") + + if debug: + console.print("[bold yellow]Exporting MediaInfo...") + if not isdir: + os.chdir(os.path.dirname(video)) + + if mediainfo_cmd and is_dvd: + try: + cmd = [mediainfo_cmd, video] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) + + if result.returncode == 0 and result.stdout: + media_info = result.stdout + else: + raise subprocess.CalledProcessError(result.returncode, cmd, result.stdout, result.stderr) + + except subprocess.TimeoutExpired: + console.print("[bold red]Specialized MediaInfo timed out (30s) - falling back to standard MediaInfo[/bold red]") + media_info = MediaInfo.parse(video, output="STRING", full=False) + except (subprocess.CalledProcessError, Exception) as e: + console.print(f"[bold red]Error getting text from specialized MediaInfo: {e}") + if debug and 'result' in locals(): + console.print(f"[red]Subprocess stderr: {result.stderr}[/red]") + console.print(f"[red]Subprocess returncode: {result.returncode}[/red]") + console.print("[bold yellow]Falling back to standard MediaInfo for text...") media_info = MediaInfo.parse(video, output="STRING", full=False) + else: + media_info = MediaInfo.parse(video, output="STRING", full=False) - if isinstance(media_info, str): - filtered_media_info = "\n".join( - line for line in media_info.splitlines() - if not line.strip().startswith("ReportBy") and not line.strip().startswith("Report created by ") - ) - else: - filtered_media_info = "\n".join( - line for line in media_info.splitlines() - if not line.strip().startswith("ReportBy") and not line.strip().startswith("Report created by ") - ) - - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: - export.write(filtered_media_info.replace(video, os.path.basename(video))) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: - export_cleanpath.write(filtered_media_info.replace(video, os.path.basename(video))) - if debug: - console.print("[bold green]MediaInfo Exported.") + if isinstance(media_info, str): + filtered_media_info = "\n".join( + line for line in media_info.splitlines() + if not line.strip().startswith("ReportBy") and not line.strip().startswith("Report created by ") + ) + else: + filtered_media_info = "\n".join( + line for line in media_info.splitlines() + if not line.strip().startswith("ReportBy") and not line.strip().startswith("Report created by ") + ) - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json"): - if mediainfo_cmd: - import subprocess - try: - # Handle both string and list command formats - if isinstance(mediainfo_cmd, list): - result = subprocess.run(mediainfo_cmd + ["--Output=JSON", video], capture_output=True, text=True) - else: - result = subprocess.run([mediainfo_cmd, "--Output=JSON", video], capture_output=True, text=True) + async with aiofiles.open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: + await export.write(filtered_media_info.replace(video, os.path.basename(video))) + async with aiofiles.open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: + await export_cleanpath.write(filtered_media_info.replace(video, os.path.basename(video))) + if debug: + console.print("[bold green]MediaInfo Exported.") + + if mediainfo_cmd and is_dvd: + try: + cmd = [mediainfo_cmd, "--Output=JSON", video] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) + + if 
result.returncode == 0 and result.stdout: media_info_json = result.stdout media_info_dict = json.loads(media_info_json) - except Exception as e: - console.print(f"[bold red]Error getting JSON from specialized MediaInfo binary: {e}") - console.print("[bold yellow]Falling back to standard MediaInfo for JSON...") - media_info_json = MediaInfo.parse(video, output="JSON") - media_info_dict = json.loads(media_info_json) - else: + else: + raise subprocess.CalledProcessError(result.returncode, cmd, result.stdout, result.stderr) + + except subprocess.TimeoutExpired: + console.print("[bold red]Specialized MediaInfo timed out (30s) - falling back to standard MediaInfo[/bold red]") media_info_json = MediaInfo.parse(video, output="JSON") media_info_dict = json.loads(media_info_json) + except (subprocess.CalledProcessError, json.JSONDecodeError, Exception) as e: + console.print(f"[bold red]Error getting JSON from specialized MediaInfo: {e}") + if debug and 'result' in locals(): + console.print(f"[red]Subprocess stderr: {result.stderr}[/red]") + console.print(f"[red]Subprocess returncode: {result.returncode}[/red]") + if result.stdout: + console.print(f"[red]Subprocess stdout preview: {result.stdout[:200]}...[/red]") + console.print("[bold yellow]Falling back to standard MediaInfo for JSON...[/bold yellow]") + media_info_json = MediaInfo.parse(video, output="JSON") + media_info_dict = json.loads(media_info_json) + else: + # Use standard MediaInfo library for non-DVD or when specialized CLI not available + media_info_json = MediaInfo.parse(video, output="JSON") + media_info_dict = json.loads(media_info_json) - filtered_info = filter_mediainfo(media_info_dict) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: - json.dump(filtered_info, export, indent=4) + filtered_info = filter_mediainfo(media_info_dict) + + async with aiofiles.open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + await export.write(json.dumps(filtered_info, indent=4)) + if debug: + console.print(f"[green]JSON file written to: {base_dir}/tmp/{folder_id}/MediaInfo.json[/green]") - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + async with aiofiles.open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.loads(await f.read()) + + # Cleanup: Reset library configuration if we modified it + if is_dvd and platform.system().lower() in ['linux', 'windows']: + # Reset MediaInfo library file to default (Linux only) + if hasattr(MediaInfo, '_library_file'): + MediaInfo._library_file = None + if debug: + console.print("[blue]Reset MediaInfo library configuration[/blue]") return mi -def validate_mediainfo(base_dir, folder_id, path, filelist, debug): - if not (path.lower().endswith('.mkv') or any(str(f).lower().endswith('.mkv') for f in filelist)): +def validate_mediainfo(meta, debug, settings=False): + if not any(str(f).lower().endswith('.mkv') for f in meta.get('filelist', [])): if debug: - console.print(f"[yellow]Skipping {path} (not an .mkv file)[/yellow]") + console.print(f"[yellow]Skipping {meta.get('path')} (not an .mkv file)[/yellow]") return True - mediainfo_path = f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt" + unique_id = None - in_general = False + valid_settings = False if debug: - console.print(f"[cyan]Validating MediaInfo at: {mediainfo_path}") - - try: - with open(mediainfo_path, 'r', encoding='utf-8') as f: - for line in f: - if line.strip() == "General": - in_general = 
True - continue - if in_general: - if line.strip() == "": - break - if line.strip().startswith("Unique ID"): - unique_id = line.split(":", 1)[1].strip() - break - except FileNotFoundError: - console.print(f"[red]MediaInfo file not found: {mediainfo_path}[/red]") - return False + console.print("[cyan]Validating MediaInfo") + + mediainfo_data = meta.get('mediainfo', {}) + + if "media" in mediainfo_data and "track" in mediainfo_data["media"]: + tracks = mediainfo_data["media"]["track"] + has_audio = any(track.get("@type", "") == "Audio" for track in tracks) + + if not has_audio: + raise Exception("Upload Assistant does not support no audio media.") + + for track in tracks: + track_type = track.get("@type", "") + + if settings and track_type == "Video": + encoding_settings = track.get("Encoded_Library_Settings") + if encoding_settings and encoding_settings != {} and str(encoding_settings).strip(): + valid_settings = True + if debug: + console.print(f"[green]Found encoding settings: {encoding_settings}[/green]") + break + + elif not settings and track_type == "General": + unique_id_value = track.get("UniqueID") + if unique_id_value and unique_id_value != {} and str(unique_id_value).strip(): + unique_id = str(unique_id_value) + if debug: + console.print(f"[green]Found Unique ID: {unique_id}[/green]") + break if debug: - if unique_id: - console.print(f"[green]Found Unique ID: {unique_id}[/green]") - else: - console.print("[yellow]Unique ID not found in General section.[/yellow]") + if settings and not valid_settings: + console.print("[yellow]Mediainfo failed validation (no encoding settings)[/yellow]") + elif not settings and not unique_id: + console.print("[yellow]Mediainfo failed validation (no unique ID)[/yellow]") + + return bool(valid_settings) if settings else bool(unique_id) - return bool(unique_id) + +async def get_conformance_error(meta): + if not meta.get('is_disc') == "BDMV" and meta.get('mediainfo', {}).get('media', {}).get('track'): + general_track = next((track for track in meta['mediainfo']['media']['track'] + if track.get('@type') == 'General'), None) + if general_track and general_track.get('extra', {}).get('ConformanceErrors', {}): + try: + return True + except ValueError: + if meta['debug']: + console.print(f"[red]Unexpected value: {general_track['extra']['ConformanceErrors']}[/red]") + return True + else: + if meta['debug']: + console.print("[green]No Conformance errors found in MediaInfo General track[/green]") + return False + else: + return False diff --git a/src/get_desc.py b/src/get_desc.py index eb3f05823..605ab7162 100644 --- a/src/get_desc.py +++ b/src/get_desc.py @@ -1,105 +1,138 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import asyncio +import glob +import json import os -import urllib.parse +import re import requests -import glob +import urllib.parse +from jinja2 import Template + +from pymediainfo import MediaInfo +from src.bbcode import BBCODE from src.console import console +from src.languages import process_desc_language +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.trackers.COMMON import COMMON +from src.uploadscreens import upload_screens async def gen_desc(meta): def clean_text(text): - return text.replace('\r\n', '').replace('\n', '').strip() + return text.replace("\r\n", "\n").strip() - desclink = meta.get('desclink') - descfile = meta.get('descfile') + description_link = meta.get("description_link") + description_file = meta.get("description_file") scene_nfo = 
False
     bhd_nfo = False
-    with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description:
+    with open(
+        f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", "w", newline="", encoding="utf8"
+    ) as description:
         description.seek(0)
         content_written = False

-        if meta.get('desc_template'):
-            from jinja2 import Template
+        if meta.get("description_template"):
             try:
-                with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f:
+                with open(f"{meta['base_dir']}/data/templates/{meta['description_template']}.txt", "r") as f:
                     template = Template(f.read())
                     template_desc = template.render(meta)
-                    if clean_text(template_desc):
+                    cleaned_content = clean_text(template_desc)
+                    if cleaned_content:
                         if len(template_desc) > 0:
-                            description.write(template_desc + "\n")
+                            description.write(cleaned_content + "\n")
+                            meta["description_template_content"] = cleaned_content
                             content_written = True
             except FileNotFoundError:
-                console.print(f"[ERROR] Template '{meta['desc_template']}' not found.")
+                console.print(f"[ERROR] Template '{meta['description_template']}' not found.")

-        base_dir = meta['base_dir']
-        uuid = meta['uuid']
-        path = meta['path']
+        base_dir = meta["base_dir"]
+        uuid = meta["uuid"]
+        path = meta["path"]
         specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo")
         source_dir_path = os.path.join(path, "*.nfo")
-        if meta.get('nfo') and not content_written:
-            if meta['debug']:
+        if meta.get("nfo"):
+            if meta["debug"]:
                 console.print(f"specified_dir_path: {specified_dir_path}")
                 console.print(f"sourcedir_path: {source_dir_path}")
-            if 'auto_nfo' in meta and meta['auto_nfo'] is True:
+            if "auto_nfo" in meta and meta["auto_nfo"] is True:
                 nfo_files = glob.glob(specified_dir_path)
                 scene_nfo = True
-            elif 'bhd_nfo' in meta and meta['bhd_nfo'] is True:
+            elif "bhd_nfo" in meta and meta["bhd_nfo"] is True:
                 nfo_files = glob.glob(specified_dir_path)
                 bhd_nfo = True
             else:
                 nfo_files = glob.glob(source_dir_path)
             if not nfo_files:
                 console.print("NFO was set but no nfo file was found")
-                description.write("\n")
+                if not content_written:
+                    description.write("\n")
                 return meta

             if nfo_files:
                 nfo = nfo_files[0]
                 try:
-                    with open(nfo, 'r', encoding="utf-8") as nfo_file:
+                    with open(nfo, "r", encoding="utf-8") as nfo_file:
                         nfo_content = nfo_file.read()
-                    if meta['debug']:
+                    if meta["debug"]:
                         console.print("NFO content read with utf-8 encoding.")
                 except UnicodeDecodeError:
-                    if meta['debug']:
+                    if meta["debug"]:
                         console.print("utf-8 decoding failed, trying latin1.")
-                    with open(nfo, 'r', encoding="latin1") as nfo_file:
+                    with open(nfo, "r", encoding="latin1") as nfo_file:
                         nfo_content = nfo_file.read()

-                if scene_nfo is True:
-                    description.write(f"[center][spoiler=Scene NFO:][code]{nfo_content}[/code][/spoiler][/center]\n")
-                elif bhd_nfo is True:
-                    description.write(f"[center][spoiler=FraMeSToR NFO:][code]{nfo_content}[/code][/spoiler][/center]\n")
-                else:
-                    description.write(f"[code]{nfo_content}[/code]\n")
-                meta['description'] = "CUSTOM"
-                content_written = True
+                if not content_written:
+                    if scene_nfo is True:
+                        description.write(
+                            f"[center][spoiler=Scene NFO:][code]{nfo_content}[/code][/spoiler][/center]\n"
+                        )
+                    elif bhd_nfo is True:
+                        description.write(
+                            f"[center][spoiler=FraMeSToR NFO:][code]{nfo_content}[/code][/spoiler][/center]\n"
+                        )
+                    else:
+                        description.write(f"[code]{nfo_content}[/code]\n")
+
+                    content_written = True

-        if desclink and not content_written:
+                nfo_content_utf8 = 
nfo_content.encode("utf-8", "ignore").decode("utf-8") + meta["description_nfo_content"] = nfo_content_utf8 + + if description_link: try: - parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) + parsed = urllib.parse.urlparse(description_link.replace("/raw/", "/")) split = os.path.split(parsed.path) - raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw = parsed._replace( + path=f"{split[0]}/raw/{split[1]}" if split[0] != "/" else f"/raw{parsed.path}" + ) raw_url = urllib.parse.urlunparse(raw) - desclink_content = requests.get(raw_url).text - if clean_text(desclink_content): - description.write(desclink_content + "\n") - meta['description'] = "CUSTOM" + description_link_content = requests.get(raw_url, timeout=20).text + cleaned_content = clean_text(description_link_content) + if cleaned_content: + if not content_written: + description.write(cleaned_content + "\n") + meta["description_link_content"] = cleaned_content content_written = True except Exception as e: console.print(f"[ERROR] Failed to fetch description from link: {e}") - if descfile and os.path.isfile(descfile) and not content_written: - with open(descfile, 'r') as f: + if description_file and os.path.isfile(description_file): + with open(description_file, "r", encoding="utf-8") as f: file_content = f.read() - if clean_text(file_content): - description.write(file_content) - meta['description'] = "CUSTOM" + cleaned_content = clean_text(file_content) + if cleaned_content: + if not content_written: + description.write(file_content) + meta["description_file_content"] = cleaned_content content_written = True if not content_written: - if meta.get('description'): - description_text = meta.get('description', '').strip() + if meta.get("description"): + description_text = meta.get("description", "").strip() else: description_text = "" if description_text: @@ -109,13 +142,1159 @@ def clean_text(text): description.write("\n") # Fallback if no description is provided - if not meta.get('skip_gen_desc', False) and not content_written: - description_text = meta['description'] if meta.get('description', '') else "" - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + if not meta.get("skip_gen_desc", False) and not content_written: + description_text = meta["description"] if meta.get("description", "") else "" + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", "w", newline="", encoding="utf8" + ) as description: if len(description_text) > 0: description.write(description_text + "\n") - if meta.get('description') in ('None', '', ' '): - meta['description'] = None + if meta.get("description") in ("None", "", " "): + meta["description"] = None return meta + + +class DescriptionBuilder: + def __init__(self, config): + self.config = config + self.common = COMMON(config) + self.parser = self.common.parser + + async def get_custom_header(self, tracker): + """Returns a custom header if configured.""" + try: + custom_description_header = self.config["TRACKERS"][tracker].get( + "custom_description_header", self.config["DEFAULT"].get("custom_description_header", False) + ) + if custom_description_header: + return custom_description_header + except Exception as e: + console.print(f"[yellow]Warning: Error setting custom description header: {str(e)}[/yellow]") + + return "" + + async def get_tonemapped_header(self, meta, tracker): + try: + tonemapped_description_header = self.config["TRACKERS"][tracker].get( 
+ "tonemapped_header", self.config["DEFAULT"].get("tonemapped_header", "") + ) + if tonemapped_description_header and meta.get("tonemapped", False): + return tonemapped_description_header + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") + return "" + + async def get_logo_section(self, meta, tracker): + """Returns the logo URL and size if applicable.""" + logo, logo_size = "", "" + try: + if not self.config["TRACKERS"][tracker].get( + "add_logo", self.config["DEFAULT"].get("add_logo", False) + ): + return logo, logo_size + + logo = meta.get("logo", "") + logo_size = self.config["DEFAULT"].get("logo_size", "300") + + if logo: + return logo, logo_size + except Exception as e: + console.print(f"[yellow]Warning: Error getting logo section: {str(e)}[/yellow]") + + return logo, logo_size + + async def get_tv_info(self, meta, tracker, resize=False): + title, image, overview = "", "", "" + try: + if ( + not self.config["TRACKERS"][tracker].get( + "episode_overview", self.config["DEFAULT"].get("episode_overview", False) + ) + or meta["category"] != "TV" + ): + return title, image, overview + + tvmaze_episode_data = meta.get("tvmaze_episode_data", {}) + + season_name = tvmaze_episode_data.get("season_name", "") or meta.get("tvdb_season_name", "") + season_number = meta.get("season", "") + episode_number = meta.get("episode", "") + overview = tvmaze_episode_data.get("overview", "") or meta.get("overview_meta", "") + episode_title = meta.get("auto_episode_title") or tvmaze_episode_data.get("episode_name", "") + + image = "" + if meta.get("tv_pack", False): + image = tvmaze_episode_data.get("series_image", "") + if resize: + image = tvmaze_episode_data.get("series_image_medium", "") + else: + image = tvmaze_episode_data.get("image", "") + if resize: + image = tvmaze_episode_data.get("image_medium", "") + + title = "" + if season_name: + title = f"{season_name}" + if season_number: + title += f" - {season_number}{episode_number}" + + if episode_title: + if title: + title += ": " + title += f"{episode_title}" + + except Exception as e: + console.print(f"[yellow]Warning: Error getting TV info: {str(e)}[/yellow]") + + return title, image, overview + + async def get_mediainfo_section(self, meta, tracker): + """Returns the mediainfo/bdinfo section, using a cache file if available.""" + if meta.get("is_disc") == "BDMV": + return "" + + if self.config["TRACKERS"][tracker].get( + "full_mediainfo", self.config["DEFAULT"].get("full_mediainfo", False) + ): + mi_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + if await self.common.path_exists(mi_path): + async with aiofiles.open(mi_path, "r", encoding="utf-8") as mi: + return await mi.read() + + cache_file_dir = os.path.join(meta["base_dir"], "tmp", meta["uuid"]) + cache_file_path = os.path.join(cache_file_dir, "MEDIAINFO_SHORT.txt") + + file_exists = os.path.exists(cache_file_path) + file_size = os.path.getsize(cache_file_path) if file_exists else 0 + + if file_exists and file_size > 0: + try: + async with aiofiles.open(cache_file_path, mode="r", encoding="utf-8") as f: + media_info_content = await f.read() + return media_info_content + except Exception: + pass + + video_file = meta["filelist"][0] + mi_template = os.path.join(meta["base_dir"], "data", "templates", "MEDIAINFO.txt") + mi_file_path = os.path.join(cache_file_dir, "MEDIAINFO_CLEANPATH.txt") + + template_exists = await self.common.path_exists(mi_template) + + if template_exists: + try: + media_info_result = 
MediaInfo.parse( + video_file, + output="STRING", + full=False, + mediainfo_options={"inform": f"file://{mi_template}"}, + ) + media_info_content = str(media_info_result) + + if media_info_content: + media_info_content = media_info_content.replace("\r\n", "\n") + try: + await self.common.makedirs(cache_file_dir) + async with aiofiles.open(cache_file_path, mode="w", encoding="utf-8") as f: + await f.write(media_info_content) + except Exception: + pass + + return media_info_content + + except Exception: + cleanpath_exists = await self.common.path_exists(mi_file_path) + if cleanpath_exists: + async with aiofiles.open(mi_file_path, "r", encoding="utf-8") as f: + return await f.read() + + else: + cleanpath_exists = await self.common.path_exists(mi_file_path) + if cleanpath_exists: + async with aiofiles.open(mi_file_path, "r", encoding="utf-8") as f: + tech_info = await f.read() + return tech_info + + return "" + + async def get_bdinfo_section(self, meta): + """Returns the bdinfo section if applicable.""" + try: + if meta.get("is_disc") == "BDMV": + bdinfo_sections = [] + if meta.get("discs"): + for disc in meta["discs"]: + file_info = disc.get("summary", "") + if file_info: + bdinfo_sections.append(file_info) + return "\n\n".join(bdinfo_sections) + except Exception as e: + console.print(f"[yellow]Warning: Error getting bdinfo section: {str(e)}[/yellow]") + + return "" + + async def screenshot_header(self, tracker): + """Returns the screenshot header if applicable.""" + try: + screenheader = self.config["TRACKERS"][tracker].get( + "custom_screenshot_header", self.config["DEFAULT"].get("screenshot_header", None) + ) + if screenheader: + return screenheader + except Exception as e: + console.print(f"[yellow]Warning: Error getting screenshot header: {str(e)}[/yellow]") + + return "" + + async def menu_screenshot_header(self, meta, tracker): + """Returns the screenshot header for menus if applicable.""" + try: + if meta.get("is_disc", "") and meta.get('menu_images', []): + disc_menu_header = self.config["TRACKERS"][tracker].get( + "disc_menu_header", self.config["DEFAULT"].get("disc_menu_header", None) + ) + if disc_menu_header: + return disc_menu_header + except Exception as e: + console.print(f"[yellow]Warning: Error getting menus screenshot header: {str(e)}[/yellow]") + + return "" + + async def get_user_description(self, meta): + """Returns the user-provided description (file or link)""" + try: + description_file_content = meta.get("description_file_content", "").strip() + description_link_content = meta.get("description_link_content", "").strip() + + if description_file_content or description_link_content: + if description_file_content: + return description_file_content + elif description_link_content: + return description_link_content + except Exception as e: + console.print(f"[yellow]Warning: Error getting user description: {str(e)}[/yellow]") + + return "" + + async def get_custom_signature(self, tracker): + custom_signature = "" + try: + custom_signature = self.config["TRACKERS"][tracker].get( + "custom_signature", self.config["DEFAULT"].get("custom_signature", None) + ) + except Exception as e: + console.print(f"[yellow]Warning: Error setting custom signature: {str(e)}[/yellow]") + return custom_signature + + async def get_bluray_section(self, meta, tracker): + release_url = "" + cover_list = [] + cover_images = "" + + try: + cover_size = int(self.config["DEFAULT"].get("bluray_image_size", "250")) + bluray_link = self.config["DEFAULT"].get("add_bluray_link", False) + + if 
meta.get("is_disc") in ["BDMV", "DVD"] and bluray_link and meta.get("release_url", ""): + release_url = meta["release_url"] + + covers = False + if await self.common.path_exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/covers.json"): + covers = True + + if ( + meta.get("is_disc") in ["BDMV", "DVD"] + and self.config["DEFAULT"].get("use_bluray_images", False) + and covers + ): + async with aiofiles.open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/covers.json", "r", encoding="utf-8" + ) as f: + cover_data = json.loads(await f.read()) + + if isinstance(cover_data, list): + for img_data in cover_data: + if "raw_url" in img_data and "web_url" in img_data: + web_url = img_data["web_url"] + raw_url = img_data["raw_url"] + + if tracker == "TL": + cover_list.append( + f""" """ + ) + elif tracker == "HDT": + cover_list.append( + f" " + ) + else: + cover_list.append( + f"[url={web_url}][img={cover_size}]{raw_url}[/img][/url]" + ) + + if cover_list: + cover_images = "".join(cover_list) + + except Exception as e: + console.print(f"[yellow]Warning: Error getting bluray section: {str(e)}[/yellow]") + + return release_url, cover_images + + async def unit3d_edit_desc( + self, + meta, + tracker, + signature="", + comparison=False, + desc_header="", + image_list=None, + approved_image_hosts=None, + ): + if image_list is not None: + images = image_list + multi_screens = 0 + else: + images = meta["image_list"] + multi_screens = int(self.config["DEFAULT"].get("multiScreens", 2)) + if meta.get("sorted_filelist"): + multi_screens = 0 + + desc_parts = [] + + # Custom Header + if not desc_header: + desc_header = await self.get_custom_header(tracker) + if desc_header: + desc_parts.append(desc_header + "\n") + + # Language + try: + if not meta.get("language_checked", False): + await process_desc_language(meta, desc_parts, tracker) + if meta.get("audio_languages") and meta.get("write_audio_languages"): + desc_parts.append(f"[code]Audio Language/s: {', '.join(meta['audio_languages'])}[/code]") + + if meta["subtitle_languages"] and meta["write_subtitle_languages"]: + desc_parts.append( + f"[code]Subtitle Language/s: {', '.join(meta['subtitle_languages'])}[/code]" + ) + if meta["subtitle_languages"] and meta["write_hc_languages"]: + desc_parts.append( + f"[code]Hardcoded Subtitle Language/s: {', '.join(meta['subtitle_languages'])}[/code]" + ) + except Exception as e: + console.print(f"[yellow]Warning: Error processing language: {str(e)}[/yellow]") + + # Logo + logo, logo_size = await self.get_logo_section(meta, tracker) + if logo and logo_size: + desc_parts.append(f"[center][img={logo_size}]{logo}[/img][/center]\n") + + # Blu-ray + release_url, cover_images = await self.get_bluray_section(meta, tracker) + if release_url: + desc_parts.append(f"[center]{release_url}[/center]") + if cover_images: + desc_parts.append(f"[center]{cover_images}[/center]\n") + + # TV + title, episode_image, episode_overview = await self.get_tv_info(meta, tracker) + if episode_overview: + if tracker == "HUNO": + desc_parts.append(f"[center]{title}[/center]\n") + else: + desc_parts.append(f"[center][pre]{title}[/pre][/center]\n") + + if tracker == "HUNO": + desc_parts.append(f"[center]{episode_overview}[/center]\n") + else: + desc_parts.append(f"[center][pre]{episode_overview}[/pre][/center]\n") + + # Description that may come from API requests + meta_description = meta.get("description", "") + # Add FraMeSToR NFO to Aither + if tracker == "AITHER" and "framestor" in meta and meta["framestor"]: + nfo_content = meta.get("description_nfo_content", "") 
+ if nfo_content: + aither_framestor_nfo = f"[code]{nfo_content}[/code]" + aither_framestor_nfo = aither_framestor_nfo.replace( + "https://i.imgur.com/e9o0zpQ.png", + "https://beyondhd.co/images/2017/11/30/c5802892418ee2046efba17166f0cad9.png", + ) + images = [] + desc_parts.append(aither_framestor_nfo) + else: + # Remove NFO from description + meta_description = re.sub( + r"\[center\]\[spoiler=.*? NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]", + "", + meta_description, + flags=re.DOTALL, + ) + if meta_description: + desc_parts.append(meta_description) + elif meta_description: + desc_parts.append(meta_description) + + # Description from file/pastebin link + desc_parts.append(await self.get_user_description(meta)) + + # Menu Screenshots + desc_parts.append(await self.menu_section(meta, tracker)) + + # Tonemapped Header + desc_parts.append(await self.get_tonemapped_header(meta, tracker)) + + # Discs and Screenshots + discs_and_screenshots = await self._handle_discs_and_screenshots( + meta, tracker, approved_image_hosts, images, multi_screens + ) + desc_parts.append(discs_and_screenshots) + + # Custom Signature + desc_parts.append(await self.get_custom_signature(tracker)) + + # UA Signature + if not signature: + signature = f"[right][url=https://github.com/Audionut/Upload-Assistant][size=4]{meta['ua_signature']}[/size][/url][/right]" + if tracker == "HUNO": + signature = signature.replace("[size=4]", "[size=8]") + desc_parts.append(signature) + + description = "\n".join( + part for part in desc_parts + if part is not None and str(part).strip() + ) + + # Formatting + bbcode = BBCODE() + description = bbcode.convert_pre_to_code(description) + description = bbcode.convert_hide_to_spoiler(description) + description = description.replace("[user]", "").replace("[/user]", "") + description = description.replace("[hr]", "").replace("[/hr]", "") + description = description.replace("[ul]", "").replace("[/ul]", "") + description = description.replace("[ol]", "").replace("[/ol]", "") + description = bbcode.remove_extra_lines(description) + if comparison is False: + description = bbcode.convert_comparison_to_collapse(description, 1000) + + if meta['debug']: + desc_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt" + console.print(f"DEBUG: Saving final description to [yellow]{desc_file}[/yellow]") + async with aiofiles.open(desc_file, "w", encoding="utf-8") as description_file: + await description_file.write(description) + + return description + + async def _check_saved_pack_image_links(self, meta, approved_image_hosts): + pack_images_file = os.path.join(meta["base_dir"], "tmp", meta["uuid"], "pack_image_links.json") + pack_images_data = {} + approved_hosts = set(approved_image_hosts or []) + if await self.common.path_exists(pack_images_file): + try: + async with aiofiles.open(pack_images_file, "r", encoding="utf-8") as f: + pack_images_data = json.loads(await f.read()) + + # Filter out keys with non-approved image hosts + keys_to_remove = [] + for key_name, key_data in pack_images_data.get("keys", {}).items(): + images_to_keep = [] + for img in key_data.get("images", []): + raw_url = img.get("raw_url", "") + # Extract hostname from URL (e.g., ptpimg.me -> ptpimg) + try: + parsed_url = urllib.parse.urlparse(raw_url) + hostname = parsed_url.netloc + # Get the main domain name (first part before the dot) + host_key = hostname.split(".")[0] if hostname else "" + + if not approved_hosts or host_key in approved_hosts: + images_to_keep.append(img) + elif 
meta["debug"]: + console.print( + f"[yellow]Filtering out image from non-approved host: {hostname}[/yellow]" + ) + except Exception: + # If URL parsing fails, skip this image + if meta["debug"]: + console.print(f"[yellow]Could not parse URL: {raw_url}[/yellow]") + continue + + if images_to_keep: + # Update the key with only approved images + pack_images_data["keys"][key_name]["images"] = images_to_keep + pack_images_data["keys"][key_name]["count"] = len(images_to_keep) + else: + # Mark key for removal if no approved images + keys_to_remove.append(key_name) + + # Remove keys with no approved images + for key_name in keys_to_remove: + del pack_images_data["keys"][key_name] + if meta["debug"]: + console.print( + f"[yellow]Removed key '{key_name}' - no approved image hosts[/yellow]" + ) + + # Recalculate total count + pack_images_data["total_count"] = sum( + key_data["count"] for key_data in pack_images_data.get("keys", {}).values() + ) + + if pack_images_data.get("total_count", 0) < 3: + pack_images_data = {} # Invalidate if less than 3 images total + if meta["debug"]: + console.print( + "[yellow]Invalidating pack images - less than 3 approved images total[/yellow]" + ) + else: + if meta["debug"]: + console.print(f"[green]Loaded previously uploaded images from {pack_images_file}") + console.print( + f"[blue]Found {pack_images_data.get('total_count', 0)} approved images across {len(pack_images_data.get('keys', {}))} keys[/blue]" + ) + except Exception as e: + console.print(f"[yellow]Warning: Could not load pack image data: {str(e)}[/yellow]") + return pack_images_data + + async def _handle_discs_and_screenshots(self, meta, tracker, approved_image_hosts, images, multi_screens): + try: + screenheader = await self.screenshot_header(tracker) + except Exception: + screenheader = None + + # Check for saved pack_image_links.json file + pack_images_data = await self._check_saved_pack_image_links(meta, approved_image_hosts) + + char_limit = int(self.config["DEFAULT"].get("charLimit", 14000)) + file_limit = int(self.config["DEFAULT"].get("fileLimit", 5)) + thumb_size = int(self.config["DEFAULT"].get("pack_thumb_size", "300")) + process_limit = int(self.config["DEFAULT"].get("processLimit", 10)) + + screensPerRow = await self.get_screens_per_row(tracker) + + desc_parts = [] + + discs = meta.get("discs", []) + if len(discs) == 1: + each = discs[0] + if each["type"] == "DVD": + desc_parts.append("[center]") + desc_parts.append( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]\n\n" + ) + desc_parts.append("[/center]") + if screenheader is not None: + desc_parts.append(screenheader + "\n") + desc_parts.append("[center]") + for img_index in range(len(images[: int(meta["screens"])])): + web_url = images[img_index]["web_url"] + raw_url = images[img_index]["raw_url"] + desc_parts.append( + f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] " + ) + if screensPerRow and (img_index + 1) % screensPerRow == 0: + desc_parts.append("\n") + desc_parts.append("[/center]") + if each["type"] == "BDMV": + bdinfo_keys = [key for key in each if key.startswith("bdinfo")] + if len(bdinfo_keys) > 1: + if "retry_count" not in meta: + meta["retry_count"] = 0 + + for i, key in enumerate(bdinfo_keys[1:], start=1): # Skip the first bdinfo + new_images_key = f"new_images_playlist_{i}" + bdinfo = each[key] + edition = bdinfo.get("edition", "Unknown Edition") + + # Find the corresponding summary for this bdinfo + summary_key = f"summary_{i}" if i > 0 else 
"summary" + summary = each.get(summary_key, "No summary available") + + # Check for saved images first + if ( + pack_images_data + and "keys" in pack_images_data + and new_images_key in pack_images_data["keys"] + ): + saved_images = pack_images_data["keys"][new_images_key]["images"] + if saved_images: + if meta["debug"]: + console.print( + f"[yellow]Using saved images from pack_image_links.json for {new_images_key}" + ) + + meta[new_images_key] = [] + for img in saved_images: + meta[new_images_key].append( + { + "img_url": img.get("img_url", ""), + "raw_url": img.get("raw_url", ""), + "web_url": img.get("web_url", ""), + } + ) + + if new_images_key in meta and meta[new_images_key]: + desc_parts.append("[center]\n\n") + # Use the summary corresponding to the current bdinfo + desc_parts.append( + f"[spoiler={edition}][code]{summary}[/code][/spoiler]\n\n" + ) + if meta["debug"]: + console.print("[yellow]Using original uploaded images for first disc") + desc_parts.append("[center]") + for img in meta[new_images_key]: + web_url = img["web_url"] + raw_url = img["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " + desc_parts.append(image_str) + desc_parts.append("[/center]\n\n") + else: + desc_parts.append("[center]\n\n") + # Use the summary corresponding to the current bdinfo + desc_parts.append( + f"[spoiler={edition}][code]{summary}[/code][/spoiler]\n\n" + ) + desc_parts.append("[/center]\n\n") + meta["retry_count"] += 1 + meta[new_images_key] = [] + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", f"PLAYLIST_{i}-*.png" + ) + if not new_screens: + use_vs = meta.get("vapoursynth", False) + try: + await disc_screenshots( + meta, + f"PLAYLIST_{i}", + bdinfo, + meta["uuid"], + meta["base_dir"], + use_vs, + [], + meta.get("ffdebug", False), + multi_screens, + True, + ) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", f"PLAYLIST_{i}-*.png" + ) + if new_screens and not meta.get("skip_imghost_upload", False): + uploaded_images, _ = await upload_screens( + meta, + multi_screens, + 1, + 0, + multi_screens, + new_screens, + {new_images_key: meta[new_images_key]}, + allowed_hosts=approved_image_hosts, + ) + if uploaded_images and not meta.get("skip_imghost_upload", False): + await self.common.save_image_links(meta, new_images_key, uploaded_images) + for img in uploaded_images: + meta[new_images_key].append( + { + "img_url": img["img_url"], + "raw_url": img["raw_url"], + "web_url": img["web_url"], + } + ) + + desc_parts.append("[center]") + for img in uploaded_images: + web_url = img["web_url"] + raw_url = img["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " + desc_parts.append(image_str) + desc_parts.append("[/center]\n\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + async with aiofiles.open(meta_filename, "w") as f: + await f.write(json.dumps(meta, indent=4)) + + # Handle multiple discs case + elif len(discs) > 1: + # Initialize retry_count if not already set + if "retry_count" not in meta: + meta["retry_count"] = 0 + + total_discs_to_process = min(len(discs), process_limit) + processed_count = 0 + if multi_screens != 0: + console.print("[cyan]Processing screenshots for packed content (multiScreens)[/cyan]") + console.print(f"[cyan]{total_discs_to_process} files (processLimit)[/cyan]") + + for i, each in enumerate(discs): + # Set a unique key per disc for managing images + new_images_key 
= f"new_images_disc_{i}" + + if i == 0: + desc_parts.append("[center]") + if each["type"] == "BDMV": + desc_parts.append(f"{each.get('name', 'BDINFO')}\n\n") + elif each["type"] == "DVD": + desc_parts.append(f"{each['name']}:\n") + desc_parts.append( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]" + ) + desc_parts.append( + f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n" + ) + # For the first disc, use images from `meta['image_list']` and add screenheader if applicable + if meta["debug"]: + console.print("[yellow]Using original uploaded images for first disc") + if screenheader is not None: + desc_parts.append("[/center]\n\n") + desc_parts.append(screenheader + "\n") + desc_parts.append("[center]") + for img_index in range(len(images[: int(meta["screens"])])): + web_url = images[img_index]["web_url"] + raw_url = images[img_index]["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + desc_parts.append(image_str) + if screensPerRow and (img_index + 1) % screensPerRow == 0: + desc_parts.append("\n") + desc_parts.append("[/center]\n\n") + else: + if multi_screens != 0: + processed_count += 1 + disc_name = each.get("name", f"Disc {i}") + print( + f"\rProcessing disc {processed_count}/{total_discs_to_process}: {disc_name[:40]}{'...' if len(disc_name) > 40 else ''}", + end="", + flush=True, + ) + # Check if screenshots exist for the current disc key + # Check for saved images first + if ( + pack_images_data + and "keys" in pack_images_data + and new_images_key in pack_images_data["keys"] + ): + saved_images = pack_images_data["keys"][new_images_key]["images"] + if saved_images: + if meta["debug"]: + console.print( + f"[yellow]Using saved images from pack_image_links.json for {new_images_key}" + ) + + meta[new_images_key] = [] + for img in saved_images: + meta[new_images_key].append( + { + "img_url": img.get("img_url", ""), + "raw_url": img.get("raw_url", ""), + "web_url": img.get("web_url", ""), + } + ) + if new_images_key in meta and meta[new_images_key]: + if meta["debug"]: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + desc_parts.append("[center]") + if each["type"] == "BDMV": + desc_parts.append( + f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n" + ) + elif each["type"] == "DVD": + desc_parts.append(f"{each['name']}:\n") + desc_parts.append( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] " + ) + desc_parts.append( + f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n" + ) + desc_parts.append("[/center]\n\n") + # Use existing URLs from meta to write to descfile + desc_parts.append("[center]") + for img in meta[new_images_key]: + web_url = img["web_url"] + raw_url = img["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + desc_parts.append(image_str) + desc_parts.append("[/center]\n\n") + else: + # Increment retry_count for tracking but use unique disc keys for each disc + meta["retry_count"] += 1 + meta[new_images_key] = [] + desc_parts.append("[center]") + if each["type"] == "BDMV": + desc_parts.append( + f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n" + ) + elif each["type"] == "DVD": + desc_parts.append(f"{each['name']}:\n") + desc_parts.append( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] " + ) + desc_parts.append( + 
f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n" + ) + desc_parts.append("[/center]\n\n") + # Check if new screenshots already exist before running prep.screenshots + if each["type"] == "BDMV": + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png" + ) + elif each["type"] == "DVD": + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"{meta['discs'][i]['name']}-*.png", + ) + if not new_screens: + if meta["debug"]: + console.print( + f"[yellow]No new screens for {new_images_key}; creating new screenshots" + ) + # Run prep.screenshots if no screenshots are present + if each["type"] == "BDMV": + use_vs = meta.get("vapoursynth", False) + try: + await disc_screenshots( + meta, + f"FILE_{i}", + each["bdinfo"], + meta["uuid"], + meta["base_dir"], + use_vs, + [], + meta.get("ffdebug", False), + multi_screens, + True, + ) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png" + ) + if each["type"] == "DVD": + try: + await dvd_screenshots(meta, i, multi_screens, True) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"{meta['discs'][i]['name']}-*.png", + ) + + if new_screens and not meta.get("skip_imghost_upload", False): + uploaded_images, _ = await upload_screens( + meta, + multi_screens, + 1, + 0, + multi_screens, + new_screens, + {new_images_key: meta[new_images_key]}, + allowed_hosts=approved_image_hosts, + ) + if uploaded_images and not meta.get("skip_imghost_upload", False): + await self.common.save_image_links(meta, new_images_key, uploaded_images) + # Append each uploaded image's data to `meta[new_images_key]` + for img in uploaded_images: + meta[new_images_key].append( + { + "img_url": img["img_url"], + "raw_url": img["raw_url"], + "web_url": img["web_url"], + } + ) + + # Write new URLs to descfile + desc_parts.append("[center]") + for img in uploaded_images: + web_url = img["web_url"] + raw_url = img["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + desc_parts.append(image_str) + desc_parts.append("[/center]\n\n") + + # Save the updated meta to `meta.json` after upload + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + async with aiofiles.open(meta_filename, "w") as f: + await f.write(json.dumps(meta, indent=4)) + console.print() + + # Handle single file case + filelist = meta.get("filelist", []) + if len(filelist) == 1: + if meta.get("comparison") and meta.get("comparison_groups"): + desc_parts.append("[center]") + comparison_groups = meta.get("comparison_groups", {}) + sorted_group_indices = sorted(comparison_groups.keys(), key=lambda x: int(x)) + + comp_sources = [] + for group_idx in sorted_group_indices: + group_data = comparison_groups[group_idx] + group_name = group_data.get("name", f"Group {group_idx}") + comp_sources.append(group_name) + + sources_string = ", ".join(comp_sources) + desc_parts.append(f"[comparison={sources_string}]\n") + + images_per_group = min( + [len(comparison_groups[idx].get("urls", [])) for idx in sorted_group_indices] + ) + + for img_idx in range(images_per_group): + for group_idx in sorted_group_indices: + group_data = comparison_groups[group_idx] + urls = group_data.get("urls", []) + if img_idx < len(urls): + img_url = urls[img_idx].get("raw_url", "") + if img_url: + 
desc_parts.append(f"{img_url}\n") + + desc_parts.append("[/comparison][/center]\n\n") + + if screenheader is not None: + desc_parts.append(screenheader + "\n") + desc_parts.append("[center]") + for img_index in range(len(images[: int(meta["screens"])])): + web_url = images[img_index]["web_url"] + raw_url = images[img_index]["raw_url"] + desc_parts.append( + f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] " + ) + if screensPerRow and (img_index + 1) % screensPerRow == 0: + desc_parts.append("\n") + desc_parts.append("[/center]") + + # Handle multiple files case + # Initialize character counter + char_count = 0 + max_char_limit = char_limit # Character limit + other_files_spoiler_open = False # Track if "Other files" spoiler has been opened + total_files_to_process = min(len(filelist), process_limit) + processed_count = 0 + if multi_screens != 0 and total_files_to_process > 1: + console.print("[cyan]Processing screenshots for packed content (multiScreens)[/cyan]") + console.print(f"[cyan]{total_files_to_process} files (processLimit)[/cyan]") + + # First Pass: Create and Upload Images for Each File + for i, file in enumerate(filelist): + if i >= process_limit: + # console.print("[yellow]Skipping processing more files as they exceed the process limit.") + continue + if multi_screens != 0: + if total_files_to_process > 1: + processed_count += 1 + filename = os.path.basename(file) + print( + f"\rProcessing file {processed_count}/{total_files_to_process}: {filename[:40]}{'...' if len(filename) > 40 else ''}", + end="", + flush=True, + ) + if i > 0: + new_images_key = f"new_images_file_{i}" + # Check for saved images first + if ( + pack_images_data + and "keys" in pack_images_data + and new_images_key in pack_images_data["keys"] + ): + saved_images = pack_images_data["keys"][new_images_key]["images"] + if saved_images: + if meta["debug"]: + console.print( + f"[yellow]Using saved images from pack_image_links.json for {new_images_key}" + ) + + meta[new_images_key] = [] + for img in saved_images: + meta[new_images_key].append( + { + "img_url": img.get("img_url", ""), + "raw_url": img.get("raw_url", ""), + "web_url": img.get("web_url", ""), + } + ) + if new_images_key not in meta or not meta[new_images_key]: + meta[new_images_key] = [] + # Proceed with image generation if not already present + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + # If no screenshots exist, create them + if not new_screens: + if meta["debug"]: + console.print( + f"[yellow]No existing screenshots for {new_images_key}; generating new ones." 
+ ) + try: + await screenshots( + file, + f"FILE_{i}", + meta["uuid"], + meta["base_dir"], + meta, + multi_screens, + True, + None, + ) + await asyncio.sleep(0.1) + except Exception as e: + print(f"Error during generic screenshot capture: {e}") + + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + # Upload generated screenshots + if new_screens and not meta.get("skip_imghost_upload", False): + uploaded_images, _ = await upload_screens( + meta, + multi_screens, + 1, + 0, + multi_screens, + new_screens, + {new_images_key: meta[new_images_key]}, + allowed_hosts=approved_image_hosts, + ) + if uploaded_images and not meta.get("skip_imghost_upload", False): + await self.common.save_image_links(meta, new_images_key, uploaded_images) + for img in uploaded_images: + meta[new_images_key].append( + { + "img_url": img["img_url"], + "raw_url": img["raw_url"], + "web_url": img["web_url"], + } + ) + + await asyncio.sleep(0.1) + + await asyncio.sleep(0.05) + + # Save updated meta + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + async with aiofiles.open(meta_filename, "w") as f: + await f.write(json.dumps(meta, indent=4)) + await asyncio.sleep(0.1) + + # Second Pass: Process MediaInfo and Write Descriptions + if len(filelist) > 1: + for i, file in enumerate(filelist): + if i >= process_limit: + continue + # Extract filename directly from the file path + filename = ( + os.path.splitext(os.path.basename(file.strip()))[0].replace("[", "").replace("]", "") + ) + + # If we are beyond the file limit, add all further files in a spoiler + if multi_screens != 0: + if i >= file_limit: + if not other_files_spoiler_open: + desc_parts.append("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") + other_files_spoiler_open = True + + # Write filename in BBCode format with MediaInfo in spoiler if not the first file + if multi_screens != 0: + if i > 0 and char_count < max_char_limit: + mi_dump = MediaInfo.parse( + file, output="STRING", full=False, mediainfo_options={"inform_version": "1"} + ) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + desc_parts.append( + f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n" + ) + char_count += len( + f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n" + ) + else: + if i == 0 and images and screenheader is not None: + desc_parts.append(screenheader + "\n") + char_count += len(screenheader + "\n") + desc_parts.append(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") + + # Write images if they exist + new_images_key = f"new_images_file_{i}" + if i == 0: # For the first file, use 'image_list' key and add screenheader if applicable + if images: + if screenheader is not None: + desc_parts.append(screenheader + "\n") + char_count += len(screenheader + "\n") + desc_parts.append("[center]") + char_count += len("[center]") + for img_index in range(len(images)): + web_url = images[img_index]["web_url"] + raw_url = images[img_index]["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " + desc_parts.append(image_str) + char_count += len(image_str) + if screensPerRow and (img_index + 1) % screensPerRow == 0: + desc_parts.append("\n") + desc_parts.append("[/center]\n\n") + char_count += len("[/center]\n\n") + elif multi_screens != 0: + if new_images_key in meta and meta[new_images_key]: + desc_parts.append("[center]") + 
char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img["web_url"] + raw_url = img["raw_url"] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " + desc_parts.append(image_str) + char_count += len(image_str) + desc_parts.append("[/center]\n\n") + char_count += len("[/center]\n\n") + + if other_files_spoiler_open: + desc_parts.append("[/spoiler][/center]\n") + char_count += len("[/spoiler][/center]\n") + + if char_count >= 1 and meta["debug"]: + console.print(f"[yellow]Total characters written to description: {char_count}") + if total_files_to_process > 1: + console.print() + + description = "".join(part for part in desc_parts if part.strip()) + + return description + + async def get_screens_per_row(self, tracker): + try: + # If screensPerRow is set, use that to determine how many screenshots should be on each row. Otherwise, use 2 as default + screensPerRow = int(self.config["DEFAULT"].get("screens_per_row", 2)) + if tracker == "HUNO": + width = int(self.config["DEFAULT"].get("thumbnail_size", "350")) + # Adjust screensPerRow to keep total width below 1100 + while screensPerRow * width > 1100 and screensPerRow > 1: + screensPerRow -= 1 + except Exception: + screensPerRow = 2 + return screensPerRow + + async def menu_section(self, meta, tracker): + menu_image_section = "" + try: + disc_menu_header = await self.menu_screenshot_header(meta, tracker) + screensPerRow = await self.get_screens_per_row(tracker) + if meta.get("is_disc"): + menu_parts = [] + menu_images = meta.get("menu_images", []) + if disc_menu_header and menu_images: + menu_parts.append(disc_menu_header + "\n") + if menu_images: + menu_parts.append("[center]") + for img_index, image in enumerate(menu_images): + web_url = image.get("web_url") + raw_url = image.get("raw_url") + if not web_url or not raw_url: + continue + menu_parts.append( + f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] " + ) + if screensPerRow and (img_index + 1) % screensPerRow == 0: + menu_parts.append("\n") + menu_parts.append("[/center]\n\n") + menu_image_section = "".join(menu_parts) + except Exception as e: + console.print(f"[yellow]Warning: Error processing disc menu section: {str(e)}[/yellow]") + + return menu_image_section diff --git a/src/get_disc.py b/src/get_disc.py index 7ad61529a..e5a76ab1a 100644 --- a/src/get_disc.py +++ b/src/get_disc.py @@ -1,5 +1,7 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import itertools +from bin.MI.get_linux_mi import download_dvd_mediainfo from src.discparse import DiscParse @@ -45,18 +47,16 @@ async def get_disc(meta): } discs.append(disc) if is_disc == "BDMV": + if meta.get('site_check', False): + print('BDMV disc checking is not supported in site_check mode, yet.') + return Exception if meta.get('edit', False) is False: discs, bdinfo = await parse.get_bdinfo(meta, discs, meta['uuid'], meta['base_dir'], meta.get('discs', [])) else: discs, bdinfo = await parse.get_bdinfo(meta, meta['discs'], meta['uuid'], meta['base_dir'], meta['discs']) - elif is_disc == "DVD": - discs = await parse.get_dvdinfo(discs, base_dir=meta['base_dir']) - export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') - export.write(discs[0]['ifo_mi']) - export.close() - export_clean = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') - export_clean.write(discs[0]['ifo_mi']) - export_clean.close() + elif 
is_disc == "DVD" and not meta.get('emby', False): + download_dvd_mediainfo(meta['base_dir'], debug=meta['debug']) + discs = await parse.get_dvdinfo(discs, base_dir=meta['base_dir'], debug=meta['debug']) elif is_disc == "HDDVD": discs = await parse.get_hddvd_info(discs, meta) export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') diff --git a/src/get_name.py b/src/get_name.py index 45e652355..cc88dc8b9 100644 --- a/src/get_name.py +++ b/src/get_name.py @@ -1,7 +1,38 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import anitopy +import cli_ui +import os +import re +import sys + +from guessit import guessit + +from data.config import config +from src.cleanup import cleanup, reset_terminal from src.console import console +from src.trackers.COMMON import COMMON + +TRACKER_DISC_REQUIREMENTS = { + 'ULCX': {'region': 'mandatory', 'distributor': 'mandatory'}, + 'SHRI': {'region': 'mandatory', 'distributor': 'optional'}, + 'OTW': {'region': 'mandatory', 'distributor': 'optional'}, +} async def get_name(meta): + active_trackers = [ + tracker for tracker in TRACKER_DISC_REQUIREMENTS.keys() + if tracker in meta.get('trackers', []) + ] + if active_trackers: + region, distributor, trackers_to_remove = await missing_disc_info(meta, active_trackers) + for tracker in trackers_to_remove: + if tracker in meta['trackers']: + meta['trackers'].remove(tracker) + if distributor and 'SKIPPED' not in distributor: + meta['distributor'] = distributor + if region and 'SKIPPED' not in region: + meta['region'] = region type = meta.get('type', "").upper() title = meta.get('title', "") alt_title = meta.get('aka', "") @@ -31,9 +62,9 @@ async def get_name(meta): episode_title = "" if meta.get('is_disc', "") == "BDMV": # Disk video_codec = meta.get('video_codec', "") - region = meta.get('region', "") + region = meta.get('region', "") if meta.get('region', "") is not None else "" elif meta.get('is_disc', "") == "DVD": - region = meta.get('region', "") + region = meta.get('region', "") if meta.get('region', "") is not None else "" dvd_size = meta.get('dvd_size', "") else: video_codec = meta.get('video_codec', "") @@ -71,7 +102,7 @@ async def get_name(meta): name = f"{title} {alt_title} {year} {three_d} {edition} {hybrid} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] elif meta['is_disc'] == 'DVD': - name = f"{title} {alt_title} {year} {edition} {repack} {source} {dvd_size} {audio}" + name = f"{title} {alt_title} {year} {repack} {edition} {region} {source} {dvd_size} {audio}" potential_missing = ['edition', 'distributor'] elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" @@ -103,7 +134,7 @@ async def get_name(meta): name = f"{title} {year} {alt_title} {season}{episode} {three_d} {edition} {hybrid} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] if meta['is_disc'] == 'DVD': - name = f"{title} {alt_title} {season}{episode}{three_d} {edition} {repack} {source} {dvd_size} {audio}" + name = f"{title} {year} {alt_title} {season}{episode}{three_d} {repack} {edition} {region} {source} {dvd_size} {audio}" potential_missing = ['edition', 'distributor'] elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" @@ -127,7 +158,7 @@ async 
def get_name(meta): name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] elif type == "DVDRIP": - name = f"{title} {alt_title} {season} {source} DVDRip {video_encode}" + name = f"{title} {year} {alt_title} {season} {source} DVDRip {audio} {video_encode}" potential_missing = [] try: @@ -151,3 +182,283 @@ async def clean_filename(name): for char in invalid: name = name.replace(char, '-') return name + + +async def extract_title_and_year(meta, filename): + basename = os.path.basename(filename) + basename = os.path.splitext(basename)[0] + + secondary_title = None + year = None + + # Check for AKA patterns first + aka_patterns = [' AKA ', '.aka.', ' aka ', '.AKA.'] + for pattern in aka_patterns: + if pattern in basename: + aka_parts = basename.split(pattern, 1) + if len(aka_parts) > 1: + primary_title = aka_parts[0].strip() + secondary_part = aka_parts[1].strip() + + # Look for a year in the primary title + year_match_primary = re.search(r'\b(19|20)\d{2}\b', primary_title) + if year_match_primary: + year = year_match_primary.group(0) + + # Process secondary title + secondary_match = re.match(r"^(\d+)", secondary_part) + if secondary_match: + secondary_title = secondary_match.group(1) + else: + # Catch everything after AKA until it hits a year or release info + year_or_release_match = re.search(r'\b(19|20)\d{2}\b|\bBluRay\b|\bREMUX\b|\b\d+p\b|\bDTS-HD\b|\bAVC\b', secondary_part) + if year_or_release_match: + # Check if we found a year in the secondary part + if re.match(r'\b(19|20)\d{2}\b', year_or_release_match.group(0)): + # If no year was found in primary title, or we want to override + if not year: + year = year_or_release_match.group(0) + + secondary_title = secondary_part[:year_or_release_match.start()].strip() + else: + secondary_title = secondary_part + + primary_title = primary_title.replace('.', ' ') + secondary_title = secondary_title.replace('.', ' ') + return primary_title, secondary_title, year + + # if not AKA, catch titles that begin with a year + year_start_match = re.match(r'^(19|20)\d{2}', basename) + if year_start_match: + title = year_start_match.group(0) + rest = basename[len(title):].lstrip('. 
_-') + # Look for another year in the rest of the title + year_match = re.search(r'\b(19|20)\d{2}\b', rest) + year = year_match.group(0) if year_match else None + if year: + return title, None, year + + folder_name = os.path.basename(meta['uuid']) if meta['uuid'] else "" + if meta['debug']: + console.print(f"[cyan]Extracting title and year from folder name: {folder_name}[/cyan]") + # lets do some subsplease handling + if 'subsplease' in folder_name.lower(): + parsed_title = anitopy.parse( + guessit(folder_name, {"excludes": ["country", "language"]})['title'] + )['anime_title'] + if parsed_title: + return parsed_title, None, None + + year_pattern = r'(18|19|20)\d{2}' + res_pattern = r'\b(480|576|720|1080|2160)[pi]\b' + type_pattern = r'(WEBDL|BluRay|REMUX|HDRip|Blu-Ray|Web-DL|webrip|web-rip|DVD|BD100|BD50|BD25|HDTV|UHD|HDR|DOVI|REPACK|Season)(?=[._\-\s]|$)' + season_pattern = r'\bS(\d{1,3})\b' + season_episode_pattern = r'\bS(\d{1,3})E(\d{1,3})\b' + date_pattern = r'\b(20\d{2})\.(\d{1,2})\.(\d{1,2})\b' + extension_pattern = r'\.(mkv|mp4)$' + + # Check for the specific pattern: year.year (e.g., "1970.2014") + double_year_pattern = r'\b(18|19|20)\d{2}\.(18|19|20)\d{2}\b' + double_year_match = re.search(double_year_pattern, folder_name) + + if double_year_match: + full_match = double_year_match.group(0) + years = full_match.split('.') + first_year = years[0] + second_year = years[1] + + if meta['debug']: + console.print(f"[cyan]Found double year pattern: {full_match}, using {second_year} as year[/cyan]") + + modified_folder_name = folder_name.replace(full_match, first_year) + year_match = None + res_match = re.search(res_pattern, modified_folder_name, re.IGNORECASE) + season_pattern_match = re.search(season_pattern, modified_folder_name, re.IGNORECASE) + season_episode_match = re.search(season_episode_pattern, modified_folder_name, re.IGNORECASE) + extension_match = re.search(extension_pattern, modified_folder_name, re.IGNORECASE) + type_match = re.search(type_pattern, modified_folder_name, re.IGNORECASE) + + indices = [('year', double_year_match.end(), second_year)] + if res_match: + indices.append(('res', res_match.start(), res_match.group())) + if season_pattern_match: + indices.append(('season', season_pattern_match.start(), season_pattern_match.group())) + if season_episode_match: + indices.append(('season_episode', season_episode_match.start(), season_episode_match.group())) + if extension_match: + indices.append(('extension', extension_match.start(), extension_match.group())) + if type_match: + indices.append(('type', type_match.start(), type_match.group())) + + folder_name_for_title = modified_folder_name + actual_year = second_year + + else: + date_match = re.search(date_pattern, folder_name) + year_match = re.search(year_pattern, folder_name) + res_match = re.search(res_pattern, folder_name, re.IGNORECASE) + season_pattern_match = re.search(season_pattern, folder_name, re.IGNORECASE) + season_episode_match = re.search(season_episode_pattern, folder_name, re.IGNORECASE) + extension_match = re.search(extension_pattern, folder_name, re.IGNORECASE) + type_match = re.search(type_pattern, folder_name, re.IGNORECASE) + + indices = [] + if date_match: + indices.append(('date', date_match.start(), date_match.group())) + if year_match and not date_match: + indices.append(('year', year_match.start(), year_match.group())) + if res_match: + indices.append(('res', res_match.start(), res_match.group())) + if season_pattern_match: + indices.append(('season', season_pattern_match.start(), 
season_pattern_match.group())) + if season_episode_match: + indices.append(('season_episode', season_episode_match.start(), season_episode_match.group())) + if extension_match: + indices.append(('extension', extension_match.start(), extension_match.group())) + if type_match: + indices.append(('type', type_match.start(), type_match.group())) + + folder_name_for_title = folder_name + actual_year = year_match.group() if year_match and not date_match else None + + if indices: + indices.sort(key=lambda x: x[1]) + first_type, first_index, first_value = indices[0] + title_part = folder_name_for_title[:first_index] + title_part = re.sub(r'[\.\-_ ]+$', '', title_part) + # Handle unmatched opening parenthesis + if title_part.count('(') > title_part.count(')'): + paren_pos = title_part.rfind('(') + content_after_paren = folder_name[paren_pos + 1:first_index].strip() + + if content_after_paren: + secondary_title = content_after_paren + + title_part = title_part[:paren_pos].rstrip() + else: + title_part = folder_name + + replacements = { + '_': ' ', + '.': ' ', + 'DVD9': '', + 'DVD5': '', + 'DVDR': '', + 'BDR': '', + 'HDDVD': '', + 'WEB-DL': '', + 'WEBRip': '', + 'WEB': '', + 'BluRay': '', + 'Blu-ray': '', + 'HDTV': '', + 'DVDRip': '', + 'REMUX': '', + 'HDR': '', + 'UHD': '', + '4K': '', + 'DVD': '', + 'HDRip': '', + 'BDMV': '', + 'R1': '', + 'R2': '', + 'R3': '', + 'R4': '', + 'R5': '', + 'R6': '', + "Director's Cut": '', + "Extended Edition": '', + "directors cut": '', + "director cut": '', + "itunes": '', + } + filename = re.sub(r'\s+', ' ', filename) + filename = await multi_replace(title_part, replacements) + secondary_title = await multi_replace(secondary_title or '', replacements) + if not secondary_title: + secondary_title = None + if filename: + # Look for content in parentheses + bracket_pattern = r'\s*\(([^)]+)\)\s*' + bracket_match = re.search(bracket_pattern, filename) + + if bracket_match: + bracket_content = bracket_match.group(1).strip() + bracket_content = await multi_replace(bracket_content, replacements) + + # Only add to secondary_title if we don't already have one + if not secondary_title and bracket_content: + secondary_title = bracket_content + secondary_title = re.sub(r'[\.\-_ ]+$', '', secondary_title) + + filename = re.sub(bracket_pattern, ' ', filename) + filename = re.sub(r'\s+', ' ', filename).strip() + + if filename: + return filename, secondary_title, actual_year + + # If no pattern match works but there's still a year in the filename, extract it + year_match = re.search(r'(?= cooldown_seconds: + available.append(tracker) + else: + wait_time = cooldown_seconds - time_since_last + waiting.append((tracker, wait_time)) + + return available, waiting + + +async def get_tracker_data(video, meta, search_term=None, search_file_folder=None, cat=None, only_id=False): + found_match = False + base_dir = meta['base_dir'] if search_term: # Check if a specific tracker is already set in meta - tracker_keys = { - 'aither': 'AITHER', - 'blu': 'BLU', - 'lst': 'LST', - 'ulcx': 'ULCX', - 'oe': 'OE', - 'huno': 'HUNO', - 'btn': 'BTN', - 'bhd': 'BHD', - 'hdb': 'HDB', - 'ptp': 'PTP', - } + if not meta.get('emby', False): + tracker_keys = { + # preference some unit3d based trackers first + # since they can return tmdb/imdb/tvdb ids + 'aither': 'AITHER', + 'blu': 'BLU', + 'lst': 'LST', + 'ulcx': 'ULCX', + 'oe': 'OE', + 'huno': 'HUNO', + 'ant': 'ANT', + 'btn': 'BTN', + 'bhd': 'BHD', + 'hdb': 'HDB', + 'sp': 'SP', + 'rf': 'RF', + 'otw': 'OTW', + 'yus': 'YUS', + 'dp': 'DP', + 'ptp': 'PTP', + } + else: 
+ # Preference trackers with lesser overall torrents + # Leaving the more complete trackers free when really needed + tracker_keys = { + 'sp': 'SP', + 'otw': 'OTW', + 'dp': 'DP', + 'yus': 'YUS', + 'rf': 'RF', + 'oe': 'OE', + 'ulcx': 'ULCX', + 'huno': 'HUNO', + 'lst': 'LST', + 'ant': 'ANT', + 'hdb': 'HDB', + 'bhd': 'BHD', + 'blu': 'BLU', + 'aither': 'AITHER', + 'btn': 'BTN', + 'ptp': 'PTP', + } specific_tracker = [tracker_keys[key] for key in tracker_keys if meta.get(key) is not None] - async def process_tracker(tracker_name, meta, only_id): - nonlocal found_match - if tracker_class_map is None: - print(f"Tracker class for {tracker_name} not found.") + # Filter out trackers that don't have valid config or api_key/announce_url + if specific_tracker: + valid_trackers = [] + for tracker in specific_tracker: + if "BTN" in tracker: + valid_trackers.append(tracker) + continue + else: + tracker_config = config.get('TRACKERS', {}).get(tracker, {}) + api_key = tracker_config.get('api_key', '') + announce_url = tracker_config.get('announce_url', '') + + if not tracker_config: + if meta.get('debug'): + console.print(f"[yellow]Tracker {tracker} not found in config, skipping[/yellow]") + continue + + # Accept tracker if it has either a valid api_key or announce_url + has_api_key = api_key and api_key.strip() != '' + has_announce_url = announce_url and announce_url.strip() != '' + + if not has_api_key and not has_announce_url: + if meta.get('debug'): + console.print(f"[yellow]Tracker {tracker} has no api_key or announce_url set, skipping[/yellow]") + continue + + valid_trackers.append(tracker) + + specific_tracker = valid_trackers + + if meta['debug']: + console.print(f"[blue]Specific trackers to check: {specific_tracker}[/blue]") + + if specific_tracker: + if meta.get('is_disc', False) and "ANT" in specific_tracker: + specific_tracker.remove("ANT") + if meta.get('category') == "MOVIE" and "BTN" in specific_tracker: + specific_tracker.remove("BTN") + + meta_trackers = meta.get('trackers', []) + if isinstance(meta_trackers, str): + meta_trackers = [t.strip().upper() for t in meta_trackers.split(',')] + elif isinstance(meta_trackers, list): + meta_trackers = [t.upper() if isinstance(t, str) else str(t).upper() for t in meta_trackers] + + # for just searching, remove any specific trackers already in meta['trackers'] + # since that tracker was found in client, and remove it from meta['trackers'] + for tracker in list(specific_tracker): + if tracker in meta_trackers and meta.get('site_check', False): + specific_tracker.remove(tracker) + meta_trackers.remove(tracker) + + # Update meta['trackers'] preserving list format + if meta_trackers: + meta['trackers'] = meta_trackers + else: + meta['trackers'] = [] + + async def process_tracker(tracker_name, meta, only_id): + nonlocal found_match + if tracker_class_map is None: + print(f"Tracker class for {tracker_name} not found.") + return meta + + tracker_instance = tracker_class_map[tracker_name](config=config) + try: + updated_meta, match = await update_metadata_from_tracker( + tracker_name, tracker_instance, meta, search_term, search_file_folder, only_id + ) + if match: + found_match = True + if meta.get('debug'): + console.print(f"[green]Match found on tracker: {tracker_name}[/green]") + meta['matched_tracker'] = tracker_name + await save_tracker_timestamp(tracker_name, base_dir=base_dir) + return updated_meta + except aiohttp.ClientSSLError: + await save_tracker_timestamp(tracker_name, base_dir=base_dir) + print(f"{tracker_name} tracker request failed due to SSL 
error.") + except requests.exceptions.ConnectionError as conn_err: + await save_tracker_timestamp(tracker_name, base_dir=base_dir) + print(f"{tracker_name} tracker request failed due to connection error: {conn_err}") return meta - tracker_instance = tracker_class_map[tracker_name](config=config) - try: - updated_meta, match = await update_metadata_from_tracker( - tracker_name, tracker_instance, meta, search_term, search_file_folder, only_id - ) - if match: - found_match = True - if meta.get('debug'): - console.print(f"[green]Match found on tracker: {tracker_name}[/green]") - meta['matched_tracker'] = tracker_name - return updated_meta - except aiohttp.ClientSSLError: - print(f"{tracker_name} tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"{tracker_name} tracker request failed due to connection error: {conn_err}") - return meta - - # If a specific tracker is found, process only that tracker - for tracker in specific_tracker: - if tracker == "BTN": - btn_id = meta.get('btn') - btn_api = config['DEFAULT'].get('btn_api') - await get_btn_torrents(btn_api, btn_id, meta) - if meta.get('imdb_id') != 0: - found_match = True - break + while not found_match and specific_tracker: + meta_trackers = meta.get('trackers', []) + if isinstance(meta_trackers, str): + meta_trackers = [t.strip().upper() for t in meta_trackers.split(',')] + elif isinstance(meta_trackers, list): + meta_trackers = [t.upper() if isinstance(t, str) else t for t in meta_trackers] + + available_trackers, waiting_trackers = await get_available_trackers(specific_tracker, base_dir, debug=meta['debug']) + + if available_trackers: + if meta['debug'] or meta.get('emby', False): + console.print(f"[green]Available trackers: {', '.join(available_trackers)}[/green]") + tracker_to_process = available_trackers[0] + else: + if waiting_trackers: + waiting_trackers.sort(key=lambda x: x[1]) + tracker_to_process, wait_time = waiting_trackers[0] + + cooldown_info = ", ".join( + f"{tracker} ({wait_time:.1f}s)" for tracker, wait_time in waiting_trackers + ) + for remaining in range(int(wait_time), -1, -1): + msg = (f"[yellow]All specific trackers in cooldown. " + f"Waiting {remaining:.1f} seconds for {tracker_to_process}. 
" + f"Cooldowns: {cooldown_info}[/yellow]") + console.print(msg, end='\r') + time.sleep(1) + console.print() + + else: + if meta['debug']: + console.print("[red]No specific trackers available[/red]") + break + + # Process the selected tracker + if tracker_to_process == "BTN": + btn_id = meta.get('btn') + btn_api = config['DEFAULT'].get('btn_api') + if btn_api and len(btn_api) > 25: + imdb, tvdb = await get_btn_torrents(btn_api, btn_id, meta) + if imdb != 0 or tvdb != 0: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f"[green]Found BTN IDs: IMDb={imdb}, TVDb={tvdb}[/green]") + try: + if cli_ui.ask_yes_no("Do you want to use these ids?", default=True): + if imdb != 0: + meta['imdb_id'] = int(imdb) + if tvdb != 0: + meta['tvdb_id'] = int(tvdb) + found_match = True + meta['matched_tracker'] = "BTN" + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + else: + if imdb != 0: + meta['imdb_id'] = int(imdb) + if tvdb != 0: + meta['tvdb_id'] = int(tvdb) + found_match = True + meta['matched_tracker'] = "BTN" + await save_tracker_timestamp("BTN", base_dir=base_dir) + elif tracker_to_process == "ANT": + imdb_tmdb_list = await tracker_class_map['ANT'](config=config).get_data_from_files(meta) + if imdb_tmdb_list: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f"[green]Found ANT IDs: {imdb_tmdb_list}[/green]") + try: + if cli_ui.ask_yes_no("Do you want to use these ids?", default=True): + for d in imdb_tmdb_list: + meta.update(d) + found_match = True + meta['matched_tracker'] = "ANT" + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + await save_tracker_timestamp("ANT", base_dir=base_dir) + else: + meta = await process_tracker(tracker_to_process, meta, only_id) + + if not found_match: + if tracker_to_process in specific_tracker: + specific_tracker.remove(tracker_to_process) + remaining_available, remaining_waiting = await get_available_trackers(specific_tracker, base_dir, debug=meta['debug']) + + if remaining_available or remaining_waiting: + if meta['debug'] or meta.get('emby', False): + console.print(f"[yellow]No match found with {tracker_to_process}. Checking remaining trackers...[/yellow]") + else: + if meta['debug']: + console.print(f"[yellow]No match found with {tracker_to_process}. 
No more trackers available to check.[/yellow]") + break + + if found_match: + if meta.get('debug'): + console.print(f"[green]Successfully found match using tracker: {meta.get('matched_tracker', 'Unknown')}[/green]") else: - meta = await process_tracker(tracker, meta, only_id) - if found_match: - break + if meta['debug']: + console.print("[yellow]No matches found on any available specific trackers.[/yellow]") + else: # Process all trackers with API = true if no specific tracker is set in meta - tracker_order = ["PTP", "BHD", "BLU", "AITHER", "LST", "OE", "HDB", "HUNO", "ULCX"] + tracker_order = ["PTP", "HDB", "BHD", "BLU", "AITHER", "HUNO", "LST", "OE", "ULCX"] if cat == "TV" or meta.get('category') == "TV": if meta['debug']: console.print("[yellow]Detected TV content, skipping PTP tracker check") tracker_order = [tracker for tracker in tracker_order if tracker != "PTP"] + async def process_tracker(tracker_name, meta, only_id): + nonlocal found_match + if tracker_class_map is None: + print(f"Tracker class for {tracker_name} not found.") + return meta + + tracker_instance = tracker_class_map[tracker_name](config=config) + try: + updated_meta, match = await update_metadata_from_tracker( + tracker_name, tracker_instance, meta, search_term, search_file_folder, only_id + ) + if match: + found_match = True + if meta.get('debug'): + console.print(f"[green]Match found on tracker: {tracker_name}[/green]") + meta['matched_tracker'] = tracker_name + return updated_meta + except aiohttp.ClientSSLError: + print(f"{tracker_name} tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"{tracker_name} tracker request failed due to connection error: {conn_err}") + return meta + for tracker_name in tracker_order: if not found_match: # Stop checking once a match is found tracker_config = config['TRACKERS'].get(tracker_name, {}) if str(tracker_config.get('useAPI', 'false')).lower() == "true": meta = await process_tracker(tracker_name, meta, only_id) - if not found_match: - console.print("[yellow]No matches found on any trackers.[/yellow]") + if not found_match: + meta['no_tracker_match'] = True + if meta['debug']: + console.print("[yellow]No matches found on any trackers.[/yellow]") else: console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") + return meta + async def ping_unit3d(meta): from src.trackers.COMMON import COMMON diff --git a/src/getseasonep.py b/src/getseasonep.py index 7ce1341aa..a77c1a975 100644 --- a/src/getseasonep.py +++ b/src/getseasonep.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from src.console import console from guessit import guessit import anitopy @@ -89,7 +90,7 @@ async def get_season_episode(video, meta): # if the mal id is set, then we've already run get_romaji in tmdb.py if meta.get('mal_id') == 0 and meta['category'] == "TV": parsed = anitopy.parse(Path(video).name) - romaji, mal_id, eng_title, seasonYear, anilist_episodes, meta['demographic'] = await get_romaji(parsed['anime_title'], meta.get('mal_id', 0)) + romaji, mal_id, eng_title, seasonYear, anilist_episodes, meta['demographic'] = await get_romaji(parsed['anime_title'], meta.get('mal_id', 0), meta) if mal_id: meta['mal_id'] = mal_id if meta.get('tmdb_id') == 0: @@ -216,7 +217,12 @@ async def get_season_episode(video, meta): console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") await asyncio.sleep(3) else: - return meta + 
console.print("[bold red]Error determining if TV show is anime or not[/bold red]") + console.print("[bold yellow]Set manual season and episode[/bold yellow]") + season_int = 1 + season = "S01" + episode_int = 1 + episode = "E01" if meta.get('manual_season', None) is None: meta['season'] = season diff --git a/src/imdb.py b/src/imdb.py index b823eb8b0..415ede4e0 100644 --- a/src/imdb.py +++ b/src/imdb.py @@ -1,79 +1,17 @@ -from src.console import console -import json +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import asyncio +import cli_ui import httpx -from datetime import datetime -import os -import re - - -async def get_imdb_aka_api(imdb_id, manual_language=None): - if imdb_id == 0: - return "", None - if not str(imdb_id).startswith("tt"): - imdb_id = f"tt{imdb_id:07d}" - query = { - "query": f""" - query {{ - title(id: "{imdb_id}") {{ - id - titleText {{ - text - isOriginalTitle - }} - originalTitleText {{ - text - }} - countriesOfOrigin {{ - countries {{ - id - }} - }} - }} - }} - """ - } - - async with httpx.AsyncClient() as client: - try: - response = await client.post("https://api.graphql.imdb.com/", json=query, headers={"Content-Type": "application/json"}, timeout=10) - response.raise_for_status() - data = response.json() - except httpx.HTTPStatusError as e: - console.print(f"[red]IMDb API error: {e.response.status_code}[/red]") - return "", None - except httpx.RequestError as e: - console.print(f"[red]IMDb API Network error: {e}[/red]") - return "", None - - # Check if `data` and `title` exist - title_data = data.get("data", {}).get("title") - if title_data is None: - console.print("Title data is missing from response") - return "", None - - # Extract relevant fields from the response - aka_check = title_data.get("originalTitleText", {}) - if aka_check: - aka = title_data.get("originalTitleText", {}).get("text", "") - else: - return "", None - title_txt_check = title_data.get("titleText", {}) - if title_txt_check: - is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) - title_text = title_data.get("titleText", {}).get("text", "") - else: - return "", None - if manual_language: - original_language = manual_language - else: - original_language = None +import json +import sys - if title_text != aka: - aka = f" AKA {aka}" - elif is_original and aka: - aka = f" AKA {aka}" +from anitopy import parse as anitopy_parse +from datetime import datetime +from difflib import SequenceMatcher +from guessit import guessit - return aka, original_language +from src.cleanup import cleanup, reset_terminal +from src.console import console async def safe_get(data, path, default=None): @@ -116,6 +54,7 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): }} releaseYear {{ year + endYear }} titleType {{ id @@ -163,27 +102,37 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): }} episodes {{ episodes(first: 500) {{ - edges {{ - node {{ - id - titleText {{ - text - }} - releaseYear {{ - year - }} - releaseDate {{ - year - month - day + edges {{ + node {{ + id + series {{ + displayableEpisodeNumber {{ + displayableSeason {{ + season + }} + episodeNumber {{ + text + }} + }} + }} + titleText {{ + text + }} + releaseYear {{ + year + }} + releaseDate {{ + year + month + day + }} + }} }} + pageInfo {{ + hasNextPage + hasPreviousPage }} - }} - pageInfo {{ - hasNextPage - hasPreviousPage - }} - total + total }} }} runtimes(first: 10) {{ @@ -287,9 +236,17 @@ async def get_imdb_info_api(imdbID, 
manual_language=None, debug=False): language {{ text }} + attributes {{ + text + }} }} }} }} + countriesOfOrigin {{ + countries {{ + text + }} + }} }} }} """ @@ -312,9 +269,19 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): return imdb_info # Return empty if no data found imdb_info['imdbID'] = imdbID + imdb_info['imdb_url'] = f"https://www.imdb.com/title/%7BimdbID%7D" imdb_info['title'] = await safe_get(title_data, ['titleText', 'text']) - imdb_info['country'] = await safe_get(title_data, ['titleText', 'country', 'text']) + countries_list = await safe_get(title_data, ['countriesOfOrigin', 'countries'], []) + if isinstance(countries_list, list) and countries_list: + # First country for 'country' + imdb_info['country'] = countries_list[0].get('text', '') + # All countries joined for 'country_list' + imdb_info['country_list'] = ', '.join([c.get('text', '') for c in countries_list if isinstance(c, dict) and 'text' in c]) + else: + imdb_info['country'] = '' + imdb_info['country_list'] = '' imdb_info['year'] = await safe_get(title_data, ['releaseYear', 'year']) + imdb_info['end_year'] = await safe_get(title_data, ['releaseYear', 'endYear']) original_title = await safe_get(title_data, ['originalTitleText', 'text'], '') imdb_info['aka'] = original_title if original_title and original_title != imdb_info['title'] else imdb_info['title'] imdb_info['type'] = await safe_get(title_data, ['titleType', 'id'], None) @@ -329,19 +296,36 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): imdb_info['rating'] = await safe_get(title_data, ['ratingsSummary', 'aggregateRating'], 'N/A') - imdb_info['directors'] = [] - principal_credits = await safe_get(title_data, ['principalCredits'], []) - if isinstance(principal_credits, list): + async def get_credits(title_data, category_keyword): + people_list = [] + people_id_list = [] + principal_credits = await safe_get(title_data, ['principalCredits'], []) + + if not isinstance(principal_credits, list): + return people_list, people_id_list + for pc in principal_credits: category_text = await safe_get(pc, ['category', 'text'], '') - if 'Direct' in category_text: + + if category_keyword in category_text: credits = await safe_get(pc, ['credits'], []) for c in credits: - name_id = await safe_get(c, ['name', 'id'], '') - if name_id.startswith('nm'): - imdb_info['directors'].append(name_id) + name_obj = await safe_get(c, ['name'], {}) + person_id = await safe_get(name_obj, ['id'], '') + person_name = await safe_get(name_obj, ['nameText', 'text'], '') + + if person_id and person_name: + people_list.append(person_name) + people_id_list.append(person_id) break + return people_list, people_id_list + + imdb_info['directors'], imdb_info['directors_id'] = await get_credits(title_data, 'Direct') + imdb_info['creators'], imdb_info['creators_id'] = await get_credits(title_data, 'Creat') + imdb_info['writers'], imdb_info['writers_id'] = await get_credits(title_data, 'Writ') + imdb_info['stars'], imdb_info['stars_id'] = await get_credits(title_data, 'Star') + editions = await safe_get(title_data, ['runtimes', 'edges'], []) if editions: edition_list = [] @@ -378,6 +362,7 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): "title": await safe_get(edge, ['node', 'text']), "country": await safe_get(edge, ['node', 'country', 'text']), "language": await safe_get(edge, ['node', 'language', 'text']), + "attributes": await safe_get(edge, ['node', 'attributes'], []), } for edge in akas_edges ] @@ -391,6 +376,11 @@ 
async def get_imdb_info_api(imdbID, manual_language=None, debug=False): edges = await safe_get(episodes_data, ['edges'], []) for edge in edges: node = await safe_get(edge, ['node'], {}) + + series_info = await safe_get(node, ['series', 'displayableEpisodeNumber'], {}) + season_info = await safe_get(series_info, ['displayableSeason'], {}) + episode_number_info = await safe_get(series_info, ['episodeNumber'], {}) + episode_info = { 'id': await safe_get(node, ['id'], ''), 'title': await safe_get(node, ['titleText', 'text'], 'Unknown Title'), @@ -399,17 +389,52 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): 'year': await safe_get(node, ['releaseDate', 'year'], None), 'month': await safe_get(node, ['releaseDate', 'month'], None), 'day': await safe_get(node, ['releaseDate', 'day'], None), - } + }, + 'season': await safe_get(season_info, ['season'], 'unknown'), + 'episode_number': await safe_get(episode_number_info, ['text'], '') } imdb_info['episodes'].append(episode_info) + if imdb_info['episodes']: + seasons_data = {} + + for episode in imdb_info['episodes']: + season_str = episode.get('season', 'unknown') + release_year = episode.get('release_year') + + try: + season_int = int(season_str) if season_str != 'unknown' and season_str else None + except (ValueError, TypeError): + season_int = None + + if season_int is not None and release_year and isinstance(release_year, int): + if season_int not in seasons_data: + seasons_data[season_int] = set() + seasons_data[season_int].add(release_year) + + seasons_summary = [] + for season_num in sorted(seasons_data.keys()): + years = sorted(list(seasons_data[season_num])) + season_entry = { + 'season': season_num, + 'year': years[0], + 'year_range': f"{years[0]}" if len(years) == 1 else f"{years[0]}-{years[-1]}" + } + seasons_summary.append(season_entry) + + imdb_info['seasons_summary'] = seasons_summary + else: + imdb_info['seasons_summary'] = [] + sound_mixes = await safe_get(title_data, ['technicalSpecifications', 'soundMixes', 'items'], []) imdb_info['sound_mixes'] = [sm.get('text', '') for sm in sound_mixes if isinstance(sm, dict) and 'text' in sm] episodes = imdb_info.get('episodes', []) current_year = datetime.now().year release_years = [episode['release_year'] for episode in episodes if 'release_year' in episode and isinstance(episode['release_year'], int)] - if release_years: + if imdb_info['end_year']: + imdb_info['tv_year'] = imdb_info['end_year'] + elif release_years: closest_year = min(release_years, key=lambda year: abs(year - current_year)) imdb_info['tv_year'] = closest_year else: @@ -421,89 +446,204 @@ async def get_imdb_info_api(imdbID, manual_language=None, debug=False): return imdb_info -async def search_imdb(filename, search_year, quickie=False, category=None, debug=False, secondary_title=None, path=None): - if secondary_title is not None: - filename = secondary_title - else: - folder_name = os.path.basename(path) if path else "" - year_pattern = r'(19|20)\d{2}' - res_pattern = r'\b(480|576|720|1080|2160)[pi]\b' - year_match = re.search(year_pattern, folder_name) - res_match = re.search(res_pattern, folder_name, re.IGNORECASE) - - indices = [] - if year_match: - indices.append(('year', year_match.start(), year_match.group())) - if res_match: - indices.append(('res', res_match.start(), res_match.group())) - - if indices: - indices.sort(key=lambda x: x[1]) - first_type, first_index, first_value = indices[0] - title_part = folder_name[:first_index] - title_part = re.sub(r'[\.\-_ ]+$', '', title_part) - else: - 
title_part = folder_name - - filename = title_part.replace('.', ' ') - - filename = re.sub(r'\s+[A-Z]{2}$', '', filename.strip()) +async def search_imdb(filename, search_year, quickie=False, category=None, debug=False, secondary_title=None, path=None, untouched_filename=None, attempted=0, duration=None, unattended=False): + search_results = [] + imdbID = imdb_id = 0 + if attempted is None: + attempted = 0 if debug: console.print(f"[yellow]Searching IMDb for {filename} and year {search_year}...[/yellow]") - imdbID = imdb_id = 0 - url = "https://api.graphql.imdb.com/" - query = { - "query": f""" - {{ - advancedTitleSearch( - first: 10, - constraints: {{ titleTextConstraint: {{ searchTerm: "{filename}" }} }} - ) {{ - total - edges {{ - node {{ - title {{ - id - titleText {{ - text - }} - titleType {{ - text - }} - releaseYear {{ - year - }} - plot {{ - plotText {{ - plainText + if attempted: + await asyncio.sleep(1) # Whoa baby, slow down + + async def run_imdb_search(filename, search_year, category=None, debug=False, attempted=0, duration=None, wide_search=False): + search_results = [] + if secondary_title is not None: + filename = secondary_title + if attempted is None: + attempted = 0 + if attempted: + await asyncio.sleep(1) # Whoa baby, slow down + url = "https://api.graphql.imdb.com/" + if category == "MOVIE": + filename = filename.replace('and', '&').replace('And', '&').replace('AND', '&').strip() + + constraints_parts = [f'titleTextConstraint: {{searchTerm: "{filename}"}}'] + + # Add release date constraint if search_year is provided + if not wide_search and search_year: + search_year_int = int(search_year) + start_year = search_year_int - 1 + end_year = search_year_int + 1 + constraints_parts.append(f'releaseDateConstraint: {{releaseDateRange: {{start: "{start_year}-01-01", end: "{end_year}-12-31"}}}}') + + if not wide_search and duration: + if isinstance(duration, int): + duration = str(duration) + start_duration = int(duration) - 10 + end_duration = int(duration) + 10 + constraints_parts.append(f'runtimeConstraint: {{runtimeRangeMinutes: {{min: {start_duration}, max: {end_duration}}}}}') + + constraints_string = ', '.join(constraints_parts) + + query = { + "query": f""" + {{ + advancedTitleSearch( + first: 10, + constraints: {{{constraints_string}}} + ) {{ + total + edges {{ + node {{ + title {{ + id + titleText {{ + text + }} + titleType {{ + text + }} + releaseYear {{ + year + }} + plot {{ + plotText {{ + plainText + }} }} }} }} }} }} }} - }} - """ - } - - try: - async with httpx.AsyncClient() as client: - response = await client.post(url, json=query, headers={"Content-Type": "application/json"}, timeout=10) - response.raise_for_status() - data = response.json() - except Exception as e: - console.print(f"[red]IMDb GraphQL API error: {e}[/red]") - return 0 - - results = await safe_get(data, ["data", "advancedTitleSearch", "edges"], []) + """ + } - if debug: - console.print(f"[yellow]Found {len(results)} results...[/yellow]") - console.print(f"quickie: {quickie}, category: {category}, search_year: {search_year}") + try: + async with httpx.AsyncClient() as client: + response = await client.post(url, json=query, headers={"Content-Type": "application/json"}, timeout=10) + response.raise_for_status() + data = response.json() + except Exception as e: + console.print(f"[red]IMDb GraphQL API error: {e}[/red]") + return 0 + + results = await safe_get(data, ["data", "advancedTitleSearch", "edges"], []) + search_results = results + + if debug: + console.print(f"[yellow]Found 
{len(results)} results...[/yellow]") + console.print(f"quickie: {quickie}, category: {category}, search_year: {search_year}") + return search_results + + if not search_results: + result = await run_imdb_search(filename, search_year, category, debug, attempted, duration, wide_search=False) + if result and len(result) > 0: + search_results = result + + if not search_results and secondary_title: + if debug: + console.print(f"[yellow]Trying IMDb with secondary title: {secondary_title}[/yellow]") + result = await run_imdb_search(secondary_title, search_year, category, debug, attempted, duration, wide_search=True) + if result and len(result) > 0: + search_results = result + + # remove 'the' from the beginning of the title if it exists + if not search_results: + try: + words = filename.split() + bad_words = ['the'] + words_lower = [word.lower() for word in words] + + if words_lower and words_lower[0] in bad_words: + words.pop(0) + words_lower.pop(0) + title = ' '.join(words) + if debug: + console.print(f"[bold yellow]Trying IMDb with the prefix removed: {title}[/bold yellow]") + result = await run_imdb_search(title, search_year, category, debug, attempted + 1, wide_search=False) + if result and len(result) > 0: + search_results = result + except Exception as e: + console.print(f"[bold red]Reduced name search error:[/bold red] {e}") + search_results = {"results": []} + + # relax the constraints + if not search_results: + if debug: + console.print("[yellow]No results found, trying with a wider search...[/yellow]") + try: + result = await run_imdb_search(filename, search_year, category, debug, attempted + 1, wide_search=True) + if result and len(result) > 0: + search_results = result + except Exception as e: + console.print(f"[red]Error during wide search: {e}[/red]") + + # Try parsed title (anitopy + guessit) + if not search_results: + try: + parsed = guessit(untouched_filename, {"excludes": ["country", "language"]}) + parsed_title = anitopy_parse(parsed['title'])['anime_title'] + if debug: + console.print(f"[bold yellow]Trying IMDB with parsed title: {parsed_title}[/bold yellow]") + result = await run_imdb_search(parsed_title, search_year, category, debug, attempted + 1, wide_search=True) + if result and len(result) > 0: + search_results = result + except Exception: + console.print("[bold red]Guessit failed parsing title, trying another method[/bold red]") + + # Try with less words in the title + if not search_results: + try: + words = filename.split() + extensions = ['mp4', 'mkv', 'avi', 'webm', 'mov', 'wmv'] + words_lower = [word.lower() for word in words] + + for ext in extensions: + if ext in words_lower: + ext_index = words_lower.index(ext) + words.pop(ext_index) + words_lower.pop(ext_index) + break + + if len(words) > 1: + reduced_title = ' '.join(words[:-1]) + if debug: + console.print(f"[bold yellow]Trying IMDB with reduced name: {reduced_title}[/bold yellow]") + result = await run_imdb_search(reduced_title, search_year, category, debug, attempted + 1, wide_search=True) + if result and len(result) > 0: + search_results = result + except Exception as e: + console.print(f"[bold red]Reduced name search error:[/bold red] {e}") + + # Try with even fewer words + if not search_results: + try: + words = filename.split() + extensions = ['mp4', 'mkv', 'avi', 'webm', 'mov', 'wmv'] + words_lower = [word.lower() for word in words] + + for ext in extensions: + if ext in words_lower: + ext_index = words_lower.index(ext) + words.pop(ext_index) + words_lower.pop(ext_index) + break + + if len(words) > 2: + 
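[Editor's note] The fallback ladder above (secondary title, leading "the" removal, wide search, parsed title, dropping trailing words) amounts to trying progressively looser title guesses until something matches. A rough standalone sketch of that idea, where generate_candidates and the injected search callable stand in for the patch's run_imdb_search; this is a simplification, not the project's control flow.

import os

def generate_candidates(filename):
    # Yield progressively looser title guesses, mirroring the fallback order above.
    yield filename
    words = filename.split()
    if words and words[0].lower() == 'the':
        yield ' '.join(words[1:])
    stem, ext = os.path.splitext(filename)
    if ext.lstrip('.').lower() in ('mp4', 'mkv', 'avi', 'webm', 'mov', 'wmv'):
        yield stem
        words = stem.split()
    if len(words) > 1:
        yield ' '.join(words[:-1])
    if len(words) > 2:
        yield ' '.join(words[:-2])

def first_hit(filename, search):
    # 'search' is any callable returning a (possibly empty) result list.
    for candidate in generate_candidates(filename):
        results = search(candidate)
        if results:
            return candidate, results
    return None, []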
further_reduced_title = ' '.join(words[:-2]) + if debug: + console.print(f"[bold yellow]Trying IMDB with further reduced name: {further_reduced_title}[/bold yellow]") + result = await run_imdb_search(further_reduced_title, search_year, category, debug, attempted + 1, wide_search=True) + if result and len(result) > 0: + search_results = result + except Exception as e: + console.print(f"[bold red]Further reduced name search error:[/bold red] {e}") if quickie: - if results: - first_result = results[0] + if search_results: + first_result = search_results[0] if debug: console.print(f"[cyan]Quickie search result: {first_result}[/cyan]") node = await safe_get(first_result, ["node"], {}) @@ -540,41 +680,260 @@ async def search_imdb(filename, search_year, quickie=False, category=None, debug if not type_matches and debug: console.print(f"[yellow]Type mismatch: found {type_info.get('text', '')}, expected {category}[/yellow]") imdbID = 0 - else: - console.print("[yellow]No results found for quickie search[/yellow]") - imdbID = 0 - return imdbID + return imdbID if imdbID else 0 else: - for idx, edge in enumerate(results): - node = await safe_get(edge, ["node"], {}) - title = await safe_get(node, ["title"], {}) - title_text = await safe_get(title, ["titleText", "text"], "") - year = await safe_get(title, ["releaseYear", "year"], None) - imdb_id = await safe_get(title, ["id"], "") - title_type = await safe_get(title, ["titleType", "text"], "") - plot = await safe_get(title, ["plot", "plotText", "plainText"], "") + if len(search_results) == 1: + imdb_id = await safe_get(search_results[0], ["node", "title", "id"], "") + if imdb_id: + imdbID = int(imdb_id.replace('tt', '').strip()) + return imdbID + elif len(search_results) > 1: + # Calculate similarity for all results + results_with_similarity = [] + filename_norm = filename.lower().strip() + search_year_int = int(search_year) if search_year else 0 + + for r in search_results: + node = await safe_get(r, ["node"], {}) + title = await safe_get(node, ["title"], {}) + title_text = await safe_get(title, ["titleText", "text"], "") + result_year = await safe_get(title, ["releaseYear", "year"], 0) + + similarity = SequenceMatcher(None, filename_norm, title_text.lower().strip()).ratio() + + # Only boost similarity if titles are very similar (>= 0.99) AND years match + if similarity >= 0.99 and search_year_int > 0 and result_year > 0: + if result_year == search_year_int: + similarity += 0.1 # Full boost for exact year match + elif result_year == search_year_int - 1: + similarity += 0.05 # Half boost for -1 year + + results_with_similarity.append((r, similarity)) + + # Sort by similarity (highest first) + results_with_similarity.sort(key=lambda x: x[1], reverse=True) + + # Filter results: if we have high similarity matches (>= 0.90), hide low similarity ones (< 0.75) + best_similarity = results_with_similarity[0][1] + if best_similarity >= 0.90: + filtered_results_with_similarity = [ + (result, sim) for result, sim in results_with_similarity + if sim >= 0.75 + ] + results_with_similarity = filtered_results_with_similarity + + if debug: + console.print(f"[yellow]Filtered out low similarity results (< 0.70) since best match has {best_similarity:.2f} similarity[/yellow]") + + sorted_results = [r[0] for r in results_with_similarity] + + # Check if the best match is significantly better than others + best_similarity = results_with_similarity[0][1] + similarity_threshold = 0.85 + + if best_similarity >= similarity_threshold: + second_best = results_with_similarity[1][1] if 
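[Editor's note] The ranking step above keys entirely off difflib.SequenceMatcher with a small year bonus. A standalone illustration with made-up candidates; the 0.99 similarity gate and 0.10/0.05 year boosts follow the patch, everything else is illustrative.

from difflib import SequenceMatcher

def rank_candidates(query, query_year, candidates):
    # candidates: iterable of (title, year) tuples; returns highest-similarity first.
    ranked = []
    for title, year in candidates:
        score = SequenceMatcher(None, query.lower().strip(), title.lower().strip()).ratio()
        if score >= 0.99 and query_year and year:
            if year == query_year:
                score += 0.10      # full boost for an exact year match
            elif year == query_year - 1:
                score += 0.05      # half boost for an off-by-one year
        ranked.append(((title, year), score))
    return sorted(ranked, key=lambda item: item[1], reverse=True)

print(rank_candidates("Dune", 2021, [("Dune", 2021), ("Dune", 1984), ("Dune: Part Two", 2024)]))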
len(results_with_similarity) > 1 else 0.0 + + if best_similarity - second_best >= 0.10: + if debug: + console.print(f"[green]Auto-selecting best match: {await safe_get(sorted_results[0], ['node', 'title', 'titleText', 'text'], '')} (similarity: {best_similarity:.2f})[/green]") + imdb_id = await safe_get(sorted_results[0], ["node", "title", "id"], "") + if imdb_id: + imdbID = int(imdb_id.replace('tt', '').strip()) + return imdbID - console.print(f"[cyan]Result {idx+1}: {title_text} - ({year}) - {imdb_id} - Type: {title_type}[/cyan]") - if plot: - console.print(f"[green]Plot: {plot}[/green]") + if unattended: + imdb_id = await safe_get(sorted_results[0], ["node", "title", "id"], "") + if imdb_id: + imdbID = int(imdb_id.replace('tt', '').strip()) + if debug: + console.print(f"[green]Unattended mode: auto-selected IMDb ID {imdbID}[/green]") + return imdbID - if results: - console.print("[yellow]Enter the number of the correct entry, or 0 for none:[/yellow]") - try: - user_input = input("> ").strip() - if user_input.isdigit(): - selection = int(user_input) - if 1 <= selection <= len(results): - selected = results[selection - 1] - imdb_id = await safe_get(selected, ["node", "title", "id"], "") - if imdb_id: - imdbID = int(imdb_id.replace('tt', '').strip()) - return imdbID - - except Exception as e: - console.print(f"[red]Error reading input: {e}[/red]") - imdbID = 0 + # Show sorted results to user + console.print("[bold yellow]Multiple IMDb results found. Please select the correct entry:[/bold yellow]") + + for idx, result in enumerate(sorted_results): + node = await safe_get(result, ["node"], {}) + title = await safe_get(node, ["title"], {}) + title_text = await safe_get(title, ["titleText", "text"], "") + year = await safe_get(title, ["releaseYear", "year"], None) + imdb_id = await safe_get(title, ["id"], "") + title_type = await safe_get(title, ["titleType", "text"], "") + plot = await safe_get(title, ["plot", "plotText", "plainText"], "") + similarity_score = results_with_similarity[idx][1] + + console.print(f"[cyan]{idx+1}.[/cyan] [bold]{title_text}[/bold] ({year}) [yellow]ID:[/yellow] {imdb_id} [yellow]Type:[/yellow] {title_type} [dim](similarity: {similarity_score:.2f})[/dim]") + if plot: + console.print(f"[green]Plot:[/green] {plot[:200]}{'...' if len(plot) > 200 else ''}") + console.print() + + if sorted_results: + selection = None + while True: + try: + selection = cli_ui.ask_string("Enter the number of the correct entry, 0 for none, or manual IMDb ID (tt1234567): ") + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + try: + # Check if it's a manual IMDb ID entry + if selection.lower().startswith('tt') and len(selection) >= 3: + try: + manual_imdb_id = selection.lower().replace('tt', '').strip() + if manual_imdb_id.isdigit(): + console.print(f"[green]Using manual IMDb ID: {selection}[/green]") + return int(manual_imdb_id) + else: + console.print("[bold red]Invalid IMDb ID format. Please try again.[/bold red]") + continue + except Exception as e: + console.print(f"[bold red]Error parsing IMDb ID: {e}. 
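[Editor's note] The selection prompt above accepts either a 1-based list index, 0 to skip, or a manual "tt1234567" id. A small sketch of that parsing on its own; parse_selection is a hypothetical helper, not part of the patch.

def parse_selection(entry, result_count):
    """Interpret a prompt reply as a manual IMDb id ('tt1234567'), a 1-based list
    index, or 0 to skip. Returns ('imdb', int), ('index', int) or (None, None)."""
    entry = entry.strip()
    if entry.lower().startswith('tt'):
        digits = entry.lower().replace('tt', '', 1).strip()
        if digits.isdigit():
            return 'imdb', int(digits)
        return None, None
    if entry.isdigit():
        idx = int(entry)
        if idx == 0 or 1 <= idx <= result_count:
            return 'index', idx
    return None, None

assert parse_selection("tt0133093", 5) == ('imdb', 133093)
assert parse_selection("2", 5) == ('index', 2)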
Please try again.[/bold red]") + continue + + # Handle numeric selection + selection_int = int(selection) + if 1 <= selection_int <= len(sorted_results): + selected = sorted_results[selection_int - 1] + imdb_id = await safe_get(selected, ["node", "title", "id"], "") + if imdb_id: + imdbID = int(imdb_id.replace('tt', '').strip()) + return imdbID + elif selection_int == 0: + console.print("[bold red]Skipping IMDb[/bold red]") + return 0 + else: + console.print("[bold red]Selection out of range. Please try again.[/bold red]") + except ValueError: + console.print("[bold red]Invalid input. Please enter a number or IMDb ID (tt1234567).[/bold red]") + + else: + if not unattended: + try: + selection = cli_ui.ask_string("No results found. Please enter a manual IMDb ID (tt1234567) or 0 to skip: ") + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + if selection.lower().startswith('tt') and len(selection) >= 3: + try: + manual_imdb_id = selection.lower().replace('tt', '').strip() + if manual_imdb_id.isdigit(): + console.print(f"[green]Using manual IMDb ID: {selection}[/green]") + return int(manual_imdb_id) + else: + console.print("[bold red]Invalid IMDb ID format. Please try again.[/bold red]") + except Exception as e: + console.print(f"[bold red]Error parsing IMDb ID: {e}. Please try again.[/bold red]") + else: + console.print("[bold red]No IMDb results found in unattended mode. Skipping IMDb.[/bold red]") + + return imdbID if imdbID else 0 + + +async def get_imdb_from_episode(imdb_id, debug=False): + if not imdb_id or imdb_id == 0: + return None + + if not str(imdb_id).startswith("tt"): + try: + imdb_id_int = int(imdb_id) + imdb_id = f"tt{imdb_id_int:07d}" + except Exception: + imdb_id = f"tt{str(imdb_id).zfill(7)}" + + query = { + "query": f""" + {{ + title(id: "{imdb_id}") {{ + id + titleText {{ text }} + series {{ + displayableEpisodeNumber {{ + displayableSeason {{ + id + season + text + }} + episodeNumber {{ + id + text + }} + }} + nextEpisode {{ + id + titleText {{ text }} + }} + previousEpisode {{ + id + titleText {{ text }} + }} + series {{ + id + titleText {{ text }} + }} + }} + }} + }} + """ + } + + async with httpx.AsyncClient() as client: + try: + response = await client.post( + "https://api.graphql.imdb.com/", + json=query, + headers={"Content-Type": "application/json"}, + timeout=10 + ) + response.raise_for_status() + data = response.json() + except Exception as e: + if debug: + print(f"[red]IMDb API error: {e}[/red]") + return None + + title_data = await safe_get(data, ["data", "title"], {}) + if not title_data: + return None + + result = { + "id": await safe_get(title_data, ["id"]), + "title": await safe_get(title_data, ["titleText", "text"]), + "series": {}, + "next_episode": {}, + "previous_episode": {}, + } - return imdbID + series_info = await safe_get(title_data, ["series"], {}) + if series_info: + displayable = await safe_get(series_info, ["displayableEpisodeNumber"], {}) + season_info = await safe_get(displayable, ["displayableSeason"], {}) + episode_info = await safe_get(displayable, ["episodeNumber"], {}) + result["series"]["season_id"] = await safe_get(season_info, ["id"]) + result["series"]["season"] = await safe_get(season_info, ["season"]) + result["series"]["season_text"] = await safe_get(season_info, ["text"]) + result["series"]["episode_id"] = await safe_get(episode_info, ["id"]) + result["series"]["episode_text"] = await safe_get(episode_info, ["text"]) + + # Next episode + 
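[Editor's note] safe_get() is used throughout these helpers to walk nested GraphQL responses. Its implementation is not shown in this hunk; a minimal async equivalent consistent with how it is called here (an assumption about its behaviour, not the project's actual code) might look like this.

async def safe_get(data, path, default=None):
    # Walk a nested dict by a list of keys, returning 'default' on any missing step.
    current = data
    for key in path:
        if isinstance(current, dict) and current.get(key) is not None:
            current = current[key]
        else:
            return default
    return current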
next_ep = await safe_get(series_info, ["nextEpisode"], {}) + result["next_episode"]["id"] = await safe_get(next_ep, ["id"]) + result["next_episode"]["title"] = await safe_get(next_ep, ["titleText", "text"]) + + # Previous episode + prev_ep = await safe_get(series_info, ["previousEpisode"], {}) + result["previous_episode"]["id"] = await safe_get(prev_ep, ["id"]) + result["previous_episode"]["title"] = await safe_get(prev_ep, ["titleText", "text"]) + + # Series info + series_obj = await safe_get(series_info, ["series"], {}) + result["series"]["series_id"] = await safe_get(series_obj, ["id"]) + result["series"]["series_title"] = await safe_get(series_obj, ["titleText", "text"]) + + return result diff --git a/src/is_scene.py b/src/is_scene.py index aa44c3744..26de9b61d 100644 --- a/src/is_scene.py +++ b/src/is_scene.py @@ -1,7 +1,10 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import re -import urllib.parse import requests +import urllib.parse +from bs4 import BeautifulSoup +from data.config import config from src.console import console @@ -15,7 +18,7 @@ async def is_scene(video, meta, imdb=None, lower=False): base = match.group(1) is_all_lowercase = base.islower() base = urllib.parse.quote(base) - if 'scene' not in meta and not lower: + if 'scene' not in meta and not lower and not meta.get('emby_debug', False): url = f"https://api.srrdb.com/v1/search/r:%7Bbase%7D" if meta['debug']: console.print("Using SRRDB url", url) @@ -34,20 +37,33 @@ async def is_scene(video, meta, imdb=None, lower=False): meta['we_need_tag'] = True if first_result.get('imdbId'): imdb_str = first_result['imdbId'] - imdb = int(imdb_str) if imdb_str.isdigit() else 0 + imdb = int(imdb_str) if (imdb_str.isdigit() and not meta.get('imdb_manual')) else 0 # NFO Download Handling - if not meta.get('nfo'): + if not meta.get('nfo') and not meta.get('emby', False): if first_result.get("hasNFO") == "yes": try: release = first_result['release'] release_lower = release.lower() + + release_details_url = f"https://api.srrdb.com/v1/details/%7Brelease%7D" + release_details_response = requests.get(release_details_url, timeout=30) + if release_details_response.status_code == 200: + try: + release_details_dict = release_details_response.json() + for file in release_details_dict['files']: + if file['name'].endswith('.nfo'): + release_lower = os.path.splitext(file['name'])[0] + except (KeyError, ValueError): + pass + nfo_url = f"https://www.srrdb.com/download/file/%7Brelease%7D/%7Brelease_lower%7D.nfo" # Define path and create directory save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) os.makedirs(save_path, exist_ok=True) nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") + meta['scene_nfo_file'] = nfo_file_path # Download the NFO file nfo_response = requests.get(nfo_url, timeout=30) @@ -69,7 +85,7 @@ async def is_scene(video, meta, imdb=None, lower=False): except Exception as e: console.print(f"[yellow]SRRDB: No match found, or request has timed out: {e}") - elif not scene and lower: + elif not scene and lower and not meta.get('emby_debug', False): release_name = None name = meta.get('filename', None).replace(" ", ".") tag = meta.get('tag', None).replace("-", "") @@ -120,4 +136,50 @@ async def is_scene(video, meta, imdb=None, lower=False): console.print(f"[yellow]SRRDB search failed: {e}") return None + check_predb = config['DEFAULT'].get('check_predb', False) + if not scene and check_predb and not meta.get('emby_debug', False): + if 
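[Editor's note] The NFO handling above first asks the srrdb details endpoint which file in the release is actually the .nfo, then falls back to the lowercased release name. A stripped-down sketch of just that lookup, using synchronous requests as the patch does; find_nfo_basename is an illustrative name.

import os
import requests

def find_nfo_basename(release, fallback):
    # Ask srrdb which file in the release is the .nfo; fall back to the given name.
    url = f"https://api.srrdb.com/v1/details/%7Brelease%7D"
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        for file in response.json().get('files', []):
            if file.get('name', '').endswith('.nfo'):
                return os.path.splitext(file['name'])[0]
    except (requests.RequestException, ValueError, KeyError):
        pass
    return fallback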
meta['debug']: + console.print("[yellow]SRRDB: No scene match found, checking predb") + scene = await predb_check(meta, video) + return video, scene, imdb + + +async def predb_check(meta, video): + url = f"https://predb.pw/search.php?search={urllib.parse.quote(os.path.basename(video))}" + if meta['debug']: + console.print("Using predb url", url) + try: + response = requests.get(url, timeout=10) + if response.status_code == 200: + soup = BeautifulSoup(response.text, "lxml") + found = False + video_base = os.path.basename(video).lower() + for row in soup.select('table.zebra-striped tbody tr'): + tds = row.find_all('td') + if len(tds) >= 3: + # The 3rd contains the release name link + release_a = tds[2].find('a', title=True) + if release_a: + release_name = release_a['title'].strip().lower() + if meta['debug']: + console.print(f"[yellow]Predb: Checking {release_name} against {video_base}") + if release_name == video_base: + found = True + meta['scene_name'] = release_a['title'].strip() + console.print("[green]Predb: Match found") + # The 4th contains the group + if len(tds) >= 4: + group_a = tds[3].find('a') + if group_a: + meta['tag'] = group_a.text.strip() + return True + if not found: + console.print("[yellow]Predb: No match found") + return False + else: + console.print(f"[red]Predb: Error {response.status_code} while checking") + return False + except requests.RequestException as e: + console.print(f"[red]Predb: Request failed: {e}") + return False diff --git a/src/languages.py b/src/languages.py index b63d14aed..632860cdc 100644 --- a/src/languages.py +++ b/src/languages.py @@ -1,10 +1,113 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import aiofiles -import os import cli_ui +import langcodes +import os import re +import sys + +from src.cleanup import cleanup, reset_terminal from src.console import console +async def parse_blu_ray(meta): + try: + bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + if not os.path.exists(bd_summary_file): + console.print(f"[yellow]BD_SUMMARY_00.txt not found at {bd_summary_file}[/yellow]") + return {} + + async with aiofiles.open(bd_summary_file, 'r', encoding='utf-8') as f: + content = await f.read() + except Exception as e: + console.print(f"[red]Error reading BD_SUMMARY file: {e}[/red]") + return {} + + parsed_data = { + 'disc_info': {}, + 'playlist_info': {}, + 'video': {}, + 'audio': [], + 'subtitles': [] + } + + lines = content.strip().split('\n') + + for line in lines: + line = line.strip() + if not line: + continue + + if ':' in line: + key, value = line.split(':', 1) + key = key.strip() + value = value.strip() + + if key in ['Disc Title', 'Disc Label', 'Disc Size', 'Protection']: + parsed_data['disc_info'][key.lower().replace(' ', '_')] = value + + elif key in ['Playlist', 'Size', 'Length', 'Total Bitrate']: + parsed_data['playlist_info'][key.lower().replace(' ', '_')] = value + + elif key == 'Video': + video_parts = [part.strip() for part in value.split('/')] + if len(video_parts) >= 6: + parsed_data['video'] = { + 'format': video_parts[0], + 'bitrate': video_parts[1], + 'resolution': video_parts[2], + 'framerate': video_parts[3], + 'aspect_ratio': video_parts[4], + 'profile': video_parts[5] + } + else: + parsed_data['video']['format'] = value + + elif key == 'Audio' or (key.startswith('*') and 'Audio' in key): + is_commentary = key.startswith('*') + audio_parts = [part.strip() for part in value.split('/')] + + audio_track = { + 'is_commentary': is_commentary + } + + if 
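[Editor's note] For reference, the "/"-separated BDInfo audio line that parse_blu_ray() splits apart typically looks like the sample below. A tiny standalone parse; the field names follow the patch, the sample line is made up.

import re

sample = "English / DTS-HD Master Audio / 5.1 / 48 kHz / 3949 kbps / 24-bit"
parts = [part.strip() for part in sample.split('/')]
track = {'is_commentary': False}
keys = ['language', 'format', 'channels', 'sample_rate', 'bitrate', 'bit_depth']
for key, value in zip(keys, parts):
    # bit depth can carry a parenthesised core-stream note, so trim it
    track[key] = value.split('(')[0].strip() if key == 'bit_depth' else value
match = re.search(r'(\d+)\s*kbps', track.get('bitrate', ''))
if match:
    track['bitrate_num'] = int(match.group(1))
print(track)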
len(audio_parts) >= 1: + audio_track['language'] = audio_parts[0] + if len(audio_parts) >= 2: + audio_track['format'] = audio_parts[1] + if len(audio_parts) >= 3: + audio_track['channels'] = audio_parts[2] + if len(audio_parts) >= 4: + audio_track['sample_rate'] = audio_parts[3] + if len(audio_parts) >= 5: + bitrate_str = audio_parts[4].strip() + bitrate_match = re.search(r'(\d+)\s*kbps', bitrate_str) + if bitrate_match: + audio_track['bitrate_num'] = int(bitrate_match.group(1)) + audio_track['bitrate'] = bitrate_str + if len(audio_parts) >= 6: + audio_track['bit_depth'] = audio_parts[5].split('(')[0].strip() + + parsed_data['audio'].append(audio_track) + + elif key == 'Subtitle' or (key.startswith('*') and 'Subtitle' in key): + is_commentary = key.startswith('*') + subtitle_parts = [part.strip() for part in value.split('/')] + + subtitle_track = { + 'is_commentary': is_commentary + } + + if len(subtitle_parts) >= 1: + subtitle_track['language'] = subtitle_parts[0] + if len(subtitle_parts) >= 2: + subtitle_track['bitrate'] = subtitle_parts[1] + + parsed_data['subtitles'].append(subtitle_track) + + return parsed_data + + async def parsed_mediainfo(meta): try: mediainfo_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" @@ -73,6 +176,8 @@ async def parsed_mediainfo(meta): async def process_desc_language(meta, desc=None, tracker=None): + if 'language_checked' not in meta: + meta['language_checked'] = False if 'tracker_status' not in meta: meta['tracker_status'] = {} if tracker not in meta['tracker_status']: @@ -85,6 +190,12 @@ async def process_desc_language(meta, desc=None, tracker=None): meta['no_subs'] = False if 'write_hc_languages' not in meta: meta['write_hc_languages'] = False + if 'write_audio_languages' not in meta: + meta['write_audio_languages'] = False + if 'write_subtitle_languages' not in meta: + meta['write_subtitle_languages'] = False + if 'write_hc_languages' not in meta: + meta['write_hc_languages'] = False if not meta['is_disc'] == "BDMV": try: parsed_info = await parsed_mediainfo(meta) @@ -104,55 +215,121 @@ async def process_desc_language(meta, desc=None, tracker=None): meta['subtitle_languages'] = [] if not audio_languages or not subtitle_languages: if not meta.get('unattended_audio_skip', False) and (not audio_languages or audio_languages is None): - for audio_track in parsed_info.get('audio', []): - if 'language' not in audio_track: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - console.print("No audio language/s found, you must enter (comma-separated) languages") + found_any_language = False + tracks_without_language = [] + + for track_index, audio_track in enumerate(parsed_info.get('audio', []), 1): + language_found = None + + # Skip commentary tracks + if "title" in audio_track and "commentary" in audio_track['title'].lower(): + if meta['debug']: + console.print(f"Skipping commentary track: {audio_track['title']}") + continue + + if 'language' in audio_track: + language_found = audio_track['language'] + + if not language_found and 'title' in audio_track: + if meta['debug']: + console.print(f"Attempting to extract language from title: {audio_track['title']}") + title_language = extract_language_from_title(audio_track['title']) + if title_language: + language_found = title_language + console.print(f"Extracted language: {title_language}") + + if language_found: + meta['audio_languages'].append(language_found) + found_any_language = True + else: + + track_info = f"Track #{track_index}" + if 'title' in 
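[Editor's note] Title-based language recovery above leans on the langcodes package. A quick illustration of the lookup it performs; guess_language is a hypothetical stand-in for the patch's extract_language_from_title, and the exception handling mirrors the patch.

import re
import langcodes

def guess_language(title):
    # Return the display name of the first word langcodes recognises as a language name.
    for word in re.findall(r'\b[a-zA-Z]+\b', title.lower()):
        try:
            lang = langcodes.find(word)
        except (langcodes.LanguageTagError, LookupError):
            continue
        if lang and lang.is_valid():
            return lang.display_name()
    return None

print(guess_language("Portuguese 5.1 Atmos"))   # expected: 'Portuguese'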
audio_track: + track_info += f" (Title: {audio_track['title']})" + tracks_without_language.append(track_info) + + if not found_any_language: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print("No audio language/s found for the following tracks:") + for track_info in tracks_without_language: + console.print(f" - {track_info}") + console.print("You must enter (comma-separated) languages") + try: audio_lang = cli_ui.ask_string('for all audio tracks, eg: English, Spanish:') - if audio_lang: - audio_languages.extend([lang.strip() for lang in audio_lang.split(',')]) - meta['audio_languages'] = audio_languages - meta['write_audio_languages'] = True - else: - meta['audio_languages'] = None - meta['unattended_audio_skip'] = True - meta['tracker_status'][tracker]['skip_upload'] = True + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + if audio_lang: + audio_languages.extend([lang.strip() for lang in audio_lang.split(',')]) + meta['audio_languages'] = audio_languages + meta['write_audio_languages'] = True else: + meta['audio_languages'] = None meta['unattended_audio_skip'] = True meta['tracker_status'][tracker]['skip_upload'] = True else: - if "title" in audio_track and "commentary" not in audio_track['title']: - meta['audio_languages'].append(audio_track['language']) - elif "title" not in audio_track: - meta['audio_languages'].append(audio_track['language']) - if meta['audio_languages']: - meta['audio_languages'] = [lang.split()[0] for lang in meta['audio_languages']] + meta['unattended_audio_skip'] = True + meta['tracker_status'][tracker]['skip_upload'] = True + if meta['debug']: + meta['audio_languages'] = ['English, Portuguese'] + + if meta['audio_languages']: + meta['audio_languages'] = [lang.split()[0] for lang in meta['audio_languages']] if (not meta.get('unattended_subtitle_skip', False) or not meta.get('unattended_audio_skip', False)) and (not subtitle_languages or subtitle_languages is None): if 'text' in parsed_info: - for text_track in parsed_info.get('text', []): + tracks_without_language = [] + + for track_index, text_track in enumerate(parsed_info.get('text', []), 1): if 'language' not in text_track: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - console.print("No subtitle language/s found, you must enter (comma-separated) languages") + track_info = f"Track #{track_index}" + if 'title' in text_track: + track_info += f" (Title: {text_track['title']})" + tracks_without_language.append(track_info) + else: + meta['subtitle_languages'].append(text_track['language']) + + if tracks_without_language: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print("No subtitle language/s found for the following tracks:") + for track_info in tracks_without_language: + console.print(f" - {track_info}") + console.print("You must enter (comma-separated) languages") + try: subtitle_lang = cli_ui.ask_string('for all subtitle tracks, eg: English, Spanish:') - if subtitle_lang: - subtitle_languages.extend([lang.strip() for lang in subtitle_lang.split(',')]) - meta['subtitle_languages'] = subtitle_languages - meta['write_subtitle_languages'] = True - else: - meta['subtitle_languages'] = None - meta['unattended_subtitle_skip'] = True - meta['tracker_status'][tracker]['skip_upload'] = True + except EOFError: + console.print("\n[red]Exiting on user request 
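[Editor's note] The repeated ask_string/EOFError blocks above all follow the same shape. A hedged sketch of a wrapper that would centralise it; cleanup() and reset_terminal() are the helpers this patch imports from src.cleanup, passed in here so the sketch stays self-contained.

import sys
import cli_ui

async def ask_or_exit(prompt, cleanup, reset_terminal, console):
    # Prompt the user; on EOF (Ctrl+D / aborted Ctrl+C prompt) clean up and exit.
    try:
        return cli_ui.ask_string(prompt)
    except EOFError:
        console.print("\n[red]Exiting on user request (Ctrl+C)[/red]")
        await cleanup()
        reset_terminal()
        sys.exit(1)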
(Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + if subtitle_lang: + subtitle_languages.extend([lang.strip() for lang in subtitle_lang.split(',')]) + meta['subtitle_languages'] = subtitle_languages + meta['write_subtitle_languages'] = True else: + meta['subtitle_languages'] = None meta['unattended_subtitle_skip'] = True meta['tracker_status'][tracker]['skip_upload'] = True else: - meta['subtitle_languages'].append(text_track['language']) - if meta['subtitle_languages']: - meta['subtitle_languages'] = [lang.split()[0] for lang in meta['subtitle_languages']] + meta['unattended_subtitle_skip'] = True + meta['tracker_status'][tracker]['skip_upload'] = True + if meta['debug']: + meta['subtitle_languages'] = ['English, Portuguese'] + + if meta['subtitle_languages']: + meta['subtitle_languages'] = [lang.split()[0] for lang in meta['subtitle_languages']] + if meta.get('hardcoded-subs', False): - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - hc_lang = cli_ui.ask_string("What language/s are the hardcoded subtitles?") + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + try: + hc_lang = cli_ui.ask_string("What language/s are the hardcoded subtitles?") + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) if hc_lang: meta['subtitle_languages'] = [hc_lang] meta['write_hc_languages'] = True @@ -166,20 +343,15 @@ async def process_desc_language(meta, desc=None, tracker=None): if 'text' not in parsed_info and not meta.get('hardcoded-subs', False): meta['no_subs'] = True - if meta['audio_languages'] and meta['write_audio_languages'] and desc is not None: - await desc.write(f"[code]Audio Language/s: {', '.join(meta['audio_languages'])}[/code]\n") - - if meta['subtitle_languages'] and meta['write_subtitle_languages'] and desc is not None: - await desc.write(f"[code]Subtitle Language/s: {', '.join(meta['subtitle_languages'])}[/code]\n") - if meta['subtitle_languages'] and meta['write_hc_languages'] and desc is not None: - await desc.write(f"[code]Hardcoded Subtitle Language/s: {', '.join(meta['subtitle_languages'])}[/code]\n") - except Exception as e: console.print(f"[red]Error processing mediainfo languages: {e}[/red]") + meta['language_checked'] = True return desc if desc is not None else None elif meta['is_disc'] == "BDMV": + if "language_checked" not in meta: + meta['language_checked'] = False if 'bluray_audio_skip' not in meta: meta['bluray_audio_skip'] = False audio_languages = [] @@ -187,9 +359,19 @@ async def process_desc_language(meta, desc=None, tracker=None): audio_languages = meta['audio_languages'] else: meta['audio_languages'] = [] + if meta.get('subtitle_languages'): + subtitle_languages = meta['subtitle_languages'] + else: + meta['subtitle_languages'] = [] try: - bdinfo = meta.get('bdinfo', {}) - audio_tracks = bdinfo.get("audio", []) + bluray = await parse_blu_ray(meta) + audio_tracks = bluray.get("audio", []) + commentary_tracks = [track for track in audio_tracks if track.get("is_commentary")] + if commentary_tracks: + for track in commentary_tracks: + if meta['debug']: + console.print(f"Skipping commentary track: {track}") + audio_tracks.remove(track) audio_languages = {track.get("language", "") for track in audio_tracks if "language" in track} for track in audio_tracks: bitrate_str = track.get("bitrate", "") @@ -209,15 +391,27 @@ async def process_desc_language(meta, desc=None, tracker=None): lang 
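[Editor's note] The BDMV branch above drops commentary tracks before collecting languages and later treats very low-bitrate tracks (below 258 kbps in the patch) as probable commentary. A compact sketch of both filters over a plain list of parsed tracks; note the patch asks the user before discarding low-bitrate tracks rather than dropping them silently as done here.

def likely_main_audio(tracks, bitrate_floor_kbps=258):
    # Keep tracks that are neither flagged as commentary nor suspiciously low-bitrate.
    kept = []
    for track in tracks:
        if track.get('is_commentary'):
            continue
        bitrate = track.get('bitrate_num')
        if bitrate is not None and bitrate < bitrate_floor_kbps:
            continue
        kept.append(track)
    return kept

tracks = [
    {'language': 'English', 'bitrate_num': 3949},
    {'language': 'English', 'bitrate_num': 192},                          # probable commentary
    {'language': 'Japanese', 'bitrate_num': 1509, 'is_commentary': True},
]
print({t['language'] for t in likely_main_audio(tracks)})   # {'English'}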
= track.get("language", "") if bitrate_num is not None and bitrate_num < 258: if lang and lang in audio_languages and len(lang) > 1 and not meta['bluray_audio_skip']: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print(f"Audio track '{lang}' has a bitrate of {bitrate_num} kbps. Probably commentary and should be removed.") - if cli_ui.ask_yes_no(f"Remove '{lang}' from audio languages?", default=True): - audio_languages.discard(lang) if isinstance(audio_languages, set) else audio_languages.remove(lang) + try: + if cli_ui.ask_yes_no(f"Remove '{lang}' from audio languages?", default=True): + audio_languages.discard(lang) if isinstance(audio_languages, set) else audio_languages.remove(lang) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) else: audio_languages.discard(lang) if isinstance(audio_languages, set) else audio_languages.remove(lang) meta['bluray_audio_skip'] = True - subtitle_tracks = bdinfo.get("subtitles", []) + subtitle_tracks = bluray.get("subtitles", []) + sub_commentary_tracks = [track for track in subtitle_tracks if track.get("is_commentary")] + if sub_commentary_tracks: + for track in sub_commentary_tracks: + if meta['debug']: + console.print(f"Skipping commentary subtitle track: {track}") + subtitle_tracks.remove(track) if subtitle_tracks and isinstance(subtitle_tracks[0], dict): subtitle_languages = {track.get("language", "") for track in subtitle_tracks if "language" in track} else: @@ -229,9 +423,11 @@ async def process_desc_language(meta, desc=None, tracker=None): except Exception as e: console.print(f"[red]Error processing BDInfo languages: {e}[/red]") + meta['language_checked'] = True return desc if desc is not None else None else: + meta['language_checked'] = True return desc if desc is not None else None @@ -242,3 +438,22 @@ async def has_english_language(languages): if not languages: return False return any('english' in lang.lower() for lang in languages) + + +def extract_language_from_title(title): + """Extract language from title field using langcodes library""" + if not title: + return None + + title_lower = title.lower() + words = re.findall(r'\b[a-zA-Z]+\b', title_lower) + + for word in words: + try: + lang = langcodes.find(word) + if lang and lang.is_valid(): + return lang.display_name() + except (langcodes.LanguageTagError, LookupError): + continue + + return None diff --git a/src/manualpackage.py b/src/manualpackage.py index 8d179cef8..2b55fa343 100644 --- a/src/manualpackage.py +++ b/src/manualpackage.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import shutil import requests import os diff --git a/src/metadata_searching.py b/src/metadata_searching.py index 6a11d1646..c38576119 100644 --- a/src/metadata_searching.py +++ b/src/metadata_searching.py @@ -1,13 +1,19 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import re import asyncio +from data.config import config from src.console import console from src.tvmaze import search_tvmaze, get_tvmaze_episode_data from src.imdb import get_imdb_info_api -from src.tmdb import tmdb_other_meta, get_tmdb_from_imdb, get_episode_details -from src.tvdb import get_tvdb_episode_data, get_tvdb_series_data, get_tvdb_series_episodes, get_tvdb_series +from src.tmdb import tmdb_other_meta, get_tmdb_from_imdb, 
get_episode_details, get_season_details +from src.tvdb import tvdb_data +tvdb_handler = tvdb_data(config) -async def all_ids(meta, tvdb_api=None, tvdb_token=None): + +async def all_ids(meta): + if meta['debug']: + console.print("[yellow]Starting metadata retrieval with all IDs present[/yellow]") # Create a list of all tasks to run in parallel all_tasks = [ # Core metadata tasks @@ -34,24 +40,18 @@ async def all_ids(meta, tvdb_api=None, tvdb_token=None): ) ] + # Always add get_tvdb_episodes for TV category + if meta.get('category') == 'TV': + tvdb_episodes_task = tvdb_handler.get_tvdb_episodes( + meta['tvdb_id'], + meta.get('debug', False) + ) + all_tasks.append(tvdb_episodes_task) + # Add episode-specific tasks if this is a TV show with episodes if (meta['category'] == 'TV' and not meta.get('tv_pack', False) and 'season_int' in meta and 'episode_int' in meta and meta.get('episode_int') != 0): - # Add TVDb task if we have credentials - if tvdb_api and tvdb_token: - all_tasks.append( - get_tvdb_episode_data( - meta['base_dir'], - tvdb_token, - meta.get('tvdb_id'), - meta.get('season_int'), - meta.get('episode_int'), - api_key=tvdb_api, - debug=meta.get('debug', False) - ) - ) - # Add TVMaze episode details task all_tasks.append( get_tvmaze_episode_data( @@ -69,17 +69,15 @@ async def all_ids(meta, tvdb_api=None, tvdb_token=None): debug=meta.get('debug', False) ) ) - elif meta.get('category') == 'TV' and meta.get('tv_pack', False): - if tvdb_api and tvdb_token: - all_tasks.append( - get_tvdb_series_data( - meta['base_dir'], - tvdb_token, - meta.get('tvdb_id'), - api_key=tvdb_api, - debug=meta.get('debug', False) - ) + elif meta['category'] == 'TV' and meta.get('tv_pack', False) and 'season_int' in meta: + # For TV packs, we might want to get season details instead + all_tasks.append( + get_season_details( + meta.get('tmdb_id'), + meta.get('season_int'), + debug=meta.get('debug', False) ) + ) # Execute all tasks in parallel try: @@ -105,7 +103,6 @@ async def all_ids(meta, tvdb_api=None, tvdb_token=None): # Process IMDB info if isinstance(imdb_info, dict): meta['imdb_info'] = imdb_info - meta['tv_year'] = imdb_info.get('tv_year', None) elif isinstance(imdb_info, Exception): console.print(f"[red]IMDb API call failed: {imdb_info}[/red]") @@ -114,78 +111,31 @@ async def all_ids(meta, tvdb_api=None, tvdb_token=None): console.print("[red]Unexpected IMDb response, setting imdb_info to empty.[/red]") meta['imdb_info'] = {} - # Process episode data if this is a TV show - if meta['category'] == 'TV' and not meta.get('tv_pack', False) and meta.get('episode_int', 0) != 0: - # Process TVDb episode data (if included) - if tvdb_api and tvdb_token: - tvdb_episode_data = results[result_index] - result_index += 1 - - if tvdb_episode_data and not isinstance(tvdb_episode_data, Exception): - meta['tvdb_episode_data'] = tvdb_episode_data + # Process TVDB episodes data if it was requested for TV category + if meta.get('category') == 'TV': + tvdb_episode_data = results[result_index] + result_index += 1 + if tvdb_episode_data and not isinstance(tvdb_episode_data, Exception): + # tvdb_episode_data is a tuple: (episodes_list, series_name) + if isinstance(tvdb_episode_data, tuple) and len(tvdb_episode_data) == 2: + episodes_data, series_name = tvdb_episode_data + meta['tvdb_episode_data'] = episodes_data + if series_name: + meta['tvdb_series_name'] = series_name meta['we_checked_tvdb'] = True + else: + console.print(f"[yellow]Unexpected TVDb data format: {type(tvdb_episode_data)}[/yellow]") + elif 
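[Editor's note] These metadata helpers all follow the same gather-then-index pattern: build a task list that varies with the category, run it with return_exceptions=True, then step a result_index through the results and treat Exception instances as soft failures. A minimal, self-contained illustration of that pattern with dummy coroutines only.

import asyncio

async def fetch_ok(name):
    return f"{name}-data"

async def fetch_fail(name):
    raise RuntimeError(f"{name} unavailable")

async def main(include_optional=True):
    tasks = [fetch_ok("tmdb"), fetch_fail("tvmaze")]
    if include_optional:
        tasks.append(fetch_ok("tvdb"))        # optional task, like the category checks above
    results = await asyncio.gather(*tasks, return_exceptions=True)

    result_index = 0
    for name in ("tmdb", "tvmaze") + (("tvdb",) if include_optional else ()):
        value = results[result_index]
        result_index += 1
        if isinstance(value, Exception):
            print(f"{name} failed: {value}")
        else:
            print(f"{name}: {value}")

asyncio.run(main())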
isinstance(tvdb_episode_data, Exception): + console.print(f"[yellow]TVDb episode data retrieval failed: {tvdb_episode_data}[/yellow]") - # Process episode name - if meta['tvdb_episode_data'].get('episode_name'): - episode_name = meta['tvdb_episode_data'].get('episode_name') - if episode_name and isinstance(episode_name, str) and episode_name.strip(): - if 'episode' in episode_name.lower(): - meta['auto_episode_title'] = None - meta['tvdb_episode_title'] = None - else: - meta['tvdb_episode_title'] = episode_name.strip() - meta['auto_episode_title'] = episode_name.strip() - else: - meta['auto_episode_title'] = None - - # Process overview - if meta['tvdb_episode_data'].get('overview'): - overview = meta['tvdb_episode_data'].get('overview') - if overview and isinstance(overview, str) and overview.strip(): - meta['overview_meta'] = overview.strip() - else: - meta['overview_meta'] = None - else: - meta['overview_meta'] = None - - # Process season and episode numbers - if meta['tvdb_episode_data'].get('season_name'): - meta['tvdb_season_name'] = meta['tvdb_episode_data'].get('season_name') - - if meta['tvdb_episode_data'].get('season_number'): - meta['tvdb_season_number'] = meta['tvdb_episode_data'].get('season_number') - - if meta['tvdb_episode_data'].get('episode_number'): - meta['tvdb_episode_number'] = meta['tvdb_episode_data'].get('episode_number') - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('series_name'): - year = meta['tvdb_episode_data'].get('series_name') - year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) - if year_match: - meta['search_year'] = year_match.group(0) - else: - meta['search_year'] = "" - - elif isinstance(tvdb_episode_data, Exception): - console.print(f"[yellow]TVDb episode data retrieval failed: {tvdb_episode_data}") - + # Process episode data if this is a TV show + if meta['category'] == 'TV' and not meta.get('tv_pack', False) and meta.get('episode_int', 0) != 0: # Process TVMaze episode data tvmaze_episode_data = results[result_index] result_index += 1 if not isinstance(tvmaze_episode_data, Exception) and tvmaze_episode_data: meta['tvmaze_episode_data'] = tvmaze_episode_data - - # Only set title if not already set - if meta.get('auto_episode_title') is None and tvmaze_episode_data.get('name') is not None: - if 'episode' in tvmaze_episode_data.get('name', '').lower(): - meta['auto_episode_title'] = None - else: - meta['auto_episode_title'] = tvmaze_episode_data['name'] - - # Only set overview if not already set - if meta.get('overview_meta') is None and tvmaze_episode_data.get('overview') is not None: - meta['overview_meta'] = tvmaze_episode_data.get('overview', None) meta['we_asked_tvmaze'] = True elif isinstance(tvmaze_episode_data, Exception): console.print(f"[yellow]TVMaze episode data retrieval failed: {tvmaze_episode_data}") @@ -197,36 +147,24 @@ async def all_ids(meta, tvdb_api=None, tvdb_token=None): if not isinstance(tmdb_episode_data, Exception) and tmdb_episode_data: meta['tmdb_episode_data'] = tmdb_episode_data meta['we_checked_tmdb'] = True - - # Only set title if not already set - if meta.get('auto_episode_title') is None and tmdb_episode_data.get('name') is not None: - if 'episode' in tmdb_episode_data.get('name', '').lower(): - meta['auto_episode_title'] = None - else: - meta['auto_episode_title'] = tmdb_episode_data['name'] - - # Only set overview if not already set - if meta.get('overview_meta') is None and tmdb_episode_data.get('overview') is not None: - meta['overview_meta'] = tmdb_episode_data.get('overview', None) 
elif isinstance(tmdb_episode_data, Exception): console.print(f"[yellow]TMDb episode data retrieval failed: {tmdb_episode_data}") - elif meta.get('category') == 'TV' and meta.get('tv_pack', False): - if tvdb_api and tvdb_token: - # Process TVDb series data - tvdb_series_data = results[result_index] - result_index += 1 + elif meta['category'] == 'TV' and meta.get('tv_pack', False) and 'season_int' in meta: + # Process TMDb season data for TV packs + tmdb_season_data = results[result_index] + result_index += 1 - if tvdb_series_data and not isinstance(tvdb_series_data, Exception): - meta['tvdb_series_name'] = tvdb_series_data - meta['we_checked_tvdb'] = True + if not isinstance(tmdb_season_data, Exception) and tmdb_season_data: + meta['tmdb_season_data'] = tmdb_season_data + meta['we_checked_tmdb'] = True + elif isinstance(tmdb_season_data, Exception): + console.print(f"[yellow]TMDb season data retrieval failed: {tmdb_season_data}[/yellow]") - elif isinstance(tvdb_series_data, Exception): - console.print(f"[yellow]TVDb series data retrieval failed: {tvdb_series_data}") return meta -async def imdb_tmdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): +async def imdb_tmdb_tvdb(meta, filename): if meta['debug']: console.print("[yellow]IMDb, TMDb, and TVDb IDs are all present[/yellow]") # Core metadata tasks that run in parallel @@ -266,30 +204,31 @@ async def imdb_tmdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): # Filter out None tasks tasks = [task for task in tasks if task is not None] - if (meta.get('category') == 'TV' and not meta.get('tv_pack', False) and - 'season_int' in meta and 'episode_int' in meta and meta.get('episode_int') != 0): + if meta.get('category') == 'TV': + tvdb_task = tvdb_handler.get_tvdb_episodes( + meta['tvdb_id'], + meta.get('debug', False) + ) + tasks.append(tvdb_task) - if tvdb_api and tvdb_token: - tvdb_task = get_tvdb_episode_data( - meta['base_dir'], tvdb_token, meta.get('tvdb_id'), - meta.get('season_int'), meta.get('episode_int'), - api_key=tvdb_api, debug=meta.get('debug', False) + if not meta.get('tv_pack', False) and 'season_int' in meta and 'episode_int' in meta and meta.get('episode_int') != 0: + # Add TMDb episode details task + tmdb_episode_task = get_episode_details( + meta.get('tmdb_id'), + meta.get('season_int'), + meta.get('episode_int'), + debug=meta.get('debug', False) ) - tasks.append(tvdb_task) + tasks.append(tmdb_episode_task) - tasks.append( - get_episode_details( - meta.get('tmdb_id'), meta.get('season_int'), meta.get('episode_int'), + if meta.get('tv_pack') and 'season_int' in meta: + # For TV packs, we might want to get season details instead + tmdb_season_task = get_season_details( + meta.get('tmdb_id'), + meta.get('season_int'), debug=meta.get('debug', False) ) - ) - - elif meta.get('category') == 'TV' and meta.get('tv_pack', False) and tvdb_api and tvdb_token: - tvdb_series_task = get_tvdb_series_data( - meta['base_dir'], tvdb_token, meta.get('tvdb_id'), - api_key=tvdb_api, debug=meta.get('debug', False) - ) - tasks.append(tvdb_series_task) + tasks.append(tmdb_season_task) # Execute all tasks in parallel results = await asyncio.gather(*tasks, return_exceptions=True) @@ -309,7 +248,6 @@ async def imdb_tmdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): result_index += 1 if isinstance(imdb_info, dict): meta['imdb_info'] = imdb_info - meta['tv_year'] = imdb_info.get('tv_year', None) elif isinstance(imdb_info, Exception): console.print(f"[red]IMDb API call failed: {imdb_info}[/red]") @@ -328,56 +266,27 @@ async def 
imdb_tmdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): console.print(f"[yellow]TVMaze ID retrieval failed: {tvmaze_id}[/yellow]") meta['tvmaze_id'] = 0 - if meta.get('category') == 'TV' and not meta.get('tv_pack', False) and meta.get('episode_int') != 0: - if tvdb_api and tvdb_token and len(results) > result_index: + if meta.get('category') == 'TV': + if len(results) > result_index: tvdb_episode_data = results[result_index] result_index += 1 if tvdb_episode_data and not isinstance(tvdb_episode_data, Exception): - meta['tvdb_episode_data'] = tvdb_episode_data - meta['we_checked_tvdb'] = True - - if meta['tvdb_episode_data'].get('episode_name'): - episode_name = meta['tvdb_episode_data'].get('episode_name') - if episode_name and isinstance(episode_name, str) and episode_name.strip(): - if 'episode' in episode_name.lower(): - meta['auto_episode_title'] = None - meta['tvdb_episode_title'] = None - else: - meta['tvdb_episode_title'] = episode_name.strip() - meta['auto_episode_title'] = episode_name.strip() - else: - meta['auto_episode_title'] = None - - if meta['tvdb_episode_data'].get('overview'): - overview = meta['tvdb_episode_data'].get('overview') - if overview and isinstance(overview, str) and overview.strip(): - meta['overview_meta'] = overview.strip() - else: - meta['overview_meta'] = None + # tvdb_episode_data is a tuple: (episodes_list, series_name) + if isinstance(tvdb_episode_data, tuple) and len(tvdb_episode_data) == 2: + episodes_data, series_name = tvdb_episode_data + meta['tvdb_episode_data'] = episodes_data + if series_name: + meta['tvdb_series_name'] = series_name + meta['we_checked_tvdb'] = True else: - meta['overview_meta'] = None - - if meta['tvdb_episode_data'].get('season_name'): - meta['tvdb_season_name'] = meta['tvdb_episode_data'].get('season_name') - - if meta['tvdb_episode_data'].get('season_number'): - meta['tvdb_season_number'] = meta['tvdb_episode_data'].get('season_number') - - if meta['tvdb_episode_data'].get('episode_number'): - meta['tvdb_episode_number'] = meta['tvdb_episode_data'].get('episode_number') - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('series_name'): - year = meta['tvdb_episode_data'].get('series_name') - year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) - if year_match: - meta['search_year'] = year_match.group(0) - else: - meta['search_year'] = "" + console.print(f"[yellow]Unexpected TVDb data format: {type(tvdb_episode_data)}[/yellow]") elif isinstance(tvdb_episode_data, Exception): console.print(f"[yellow]TVDb episode data retrieval failed: {tvdb_episode_data}[/yellow]") - if len(results) > result_index: + # Process TMDb episode data only if we added that task + if (not meta.get('tv_pack', False) and 'season_int' in meta and + 'episode_int' in meta and meta.get('episode_int') != 0 and len(results) > result_index): tmdb_episode_data = results[result_index] result_index += 1 @@ -385,31 +294,25 @@ async def imdb_tmdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): meta['tmdb_episode_data'] = tmdb_episode_data meta['we_checked_tmdb'] = True - if meta.get('auto_episode_title') is None and tmdb_episode_data.get('name') is not None: - if 'episode' in tmdb_episode_data.get('name', '').lower(): - meta['auto_episode_title'] = None - else: - meta['auto_episode_title'] = tmdb_episode_data['name'] - - if meta.get('overview_meta') is None and tmdb_episode_data.get('overview') is not None: - meta['overview_meta'] = tmdb_episode_data.get('overview', None) elif isinstance(tmdb_episode_data, Exception): 
console.print(f"[yellow]TMDb episode data retrieval failed: {tmdb_episode_data}[/yellow]") - elif meta.get('category') == 'TV' and meta.get('tv_pack', False) and tvdb_api and tvdb_token: - tvdb_series_data = results[result_index] - result_index += 1 + # Process TMDb season data for TV packs + elif (meta.get('tv_pack', False) and 'season_int' in meta and len(results) > result_index): + tmdb_season_data = results[result_index] + result_index += 1 - if tvdb_series_data and not isinstance(tvdb_series_data, Exception): - meta['tvdb_series_name'] = tvdb_series_data - meta['we_checked_tvdb'] = True - elif isinstance(tvdb_series_data, Exception): - console.print(f"[yellow]TVDb series data retrieval failed: {tvdb_series_data}[/yellow]") + if not isinstance(tmdb_season_data, Exception) and tmdb_season_data: + meta['tmdb_season_data'] = tmdb_season_data + meta['we_checked_tmdb'] = True + + elif isinstance(tmdb_season_data, Exception): + console.print(f"[yellow]TMDb season data retrieval failed: {tmdb_season_data}[/yellow]") return meta -async def imdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): +async def imdb_tvdb(meta, filename): if meta['debug']: console.print("[yellow]Both IMDb and TVDB IDs are present[/yellow]") tasks = [ @@ -436,81 +339,12 @@ async def imdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): ) ] - # Add TVDb tasks if we have credentials and it's a TV show with episodes - add_tvdb_tasks = ( - tvdb_api and tvdb_token and - 'season_int' in meta and 'episode_int' in meta and - meta.get('category') == 'TV' and - not meta.get('tv_pack', False) and - meta.get('episode_int') != 0 - ) - - if add_tvdb_tasks: - tvdb_episode_data = await get_tvdb_episode_data( - meta['base_dir'], - tvdb_token, - meta.get('tvdb_id'), - meta.get('season_int'), - meta.get('episode_int'), - api_key=tvdb_api, - debug=meta.get('debug', False) - ) - - if tvdb_episode_data: - console.print("[green]TVDB episode data retrieved successfully.[/green]") - meta['tvdb_episode_data'] = tvdb_episode_data - meta['we_checked_tvdb'] = True - - # Process episode name - if meta['tvdb_episode_data'].get('episode_name'): - episode_name = meta['tvdb_episode_data'].get('episode_name') - if episode_name and isinstance(episode_name, str) and episode_name.strip(): - if 'episode' in episode_name.lower(): - meta['auto_episode_title'] = None - meta['tvdb_episode_title'] = None - else: - meta['tvdb_episode_title'] = episode_name.strip() - meta['auto_episode_title'] = episode_name.strip() - else: - meta['auto_episode_title'] = None - - # Process overview - if meta['tvdb_episode_data'].get('overview'): - overview = meta['tvdb_episode_data'].get('overview') - if overview and isinstance(overview, str) and overview.strip(): - meta['overview_meta'] = overview.strip() - else: - meta['overview_meta'] = None - else: - meta['overview_meta'] = None - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('series_name'): - year = meta['tvdb_episode_data'].get('series_name') - year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) - if year_match: - meta['search_year'] = year_match.group(0) - else: - meta['search_year'] = "" - - add_name_tasks = ( - tvdb_api and tvdb_token and - meta.get('category') == 'TV' and - meta.get('tv_pack', False) - ) - - if add_name_tasks: - tvdb_series_data = await get_tvdb_series_data( - meta['base_dir'], - tvdb_token, - meta.get('tvdb_id'), - api_key=tvdb_api, - debug=meta.get('debug', False) + if meta.get('category') == 'TV': + tvdb_episodes_task = tvdb_handler.get_tvdb_episodes( + 
meta['tvdb_id'], + meta.get('debug', False) ) - - if tvdb_series_data: - console.print("[green]TVDB series data retrieved successfully.[/green]") - meta['tvdb_series_name'] = tvdb_series_data - meta['we_checked_tvdb'] = True + tasks.append(tvdb_episodes_task) results = await asyncio.gather(*tasks, return_exceptions=True) tmdb_result, tvmaze_id, imdb_info_result = results[:3] @@ -521,7 +355,6 @@ async def imdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): if isinstance(imdb_info_result, dict): meta['imdb_info'] = imdb_info_result - meta['tv_year'] = imdb_info_result.get('tv_year', None) elif isinstance(imdb_info_result, Exception): console.print(f"[red]IMDb API call failed: {imdb_info_result}[/red]") @@ -529,6 +362,23 @@ async def imdb_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): else: console.print("[red]Unexpected IMDb response, setting imdb_info to empty.[/red]") meta['imdb_info'] = {} + + # Process TVDB episodes data if it was requested + if meta.get('category') == 'TV' and len(results) > 3: + tvdb_episode_data = results[3] + if tvdb_episode_data and not isinstance(tvdb_episode_data, Exception): + # tvdb_episode_data is a tuple: (episodes_list, series_name) + if isinstance(tvdb_episode_data, tuple) and len(tvdb_episode_data) == 2: + episodes_data, series_name = tvdb_episode_data + meta['tvdb_episode_data'] = episodes_data + if series_name: + meta['tvdb_series_name'] = series_name + meta['we_checked_tvdb'] = True + else: + console.print(f"[yellow]Unexpected TVDb data format: {type(tvdb_episode_data)}[/yellow]") + elif isinstance(tvdb_episode_data, Exception): + console.print(f"[yellow]TVDb episode data retrieval failed: {tvdb_episode_data}[/yellow]") + return meta @@ -583,6 +433,14 @@ async def imdb_tmdb(meta, filename): debug=meta.get('debug', False) ) ) + elif meta.get('tv_pack', False) and 'season_int' in meta: + coroutines.append( + get_season_details( + meta.get('tmdb_id'), + meta.get('season_int'), + debug=meta.get('debug', False) + ) + ) # Gather results results = await asyncio.gather(*coroutines, return_exceptions=True) @@ -609,7 +467,6 @@ async def imdb_tmdb(meta, filename): # Process IMDb info if isinstance(imdb_info_result, dict): meta['imdb_info'] = imdb_info_result - meta['tv_year'] = imdb_info_result.get('tv_year', None) elif isinstance(imdb_info_result, Exception): console.print(f"[red]IMDb API call failed: {imdb_info_result}[/red]") @@ -643,36 +500,51 @@ async def imdb_tmdb(meta, filename): meta['tvmaze_id'] = 0 # Process TMDb episode details if they were included - if len(results) > 3: - episode_details_result = results[3] - if isinstance(episode_details_result, dict): - meta['tmdb_episode_data'] = episode_details_result - meta['we_checked_tmdb'] = True + if not meta.get('tv_pack', False): + if len(results) > 3: + episode_details_result = results[3] + if isinstance(episode_details_result, dict): + meta['tmdb_episode_data'] = episode_details_result + meta['we_checked_tmdb'] = True + + elif isinstance(episode_details_result, Exception): + console.print(f"[red]TMDb episode details API call failed: {episode_details_result}[/red]") + else: + if 'season_int' in meta and len(results) > 3: + season_details_result = results[3] + if isinstance(season_details_result, dict): + meta['tmdb_season_data'] = season_details_result + meta['we_checked_tmdb'] = True + + elif isinstance(season_details_result, Exception): + console.print(f"[red]TMDb season details API call failed: {season_details_result}[/red]") - elif isinstance(episode_details_result, Exception): - 
console.print(f"[red]TMDb episode details API call failed: {episode_details_result}[/red]") return meta -async def get_tvmaze_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): - if meta['debug']: - console.print("[yellow]Both TVMaze and TVDb IDs are present[/yellow]") +async def get_tvmaze_tvdb(filename, search_year, imdb, tmdb, manual_date=None, tvmaze_manual=None, year='', debug=False, tv_movie=False): + tvdb_data = None + tvmaze = 0 + tvdb = 0 + if debug: + console.print("[yellow]Finding both TVMaze and TVDb IDs[/yellow]") # Core metadata tasks that run in parallel tasks = [ search_tvmaze( - filename, meta['search_year'], meta.get('imdb_id', 0), meta.get('tvdb_id', 0), - manual_date=meta.get('manual_date'), - tvmaze_manual=meta.get('tvmaze_manual'), - debug=meta.get('debug', False), - return_full_tuple=False + filename, search_year, imdb, 0, + manual_date=manual_date, + tvmaze_manual=tvmaze_manual, + debug=debug, + return_full_tuple=True ) ] - if tvdb_api and tvdb_token: + if (imdb and imdb != 0) or (tmdb and tmdb != 0): tasks.append( - get_tvdb_series( - meta['base_dir'], meta.get('title', ''), meta.get('year', ''), - apikey=tvdb_api, token=tvdb_token, debug=meta.get('debug', False) - ) + tvdb_handler.get_tvdb_by_external_id(imdb=imdb, tmdb=tmdb, debug=debug, tv_movie=tv_movie) + ) + else: + tasks.append( + tvdb_handler.search_tvdb_series(filename=filename, year=year, debug=debug) ) results = await asyncio.gather(*tasks, return_exceptions=True) @@ -681,138 +553,243 @@ async def get_tvmaze_tvdb(meta, filename, tvdb_api=None, tvdb_token=None): tvmaze_result = results[0] if isinstance(tvmaze_result, tuple) and len(tvmaze_result) == 3: # Handle tuple return: (tvmaze_id, imdbID, tvdbID) - tvmaze_id, imdb_id, tvdb_id = tvmaze_result - meta['tvmaze_id'] = tvmaze_id if isinstance(tvmaze_id, int) else 0 - - # Set tvdb_id if not already set and we got a valid one - if not meta.get('tvdb_id', 0) and isinstance(tvdb_id, int) and tvdb_id > 0: - meta['tvdb_id'] = tvdb_id - if meta.get('debug'): - console.print(f"[green]Set TVDb ID from TVMaze: {tvdb_id}[/green]") - if not meta.get('imdb_id', 0) and isinstance(imdb_id, str) and imdb_id.strip(): - meta['imdb_id'] = imdb_id - if meta.get('debug'): - console.print(f"[green]Set IMDb ID from TVMaze: {imdb_id}[/green]") + tvmaze = tvmaze_result[0] if isinstance(tvmaze_result[0], int) else 0 elif isinstance(tvmaze_result, int): - meta['tvmaze_id'] = tvmaze_result + tvmaze = tvmaze_result elif isinstance(tvmaze_result, Exception): console.print(f"[red]TVMaze API call failed: {tvmaze_result}[/red]") - meta['tvmaze_id'] = 0 # Set default value if an exception occurred + tvmaze = 0 # Set default value if an exception occurred else: console.print(f"[yellow]Unexpected TVMaze result type: {type(tvmaze_result)}[/yellow]") - meta['tvmaze_id'] = 0 + tvmaze = 0 # Process TVDb results if we added that task - if len(results) > 1 and tvdb_api and tvdb_token: + if (imdb and imdb != 0) or (tmdb and tmdb != 0): + tvdb = results[1] + elif len(results) > 1: tvdb_result = results[1] if tvdb_result and not isinstance(tvdb_result, Exception): - meta['tvdb_id'] = tvdb_result - if meta.get('debug'): - console.print(f"[green]Got TVDb series data: {tvdb_result}[/green]") + # Handle tuple return: (series_results, series_id) + if isinstance(tvdb_result, tuple) and len(tvdb_result) == 2: + series_results, series_id = tvdb_result + if series_id: + tvdb = int(series_id) + if debug: + console.print(f"[green]Got TVDb series ID: {series_id}[/green]") + if series_results: + tvdb_data = 
series_results + else: + console.print(f"[yellow]Unexpected TVDb result format: {tvdb_result}[/yellow]") elif isinstance(tvdb_result, Exception): console.print(f"[yellow]TVDb series data retrieval failed: {tvdb_result}[/yellow]") - return meta + if not tvdb and tvmaze and isinstance(tvmaze_result, tuple) and len(tvmaze_result) == 3: + tvdb = tvmaze_result[2] if isinstance(tvmaze_result[2], int) else 0 + if debug: + console.print(f"[blue]TVMaze ID: {tvmaze} | TVDb ID: {tvdb}[/blue]") + + return tvmaze, tvdb, tvdb_data -async def get_tv_data(meta, base_dir, tvdb_api=None, tvdb_token=None): +async def get_tv_data(meta): + meta['tvdb_series_name'] = None if not meta.get('tv_pack', False) and meta.get('episode_int') != 0: - if not meta.get('auto_episode_title') or not meta.get('overview_meta'): - # prioritze tvdb metadata if available - if tvdb_api and tvdb_token and not meta.get('we_checked_tvdb', False): - if meta['debug']: - console.print("[yellow]Fetching TVDb metadata...") - if meta.get('tvdb_id') and meta['tvdb_id'] != 0: - meta['tvdb_season_int'], meta['tvdb_episode_int'] = await get_tvdb_series_episodes(base_dir, tvdb_token, meta.get('tvdb_id'), meta.get('season_int'), meta.get('episode_int'), tvdb_api, debug=meta.get('debug', False)) - tvdb_episode_data = await get_tvdb_episode_data(base_dir, tvdb_token, meta['tvdb_id'], meta.get('tvdb_season_int'), meta.get('tvdb_episode_int'), api_key=tvdb_api, debug=meta.get('debug', False)) - if tvdb_episode_data: - meta['tvdb_episode_data'] = tvdb_episode_data - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('episode_name') and meta.get('auto_episode_title') is None: - episode_name = meta['tvdb_episode_data'].get('episode_name') - if episode_name and isinstance(episode_name, str) and episode_name.strip(): - if 'episode' in episode_name.lower(): - meta['auto_episode_title'] = None - meta['tvdb_episode_title'] = None - else: - meta['tvdb_episode_title'] = episode_name.strip() - meta['auto_episode_title'] = episode_name.strip() - else: - meta['auto_episode_title'] = None - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('overview') and meta.get('original_language', "") == "en": - overview = meta['tvdb_episode_data'].get('overview') - if overview and isinstance(overview, str) and overview.strip(): - meta['overview_meta'] = overview.strip() - else: - meta['overview_meta'] = None - elif meta.get('original_language') != "en": - meta['overview_meta'] = None - else: - meta['overview_meta'] = None - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('season_name'): - meta['tvdb_season_name'] = meta['tvdb_episode_data'].get('season_name') - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('season_number'): - meta['tvdb_season_number'] = meta['tvdb_episode_data'].get('season_number') - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('episode_number'): - meta['tvdb_episode_number'] = meta['tvdb_episode_data'].get('episode_number') - - if meta.get('tvdb_episode_data') and meta['tvdb_episode_data'].get('series_name'): - year = meta['tvdb_episode_data'].get('series_name') - year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) - if year_match: - meta['search_year'] = year_match.group(0) - else: - meta['search_year'] = "" - - # fallback to tvmaze data if tvdb data is available - if meta.get('auto_episode_title') is None or meta.get('overview_meta') is None and (not meta.get('we_asked_tvmaze', False) and meta.get('episode_overview', None)): - tvmaze_episode_data 
= await get_tvmaze_episode_data(meta.get('tvmaze_id'), meta.get('season_int'), meta.get('episode_int')) - if tvmaze_episode_data: - meta['tvmaze_episode_data'] = tvmaze_episode_data - if meta.get('auto_episode_title') is None and tvmaze_episode_data.get('name') is not None: - if 'episode' in tvmaze_episode_data.get("name").lower(): - meta['auto_episode_title'] = None - else: - meta['auto_episode_title'] = tvmaze_episode_data['name'] - if meta.get('overview_meta') is None and tvmaze_episode_data.get('overview') is not None: - meta['overview_meta'] = tvmaze_episode_data.get('overview', None) - - # fallback to tmdb data if no other data is not available - if (meta.get('auto_episode_title') is None or meta.get('overview_meta') is None) and (not meta.get('we_checked_tmdb', False) and meta.get('episode_overview', None)): - if 'tvdb_episode_int' in meta and meta.get('tvdb_episode_int') != 0 and meta.get('tvdb_episode_int') != meta.get('episode_int'): - episode = meta.get('episode_int') - season = meta.get('tvdb_season_int') - if meta['debug']: - console.print(f"[yellow]Using absolute episode number from TVDb: {episode}[/yellow]") - console.print(f"[yellow]Using matching season number from TVDb: {season}[/yellow]") + if (not meta.get('we_checked_tvdb', False) and not meta.get('we_asked_tvmaze', False)) and meta.get('tvmaze_id') != 0 and meta['tvmaze_id'] != 0 and not meta.get('anime', False): + meta = await get_tvdb_tvmaze_tmdb_episode_data(meta) + elif meta.get('tvdb_id', 0) and not meta.get('we_checked_tvdb', False): + tvdb_episode_data, tvdb_name = await tvdb_handler.get_tvdb_episodes(meta['tvdb_id']) + if tvdb_episode_data: + meta['tvdb_episode_data'] = tvdb_episode_data + if tvdb_name: + meta['tvdb_series_name'] = tvdb_name + + if meta.get('tvdb_series_name', None): + year = meta['tvdb_series_name'] + year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) + if year_match: + meta['search_year'] = year_match.group(0) + else: + meta['search_year'] = "" + + if meta.get('tvdb_episode_data', None) and meta.get('tvdb_id', 0): + try: + meta['tvdb_season_name'], meta['tvdb_episode_name'], meta['tvdb_overview'], meta['tvdb_season'], meta['tvdb_episode'], meta['tvdb_episode_year'], meta['tvdb_episode_id'] = await tvdb_handler.get_specific_episode_data(meta['tvdb_episode_data'], meta.get('season_int', None), meta.get('episode_int', None), debug=meta.get('debug', False)) + except Exception as e: + console.print(f"[red]Error fetching TVDb episode data: {e}[/red]") + + if meta.get('tvdb_episode_name', None): + if 'episode' in meta['tvdb_episode_name'].lower() or 'tba' in meta['tvdb_episode_name'].lower(): + meta['auto_episode_title'] = None + else: + meta['auto_episode_title'] = meta['tvdb_episode_name'] + if meta.get('tvdb_overview', None): + meta['overview_meta'] = meta['tvdb_overview'] + if meta.get('tvdb_season', None) is not None and meta['tvdb_season'] != meta.get('season_int', None) and not meta.get('season', None) and not meta.get('no_season', False) and not meta.get('manual_date', None): + meta['season_int'] = int(meta['tvdb_season']) + meta['season'] = f"S{meta['tvdb_season']:02d}" + if meta.get('tvdb_episode', None) is not None and meta['tvdb_episode'] != meta.get('episode_int', None) and not meta.get('episode', None) and not meta.get('manual_date', None): + meta['episode_int'] = int(meta['tvdb_episode']) + meta['episode'] = f"E{meta['tvdb_episode']:02d}" + + # fallback to tvmaze data if tvdb data is available + if 'tvmaze_episode_data' not in meta or meta['tvmaze_episode_data'] is None: + 
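
The block above discards placeholder TVDb episode names before promoting them to auto_episode_title, and rebuilds the zero-padded Sxx/Exx strings whenever TVDb disagrees with the numbers parsed from the filename. A small illustrative helper showing just that filtering and formatting; the function name and sample values are invented for the example.

def apply_tvdb_episode(name, tvdb_season, tvdb_episode):
    # Placeholder titles such as "Episode 3" or "TBA" are discarded.
    auto_title = None
    if name and 'episode' not in name.lower() and 'tba' not in name.lower():
        auto_title = name
    # Zero-padded season/episode strings, matching the f"S{n:02d}" style above.
    season = f"S{tvdb_season:02d}"
    episode = f"E{tvdb_episode:02d}"
    return auto_title, season, episode

print(apply_tvdb_episode("Pilot", 1, 1))      # ('Pilot', 'S01', 'E01')
print(apply_tvdb_episode("Episode 3", 1, 3))  # (None, 'S01', 'E03')
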
meta['tvmaze_episode_data'] = {} + tvmaze_episode_data = await get_tvmaze_episode_data(meta.get('tvmaze_id'), meta.get('season_int'), meta.get('episode_int'), meta) + if tvmaze_episode_data: + meta['tvmaze_episode_data'] = tvmaze_episode_data + if meta.get('auto_episode_title') is None or meta.get('overview_meta') is None: + if meta.get('auto_episode_title') is None and meta['tvmaze_episode_data'].get('name') is not None: + if 'episode' in meta['tvmaze_episode_data'].get("name").lower() or 'tba' in meta['tvmaze_episode_data'].get("name").lower(): + meta['auto_episode_title'] = None else: - episode = meta.get('episode_int') - season = meta.get('season_int') - if not meta.get('we_checked_tmdb', False): - if meta['debug']: - console.print("[yellow]Fetching TMDb episode metadata...") - episode_details = await get_episode_details(meta.get('tmdb_id'), season, episode, debug=meta.get('debug', False)) + meta['auto_episode_title'] = meta['tvmaze_episode_data']['name'] + if meta.get('overview_meta') is None and meta['tvmaze_episode_data'].get('overview') is not None: + meta['overview_meta'] = meta['tvmaze_episode_data'].get('overview', None) + + # fallback to tmdb data if no other data is not available + if (meta.get('auto_episode_title') is None or meta.get('overview_meta') is None) and meta.get('episode_overview', None): + if 'tvdb_episode_int' in meta and meta.get('tvdb_episode_int') != 0 and meta.get('tvdb_episode_int') != meta.get('episode_int'): + episode = meta.get('episode_int') + season = meta.get('tvdb_season_int') + if meta['debug']: + console.print(f"[yellow]Using absolute episode number from TVDb: {episode}[/yellow]") + console.print(f"[yellow]Using matching season number from TVDb: {season}[/yellow]") + else: + episode = meta.get('episode_int') + season = meta.get('season_int') + if meta['debug']: + console.print("[yellow]Fetching TMDb episode metadata...") + if not meta['tmdb_episode_data']: + episode_details = await get_episode_details(meta.get('tmdb_id'), season, episode, debug=meta.get('debug', False)) + else: + episode_details = meta.get('tmdb_episode_data', None) + if meta.get('auto_episode_title') is None and episode_details.get('name') is not None: + if 'episode' in episode_details.get("name").lower() or 'tba' in episode_details.get("name").lower(): + meta['auto_episode_title'] = None else: - episode_details = meta.get('tmdb_episode_data', None) - if meta.get('auto_episode_title') is None and episode_details.get('name') is not None: - if 'episode' in episode_details.get("name").lower(): - meta['auto_episode_title'] = None - else: - meta['auto_episode_title'] = episode_details['name'] - if meta.get('overview_meta') is None and episode_details.get('overview') is not None: - meta['overview_meta'] = episode_details.get('overview', None) - - if 'tvdb_season_int' in meta and meta['tvdb_season_int'] and meta['tvdb_episode_int'] != 0: - meta['episode_int'] = meta['tvdb_episode_int'] - meta['season_int'] = meta['tvdb_season_int'] - meta['season'] = "S" + str(meta['season_int']).zfill(2) - meta['episode'] = "E" + str(meta['episode_int']).zfill(2) + meta['auto_episode_title'] = episode_details['name'] + if meta.get('overview_meta') is None and episode_details.get('overview') is not None: + meta['overview_meta'] = episode_details.get('overview', None) + elif meta.get('tv_pack', False): - if tvdb_api and tvdb_token: - meta['tvdb_series_name'] = await get_tvdb_series_data(base_dir, tvdb_token, meta.get('tvdb_id'), tvdb_api, debug=meta.get('debug', False)) + if not meta.get('we_checked_tvdb', 
False) and meta.get('tvdb_id', 0): + tvdb_episode_data, tvdb_name = await tvdb_handler.get_tvdb_episodes(meta['tvdb_id']) + if tvdb_episode_data: + meta['tvdb_episode_data'] = tvdb_episode_data + if tvdb_name: + meta['tvdb_series_name'] = tvdb_name + if meta.get('tvdb_series_name', None): + year = meta['tvdb_series_name'] + year_match = re.search(r'\b(19\d\d|20[0-3]\d)\b', year) + if year_match: + meta['search_year'] = year_match.group(0) + else: + meta['search_year'] = "" + + if meta.get('tvdb_episode_data', None) and meta.get('tvdb_id', 0): + try: + meta['tvdb_season_name'], meta['tvdb_episode_name'], meta['tvdb_overview'], meta['tvdb_season'], meta['tvdb_episode'], meta['tvdb_episode_year'], meta['tvdb_episode_id'] = await tvdb_handler.get_specific_episode_data(meta['tvdb_episode_data'], meta.get('season_int', None), meta.get('episode_int', None), debug=meta.get('debug', False)) + except Exception as e: + console.print(f"[red]Error fetching TVDb episode data: {e}[/red]") + + if meta.get('tvdb_episode_id', None): + meta['tvdb_imdb_id'] = await tvdb_handler.get_imdb_id_from_tvdb_episode_id(meta['tvdb_episode_id'], debug=meta.get('debug', False)) + + return meta + + +async def get_tvdb_tvmaze_tmdb_episode_data(meta): + if meta['debug']: + console.print("[yellow]Gathering TVDb and TVMaze episode data[/yellow]") + + tasks = [] + task_map = {} # Track which tasks we added + + # Add TVMaze episode data task + if meta.get('tvmaze_id'): + if meta['debug']: + console.print("[yellow]Fetching TVMaze episode data...[/yellow]") + tasks.append( + get_tvmaze_episode_data( + meta.get('tvmaze_id'), + meta.get('season_int'), + meta.get('episode_int') + ) + ) + task_map['tvmaze'] = len(tasks) - 1 + + # Add TVDb episode data task + if meta.get('tvdb_id'): + if meta['debug']: + console.print("[yellow]Fetching TVDb episode data...[/yellow]") + tasks.append( + tvdb_handler.get_tvdb_episodes( + meta['tvdb_id'], + meta.get('debug', False) + ) + ) + task_map['tvdb'] = len(tasks) - 1 + + if meta.get('tmdb_id'): + if meta['debug']: + console.print("[yellow]Fetching TMDb episode data...[/yellow]") + tasks.append( + get_episode_details( + meta.get('tmdb_id'), + meta.get('season_int'), + meta.get('episode_int'), + debug=meta.get('debug', False) + ) + ) + task_map['tmdb'] = len(tasks) - 1 + + if not tasks: + return meta + + results = await asyncio.gather(*tasks, return_exceptions=True) + + # Process TVMaze results + if 'tvmaze' in task_map: + tvmaze_episode_data = results[task_map['tvmaze']] + if tvmaze_episode_data and not isinstance(tvmaze_episode_data, Exception): + meta['tvmaze_episode_data'] = tvmaze_episode_data + meta['we_asked_tvmaze'] = True + + if meta['debug']: + console.print("[green]TVMaze episode data retrieved successfully.[/green]") + elif isinstance(tvmaze_episode_data, Exception): + console.print(f"[yellow]TVMaze episode data retrieval failed: {tvmaze_episode_data}[/yellow]") + + # Process TVDB results + if 'tvdb' in task_map: + tvdb_episodes_result = results[task_map['tvdb']] + if tvdb_episodes_result and not isinstance(tvdb_episodes_result, Exception): + if isinstance(tvdb_episodes_result, tuple) and len(tvdb_episodes_result) == 2: + tvdb_episode_data, tvdb_name = tvdb_episodes_result + if tvdb_episode_data: + meta['tvdb_episode_data'] = tvdb_episode_data + meta['we_checked_tvdb'] = True + if meta['debug']: + console.print(f"[green]TVDb episodes list retrieved with {len(tvdb_episode_data)} episodes[/green]") + if tvdb_name: + meta['tvdb_series_name'] = tvdb_name + if meta['debug']: + 
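
get_tvdb_tvmaze_tmdb_episode_data below keeps a task_map alongside the task list so results can be read back by provider name rather than by fragile positional indexes, since any of the three providers may be absent. A reduced sketch of that bookkeeping, using invented placeholder coroutines:

import asyncio

async def fake_provider(name):
    # stand-in for a per-provider episode-metadata coroutine
    return {"provider": name}

async def gather_optional(have_tvmaze=True, have_tvdb=False, have_tmdb=True):
    tasks, task_map = [], {}
    if have_tvmaze:
        tasks.append(fake_provider("tvmaze"))
        task_map['tvmaze'] = len(tasks) - 1
    if have_tvdb:
        tasks.append(fake_provider("tvdb"))
        task_map['tvdb'] = len(tasks) - 1
    if have_tmdb:
        tasks.append(fake_provider("tmdb"))
        task_map['tmdb'] = len(tasks) - 1

    if not tasks:
        return {}

    results = await asyncio.gather(*tasks, return_exceptions=True)
    # Look results up by provider name, so a missing provider never
    # shifts the meaning of a positional index.
    return {name: results[idx] for name, idx in task_map.items()}

print(asyncio.run(gather_optional()))
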
console.print(f"[green]TVDb series name: {tvdb_name}[/green]") + else: + console.print(f"[yellow]Unexpected TVDb episodes result format: {tvdb_episodes_result}[/yellow]") + elif isinstance(tvdb_episodes_result, Exception): + console.print(f"[yellow]TVDb episode data retrieval failed: {tvdb_episodes_result}[/yellow]") + + # Process TMDb episode details results + if 'tmdb' in task_map: + tmdb_episode_data = results[task_map['tmdb']] + if not isinstance(tmdb_episode_data, Exception) and tmdb_episode_data: + meta['tmdb_episode_data'] = tmdb_episode_data + meta['we_checked_tmdb'] = True + if meta['debug']: + console.print("[green]TMDb episode data retrieved successfully.[/green]") + elif isinstance(tmdb_episode_data, Exception): + console.print(f"[yellow]TMDb episode data retrieval failed: {tmdb_episode_data}[/yellow]") + return meta diff --git a/src/nfo_link.py b/src/nfo_link.py new file mode 100644 index 000000000..d0d3fcb41 --- /dev/null +++ b/src/nfo_link.py @@ -0,0 +1,314 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import os +import re +import subprocess +import datetime +from src.console import console +from data.config import config + + +async def create_season_nfo(season_folder, season_number, season_year, tvdbid, tvmazeid, plot, outline): + """Create a season.nfo file in the given season folder.""" + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + nfo_content = f''' + + + + false + {now} + Season {season_number} + {season_year} + Season {season_number} + {tvdbid} + {tvdbid} + {tvmazeid} + {tvmazeid} + {season_number} +''' + nfo_path = os.path.join(season_folder, "season.nfo") + with open(nfo_path, "w", encoding="utf-8") as f: + f.write(nfo_content) + return nfo_path + + +async def nfo_link(meta): + """Create an Emby-compliant NFO file from metadata""" + try: + # Get basic info + imdb_info = meta.get('imdb_info', {}) + title = imdb_info.get('title', meta.get('title', '')) + if meta['category'] == "MOVIE": + year = imdb_info.get('year', meta.get('year', '')) + else: + year = meta.get('search_year', '') + plot = meta.get('overview', '') + rating = imdb_info.get('rating', '') + runtime = imdb_info.get('runtime', meta.get('runtime', '')) + genres = imdb_info.get('genres', meta.get('genres', '')) + country = imdb_info.get('country', meta.get('country', '')) + aka = imdb_info.get('aka', title) # Fallback to title if no aka + tagline = imdb_info.get('plot', '') + premiered = meta.get('release_date', '') + + # IDs + imdb_id = imdb_info.get('imdbID', meta.get('imdb_id', '')).replace('tt', '') + tmdb_id = meta.get('tmdb_id', '') + tvdb_id = meta.get('tvdb_id', '') + + # Cast and crew + cast = meta.get('cast', []) + directors = meta.get('directors', []) + studios = meta.get('studios', []) + + # Build NFO XML content with proper structure + nfo_content = ''' +''' + + # Add plot with CDATA + if plot: + nfo_content += f'\n ' + + # Add tagline if available + if tagline: + nfo_content += f'\n ' + nfo_content += f'\n {tagline}' + + # Basic metadata + nfo_content += f'\n {title}' + nfo_content += f'\n {aka}' + + # Add cast/actors + for actor in cast: + name = actor.get('name', '') + role = actor.get('character', actor.get('role', '')) + tmdb_actor_id = actor.get('id', '') + if name: + nfo_content += '\n ' + nfo_content += f'\n {name}' + if role: + nfo_content += f'\n {role}' + nfo_content += '\n Actor' + if tmdb_actor_id: + nfo_content += f'\n {tmdb_actor_id}' + nfo_content += '\n ' + + # Add directors + for director in directors: + director_name = 
director.get('name', director) if isinstance(director, dict) else director + director_id = director.get('id', '') if isinstance(director, dict) else '' + if director_name: + nfo_content += '\n {director_name}' + + # Add rating and year + if rating: + nfo_content += f'\n {rating}' + if year: + nfo_content += f'\n {year}' + + nfo_content += f'\n {title}' + + # Add IDs + if imdb_id: + nfo_content += f'\n tt{imdb_id}' + if tvdb_id: + nfo_content += f'\n {tvdb_id}' + if tmdb_id: + nfo_content += f'\n {tmdb_id}' + + # Add dates + if premiered: + nfo_content += f'\n {premiered}' + nfo_content += f'\n {premiered}' + + # Add runtime (convert to minutes if needed) + if runtime: + # Handle runtime in different formats + runtime_minutes = runtime + if isinstance(runtime, str) and 'min' in runtime: + runtime_minutes = runtime.replace('min', '').strip() + nfo_content += f'\n {runtime_minutes}' + + # Add country + if country: + nfo_content += f'\n {country}' + + # Add genres + if genres: + if isinstance(genres, str): + genre_list = [g.strip() for g in genres.split(',')] + else: + genre_list = genres + for genre in genre_list: + if genre: + nfo_content += f'\n {genre}' + + # Add studios + for studio in studios: + studio_name = studio.get('name', studio) if isinstance(studio, dict) else studio + if studio_name: + nfo_content += f'\n {studio_name}' + + # Add unique IDs + if tmdb_id: + nfo_content += f'\n {tmdb_id}' + if imdb_id: + nfo_content += f'\n tt{imdb_id}' + if tvdb_id: + nfo_content += f'\n {tvdb_id}' + + # Add legacy ID + if imdb_id: + nfo_content += f'\n tt{imdb_id}' + + nfo_content += '\n' + + # Save NFO file + movie_name = meta.get('title', 'movie') + # Remove or replace invalid characters: < > : " | ? * \ / + movie_name = re.sub(r'[<>:"|?*\\/]', '', movie_name) + meta['linking_failed'] = False + link_dir = await linking(meta, movie_name, year) + + uuid = meta.get('uuid') + filelist = meta.get('filelist', []) + if len(filelist) == 1 and os.path.isfile(filelist[0]) and not meta.get('keep_folder'): + # Single file - create symlink in the target folder + src_file = filelist[0] + filename = os.path.splitext(os.path.basename(src_file))[0] + else: + filename = uuid + + if meta['category'] == "TV" and link_dir is not None and not meta.get('linking_failed', False): + season_number = meta.get('season_int') or meta.get('season') or "1" + season_year = meta.get('search_year') or meta.get('year') or "" + tvdbid = meta.get('tvdb_id', '') + tvmazeid = meta.get('tvmaze_id', '') + plot = meta.get('overview', '') + outline = imdb_info.get('plot', '') + + season_folder = link_dir + if not os.path.exists(f"{season_folder}/season.nfo"): + await create_season_nfo( + season_folder, season_number, season_year, tvdbid, tvmazeid, plot, outline + ) + nfo_file_path = os.path.join(season_folder, "season.nfo") + + elif link_dir is not None and not meta.get('linking_failed', False): + nfo_file_path = os.path.join(link_dir, f"{filename}.nfo") + else: + if meta.get('linking_failed', False): + console.print("[red]Linking failed, saving NFO in data/nfos[/red]") + nfo_dir = os.path.join(f"{meta['base_dir']}/data/nfos/{meta['uuid']}/") + os.makedirs(nfo_dir, exist_ok=True) + nfo_file_path = os.path.join(nfo_dir, f"{filename}.nfo") + with open(nfo_file_path, 'w', encoding='utf-8') as f: + f.write(nfo_content) + + if meta['debug']: + console.print(f"[green]Emby NFO created at {nfo_file_path}") + + return nfo_file_path + + except Exception as e: + console.print(f"[red]Failed to create Emby NFO: {e}") + return None + + +async def 
linking(meta, movie_name, year): + if meta['category'] == "MOVIE": + if not meta['is_disc']: + folder_name = f"{movie_name} ({year})" + elif meta['is_disc'] == "BDMV": + folder_name = f"{movie_name} ({year}) - Disc" + else: + folder_name = f"{movie_name} ({year}) - {meta['is_disc']}" + else: + if not meta.get('search_year'): + if not meta['is_disc']: + folder_name = f"{movie_name}" + elif meta['is_disc'] == "BDMV": + folder_name = f"{movie_name} - Disc" + else: + folder_name = f"{movie_name} - {meta['is_disc']}" + else: + if not meta['is_disc']: + folder_name = f"{movie_name} ({meta['search_year']})" + elif meta['is_disc'] == "BDMV": + folder_name = f"{movie_name} ({meta['search_year']}) - Disc" + else: + folder_name = f"{movie_name} ({meta['search_year']}) - {meta['is_disc']}" + + if meta['category'] == "TV": + target_base = config['DEFAULT'].get('emby_tv_dir', None) + else: + target_base = config['DEFAULT'].get('emby_dir', None) + if target_base is not None: + if meta['category'] == "MOVIE": + target_dir = os.path.join(target_base, folder_name) + else: + if meta.get('season') == 'S00': + season = "Specials" + else: + season_int = str(meta.get('season_int')).zfill(2) + season = f"Season {season_int}" + target_dir = os.path.join(target_base, folder_name, season) + + os.makedirs(target_dir, exist_ok=True) + # Get source path and files + path = meta.get('path') + filelist = meta.get('filelist', []) + + if not path: + console.print("[red]No path found in meta.") + return None + + # Handle single file vs folder content + if len(filelist) == 1 and os.path.isfile(filelist[0]) and not meta.get('keep_folder'): + # Single file - create symlink in the target folder + src_file = filelist[0] + filename = os.path.basename(src_file) + target_file = os.path.join(target_dir, filename) + + try: + cmd = f'mklink "{target_file}" "{src_file}"' + subprocess.run(cmd, check=True, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + if meta.get('debug'): + console.print(f"[green]Created symlink: {target_file}") + + except subprocess.CalledProcessError: + meta['linking_failed'] = True + + else: + # Folder content - symlink all files from the source folder + src_dir = path if os.path.isdir(path) else os.path.dirname(path) + + # Get all files in the source directory + for root, dirs, files in os.walk(src_dir): + for file in files: + src_file = os.path.join(root, file) + # Create relative path structure in target + rel_path = os.path.relpath(src_file, src_dir) + target_file = os.path.join(target_dir, rel_path) + + # Create subdirectories if needed + target_file_dir = os.path.dirname(target_file) + os.makedirs(target_file_dir, exist_ok=True) + + try: + cmd = f'mklink "{target_file}" "{src_file}"' + subprocess.run(cmd, check=True, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + if meta.get('debug'): + console.print(f"[green]Created symlink: {file}") + + except subprocess.CalledProcessError: + meta['linking_failed'] = True + + console.print(f"[green]Movie folder created: {target_dir}") + return target_dir + else: + return None diff --git a/src/prep.py b/src/prep.py index 9c9841d5b..45f2b6242 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1,41 +1,49 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -from src.console import console -from src.clients import Clients -from data.config import config -from src.tvmaze import search_tvmaze -from src.imdb import get_imdb_info_api, search_imdb -from src.tmdb import tmdb_other_meta, 
get_tmdb_imdb_from_mediainfo, get_tmdb_from_imdb, get_tmdb_id -from src.region import get_region, get_distributor, get_service -from src.exportmi import exportInfo, mi_resolution, validate_mediainfo -from src.getseasonep import get_season_episode -from src.get_tracker_data import get_tracker_data, ping_unit3d -from src.bluray_com import get_bluray_releases -from src.metadata_searching import all_ids, imdb_tvdb, imdb_tmdb, get_tv_data, imdb_tmdb_tvdb, get_tvdb_series, get_tvmaze_tvdb -from src.apply_overrides import get_source_override -from src.is_scene import is_scene -from src.audio import get_audio_v2 -from src.edition import get_edition -from src.video import get_video_codec, get_video_encode, get_uhd, get_hdr, get_video, get_resolution, get_type, is_3d, is_sd -from src.tags import get_tag, tag_override -from src.get_disc import get_disc, get_dvd_size -from src.get_source import get_source -from src.sonarr import get_sonarr_data -from src.radarr import get_radarr_data -from src.languages import parsed_mediainfo - try: - import traceback - import os - import re import asyncio - from guessit import guessit + import cli_ui import ntpath - from pathlib import Path + import os + import re + import sys + import traceback import time + from difflib import SequenceMatcher + from guessit import guessit + from pathlib import Path + + from data.config import config + from src.apply_overrides import get_source_override + from src.audio import get_audio_v2 + from src.bluray_com import get_bluray_releases + from src.cleanup import cleanup, reset_terminal + from src.clients import Clients + from src.console import console + from src.edition import get_edition + from src.exportmi import exportInfo, mi_resolution, validate_mediainfo, get_conformance_error + from src.get_disc import get_disc, get_dvd_size + from src.get_name import extract_title_and_year + from src.getseasonep import get_season_episode + from src.get_source import get_source + from src.get_tracker_data import get_tracker_data, ping_unit3d + from src.imdb import get_imdb_info_api, search_imdb, get_imdb_from_episode + from src.is_scene import is_scene + from src.languages import parsed_mediainfo + from src.metadata_searching import all_ids, imdb_tvdb, imdb_tmdb, get_tv_data, imdb_tmdb_tvdb, get_tvmaze_tvdb + from src.radarr import get_radarr_data + from src.region import get_region, get_distributor, get_service + from src.sonarr import get_sonarr_data + from src.tags import get_tag, tag_override + from src.tmdb import get_tmdb_imdb_from_mediainfo, get_tmdb_from_imdb, get_tmdb_id, set_tmdb_metadata + from src.tvdb import tvdb_data + from src.tvmaze import search_tvmaze + from src.video import get_video_codec, get_video_encode, get_uhd, get_hdr, get_video, get_resolution, get_type, is_3d, is_sd, get_video_duration, get_container + except ModuleNotFoundError: console.print(traceback.print_exc()) - console.print('[bold red]Missing Module Found. Please reinstall required dependancies.') + console.print('[bold red]Missing Module Found. 
Please reinstall required dependencies.') console.print('[yellow]pip3 install --user -U -r requirements.txt') exit() except KeyboardInterrupt: @@ -50,36 +58,37 @@ class Prep(): Database Identifiers (TMDB/IMDB/MAL/etc) Create Name """ + def __init__(self, screens, img_host, config): self.screens = screens self.config = config self.img_host = img_host.lower() + self.tvdb_handler = tvdb_data(config) async def gather_prep(self, meta, mode): + # set a timer to check speed + if meta['debug']: + meta_start_time = time.time() # set some details we'll need meta['cutoff'] = int(self.config['DEFAULT'].get('cutoff_screens', 1)) - tvdb_api_get = str(self.config['DEFAULT'].get('tvdb_api', None)) - if tvdb_api_get is None or len(tvdb_api_get) < 20: - tvdb_api = None - else: - tvdb_api = tvdb_api_get - tvdb_token_get = str(self.config['DEFAULT'].get('tvdb_token', None)) - if tvdb_token_get is None or len(tvdb_token_get) < 20: - tvdb_token = None - else: - tvdb_token = tvdb_token_get + meta['mode'] = mode meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] meta['saved_description'] = False client = Clients(config=config) - meta['skip_auto_torrent'] = config['DEFAULT'].get('skip_auto_torrent', False) + meta['skip_auto_torrent'] = meta.get('skip_auto_torrent', False) or config['DEFAULT'].get('skip_auto_torrent', False) hash_ids = ['infohash', 'torrent_hash', 'skip_auto_torrent'] - tracker_ids = ['ptp', 'bhd', 'btn', 'blu', 'aither', 'lst', 'oe', 'hdb', 'huno'] + tracker_ids = ['aither', 'ulcx', 'lst', 'blu', 'oe', 'btn', 'bhd', 'huno', 'hdb', 'rf', 'otw', 'yus', 'dp', 'sp', 'ptp'] use_sonarr = config['DEFAULT'].get('use_sonarr', False) use_radarr = config['DEFAULT'].get('use_radarr', False) meta['print_tracker_messages'] = config['DEFAULT'].get('print_tracker_messages', False) meta['print_tracker_links'] = config['DEFAULT'].get('print_tracker_links', True) + only_id = config['DEFAULT'].get('only_id', False) if meta.get('onlyID') is None else meta.get('onlyID') + meta['only_id'] = only_id + meta['keep_images'] = config['DEFAULT'].get('keep_images', True) if not meta.get('keep_images') else True + mkbrr_threads = config['DEFAULT'].get('mkbrr_threads', "0") + meta['mkbrr_threads'] = mkbrr_threads # make sure these are set in meta meta['we_checked_tvdb'] = False @@ -87,6 +96,7 @@ async def gather_prep(self, meta, mode): meta['we_asked_tvmaze'] = False meta['audio_languages'] = None meta['subtitle_languages'] = None + meta['aither_trumpable'] = None folder_id = os.path.basename(meta['path']) if meta.get('uuid', None) is None: @@ -97,7 +107,10 @@ async def gather_prep(self, meta, mode): if meta['debug']: console.print(f"[cyan]ID: {meta['uuid']}") - meta['is_disc'], videoloc, bdinfo, meta['discs'] = await get_disc(meta) + try: + meta['is_disc'], videoloc, bdinfo, meta['discs'] = await get_disc(meta) + except Exception: + raise # Debugging information # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") @@ -108,9 +121,39 @@ async def gather_prep(self, meta, mode): search_term = os.path.basename(meta['path']) search_file_folder = 'folder' try: - guess_name = bdinfo['title'].replace('-', ' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] - untouched_filename = bdinfo['title'] + if meta.get('emby', False): + title, secondary_title, extracted_year = await extract_title_and_year(meta, video) + if meta['debug']: + console.print(f"Title: {title}, Secondary Title: {secondary_title}, Year: 
{extracted_year}") + if secondary_title: + meta['secondary_title'] = secondary_title + if extracted_year and not meta.get('year'): + meta['year'] = extracted_year + if title: + filename = title + untouched_filename = search_term + meta['regex_title'] = title + meta['regex_secondary_title'] = secondary_title + meta['regex_year'] = extracted_year + else: + guess_name = search_term.replace('-', ' ') + untouched_filename = search_term + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] + else: + title, secondary_title, extracted_year = await extract_title_and_year(meta, video) + if meta['debug']: + console.print(f"Title: {title}, Secondary Title: {secondary_title}, Year: {extracted_year}") + if secondary_title: + meta['secondary_title'] = secondary_title + if extracted_year and not meta.get('year'): + meta['year'] = extracted_year + if title: + filename = title + untouched_filename = search_term + else: + guess_name = bdinfo['title'].replace('-', ' ') + untouched_filename = bdinfo['title'] + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] try: meta['search_year'] = guessit(bdinfo['title'])['year'] except Exception: @@ -124,7 +167,7 @@ async def gather_prep(self, meta, mode): except Exception: meta['search_year'] = "" - if meta.get('resolution', None) is None: + if meta.get('resolution', None) is None and not meta.get('emby', False): meta['resolution'] = await mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) try: is_hfr = bdinfo['video'][0]['fps'].split()[0] if bdinfo['video'] else "25" @@ -134,6 +177,8 @@ async def gather_prep(self, meta, mode): meta['hfr'] = False except Exception: meta['hfr'] = False + else: + meta['resolution'] = "1080p" meta['sd'] = await is_sd(meta['resolution']) @@ -144,22 +189,54 @@ async def gather_prep(self, meta, mode): meta['filelist'] = [] search_term = os.path.basename(meta['path']) search_file_folder = 'folder' - guess_name = meta['discs'][0]['path'].replace('-', ' ') - filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] - untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) - try: - meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] - except Exception: + if meta.get('emby', False): + title, secondary_title, extracted_year = await extract_title_and_year(meta, video) + if meta['debug']: + console.print(f"Title: {title}, Secondary Title: {secondary_title}, Year: {extracted_year}") + if secondary_title: + meta['secondary_title'] = secondary_title + if extracted_year and not meta.get('year'): + meta['year'] = extracted_year + if title: + filename = title + untouched_filename = search_term + meta['regex_title'] = title + meta['regex_secondary_title'] = secondary_title + meta['regex_year'] = extracted_year + else: + guess_name = search_term.replace('-', ' ') + filename = guess_name + untouched_filename = search_term + meta['resolution'] = "480p" meta['search_year'] = "" - if not meta.get('edit', False): - mi = await exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False, is_dvd=True, debug=meta['debug']) - meta['mediainfo'] = mi else: - mi = meta['mediainfo'] + title, secondary_title, extracted_year = await extract_title_and_year(meta, video) + if meta['debug']: + console.print(f"Title: {title}, Secondary Title: {secondary_title}, Year: 
{extracted_year}") + if secondary_title: + meta['secondary_title'] = secondary_title + if extracted_year and not meta.get('year'): + meta['year'] = extracted_year + if title: + filename = title + untouched_filename = search_term + else: + guess_name = meta['discs'][0]['path'].replace('-', ' ') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] + untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) + try: + meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] + except Exception: + meta['search_year'] = "" + if not meta.get('edit', False): + mi = await exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_0.IFO", False, meta['uuid'], meta['base_dir'], export_text=False, is_dvd=True, debug=meta.get('debug', False)) + meta['mediainfo'] = mi + else: + mi = meta['mediainfo'] - meta['dvd_size'] = await get_dvd_size(meta['discs'], meta.get('manual_dvds')) - meta['resolution'], meta['hfr'] = await get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = await is_sd(meta['resolution']) + meta['dvd_size'] = await get_dvd_size(meta['discs'], meta.get('manual_dvds')) + meta['resolution'], meta['hfr'] = await get_resolution(guessit(video), meta['uuid'], base_dir) + meta['sd'] = await is_sd(meta['resolution']) elif meta['is_disc'] == "HDDVD": video, meta['scene'], meta['imdb_id'] = await is_scene(meta['path'], meta, meta.get('imdb_id', 0)) @@ -183,125 +260,160 @@ async def gather_prep(self, meta, mode): meta['sd'] = await is_sd(meta['resolution']) else: - # handle some specific cases that trouble guessit and then id grabbing - def extract_title_and_year(filename): - basename = os.path.basename(filename) - basename = os.path.splitext(basename)[0] - - secondary_title = None - year = None - - # Check for AKA patterns first - aka_patterns = [' AKA ', '.aka.', ' aka ', '.AKA.'] - for pattern in aka_patterns: - if pattern in basename: - aka_parts = basename.split(pattern, 1) - if len(aka_parts) > 1: - primary_title = aka_parts[0].strip() - secondary_part = aka_parts[1].strip() - - # Look for a year in the primary title - year_match_primary = re.search(r'\b(19|20)\d{2}\b', primary_title) - if year_match_primary: - year = year_match_primary.group(0) - - # Process secondary title - secondary_match = re.match(r"^(\d+)", secondary_part) - if secondary_match: - secondary_title = secondary_match.group(1) - else: - # Catch everything after AKA until it hits a year or release info - year_or_release_match = re.search(r'\b(19|20)\d{2}\b|\bBluRay\b|\bREMUX\b|\b\d+p\b|\bDTS-HD\b|\bAVC\b', secondary_part) - if year_or_release_match: - # Check if we found a year in the secondary part - if re.match(r'\b(19|20)\d{2}\b', year_or_release_match.group(0)): - # If no year was found in primary title, or we want to override - if not year: - year = year_or_release_match.group(0) - - secondary_title = secondary_part[:year_or_release_match.start()].strip() - else: - secondary_title = secondary_part - - primary_title = primary_title.replace('.', ' ') - secondary_title = secondary_title.replace('.', ' ') - return primary_title, secondary_title, year - - # if not AKA, catch titles that begin with a year - year_start_match = re.match(r'^(19|20)\d{2}', basename) - if year_start_match: - title = year_start_match.group(0) - rest = basename[len(title):].lstrip('. 
_-') - # Look for another year in the rest of the title - year_match = re.search(r'\b(19|20)\d{2}\b', rest) - year = year_match.group(0) if year_match else None - return title, None, year - - # If no pattern match works but there's still a year in the filename, extract it - year_match = re.search(r'(?= 0.7 or not aka_trimmed or aka_trimmed in title: aka = None + difference = SequenceMatcher(None, title, imdb_aka).ratio() + if difference >= 0.7 or not imdb_aka or imdb_aka in title: + imdb_aka = None + if aka is not None: if f"({year})" in aka: - aka = aka.replace(f"({year})", "").strip() + aka = meta.get('imdb_info', {}).get('title', "").replace(f"({year})", "").strip() + else: + aka = meta.get('imdb_info', {}).get('title', "").strip() meta['aka'] = f"AKA {aka.strip()}" - meta['title'] = f"{meta.get('imdb_info', {}).get('title', '').strip()}" + meta['title'] = meta['title'].strip() + elif imdb_aka is not None: + if f"({year})" in imdb_aka: + imdb_aka = meta.get('imdb_info', {}).get('aka', "").replace(f"({year})", "").strip() + else: + imdb_aka = meta.get('imdb_info', {}).get('aka', "").strip() + meta['aka'] = f"AKA {imdb_aka.strip()}" + meta['title'] = meta['title'].strip() if meta.get('aka', None) is None: meta['aka'] = "" - if meta['category'] == "TV": + # if it was skipped earlier, make sure we have the season/episode data + if not meta.get('not_anime', False) and meta.get('category') == "TV": + meta = await get_season_episode(video, meta) + + # lets check for tv movies + meta['tv_movie'] = False + is_tv_movie = meta.get('imdb_info', None).get('type', '') + tv_movie_keywords = ['tv movie', 'tv special', 'video'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', is_tv_movie, re.IGNORECASE) for keyword in tv_movie_keywords): + if meta['debug']: + console.print(f"[yellow]Identified as TV Movie based on IMDb type: {is_tv_movie}[/yellow]") + meta['tv_movie'] = True + + if meta['category'] == "TV" or meta.get('tv_movie', False): + both_ids_searched = False if meta.get('tvmaze_id', 0) == 0 and meta.get('tvdb_id', 0) == 0: - await get_tvmaze_tvdb(meta, filename, tvdb_api, tvdb_token) - elif meta.get('tvmaze_id', 0) == 0: - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await search_tvmaze( + tvmaze, tvdb, tvdb_data = await get_tvmaze_tvdb(filename, meta['search_year'], meta.get('imdb_id', 0), meta.get('tmdb_id', 0), meta.get('manual_data'), meta.get('tvmaze_manual', 0), year=meta.get('year', ''), debug=meta.get('debug', False), tv_movie=meta.get('tv_movie', False)) + both_ids_searched = True + if tvmaze: + meta['tvmaze_id'] = tvmaze + if meta['debug']: + console.print(f"[blue]Found TVMAZE ID from search: {tvmaze}[/blue]") + if tvdb: + meta['tvdb_id'] = tvdb + if meta['debug']: + console.print(f"[blue]Found TVDB ID from search: {tvdb}[/blue]") + if tvdb_data: + meta['tvdb_search_results'] = tvdb_data + if meta['debug']: + console.print("[blue]Found TVDB search results from search.[/blue]") + if meta.get('tvmaze_id', 0) == 0 and not both_ids_searched: + if meta['debug']: + console.print("[yellow]No TVMAZE ID found, attempting to fetch...[/yellow]") + meta['tvmaze_id'] = await search_tvmaze( filename, meta['search_year'], meta.get('imdb_id', 0), meta.get('tvdb_id', 0), manual_date=meta.get('manual_date'), tvmaze_manual=meta.get('tvmaze_manual'), debug=meta.get('debug', False), - return_full_tuple=True + return_full_tuple=False ) - else: - meta.setdefault('tvmaze_id', 0) - if meta.get('tvdb_id', 0) == 0 and tvdb_api and tvdb_token: - meta['tvdb_id'] = await 
get_tvdb_series(base_dir, title=meta.get('title', ''), year=meta.get('year', ''), apikey=tvdb_api, token=tvdb_token, debug=meta.get('debug', False)) + if meta.get('tvdb_id', 0) == 0: + if meta['debug']: + console.print("[yellow]No TVDB ID found, attempting to fetch...[/yellow]") + try: + series_results, series_id = await self.tvdb_handler.search_tvdb_series(filename=filename, year=meta.get('year', ''), debug=meta.get('debug', False)) + if series_id: + meta['tvdb_id'] = series_id + console.print(f"[blue]Found TVDB series ID from search: {series_id}[/blue]") + if series_results: + meta['tvdb_search_results'] = series_results + except Exception as e: + console.print(f"[red]Error searching TVDB: {e}[/red]") - # if it was skipped earlier, make sure we have the season/episode data - if not meta.get('not_anime', False): - meta = await get_season_episode(video, meta) # all your episode data belongs to us - meta = await get_tv_data(meta, base_dir, tvdb_api, tvdb_token) - - # if we're using tvdb, lets use it's series name if it applies - # language check since tvdb returns original language names - if tvdb_api and tvdb_token and meta.get('original_language', "") == "en": - if meta.get('tvdb_episode_data') and meta.get('tvdb_episode_data').get('series_name') != "" and meta.get('title') != meta.get('tvdb_episode_data').get('series_name'): - series_name = meta.get('tvdb_episode_data').get('series_name', '') - if meta['debug']: - console.print(f"[yellow]tvdb series name: {series_name}") - year_match = re.search(r'\b(19|20)\d{2}\b', series_name) - if year_match: - extracted_year = year_match.group(0) - meta['search_year'] = extracted_year - series_name = re.sub(r'\s*\b(19|20)\d{2}\b\s*', '', series_name).strip() - series_name = series_name.replace('(', '').replace(')', '').strip() - meta['title'] = series_name - elif meta.get('tvdb_series_name') and meta.get('tvdb_series_name') != "" and meta.get('title') != meta.get('tvdb_series_name'): + meta = await get_tv_data(meta) + + if meta.get('tvdb_imdb_id', None): + imdb = meta['tvdb_imdb_id'].replace('tt', '') + if imdb.isdigit(): + if imdb != meta.get('imdb_id', 0): + episode_info = await get_imdb_from_episode(imdb, debug=True) + if episode_info: + series_id = episode_info.get('series', {}).get('series_id', None) + if series_id: + series_imdb = series_id.replace('tt', '') + if series_imdb.isdigit() and int(series_imdb) != meta.get('imdb_id', 0): + if meta['debug']: + console.print(f"[yellow]Updating IMDb ID from episode data: {series_imdb}") + meta['imdb_id'] = int(series_imdb) + imdb_info = await get_imdb_info_api(meta['imdb_id'], manual_language=meta.get('manual_language'), debug=meta.get('debug', False)) + meta['imdb_info'] = imdb_info + check_valid_data = meta.get('imdb_info', {}).get('title', "") + if check_valid_data: + title = meta.get('title', "").strip() + aka = meta.get('imdb_info', {}).get('aka', "").strip() + year = str(meta.get('imdb_info', {}).get('year', "")) + + if aka: + aka_trimmed = aka[4:].strip().lower() if aka.lower().startswith("aka") else aka.lower() + difference = SequenceMatcher(None, title.lower(), aka_trimmed).ratio() + if difference >= 0.7 or not aka_trimmed or aka_trimmed in title: + aka = None + + if aka is not None: + if f"({year})" in aka: + aka = meta.get('imdb_info', {}).get('aka', "").replace(f"({year})", "").strip() + else: + aka = meta.get('imdb_info', {}).get('aka', "").strip() + meta['aka'] = f"AKA {aka.strip()}" + else: + meta['aka'] = "" + else: + meta['aka'] = "" + + if meta.get('tvdb_series_name') and 
meta['category'] == "TV": series_name = meta.get('tvdb_series_name') - if meta['debug']: - console.print(f"[yellow]tvdb series name: {series_name}") - year_match = re.search(r'\b(19|20)\d{2}\b', series_name) - if year_match: - extracted_year = year_match.group(0) - meta['search_year'] = extracted_year - series_name = re.sub(r'\s*\b(19|20)\d{2}\b\s*', '', series_name).strip() - series_name = series_name.replace('(', '').replace(')', '').strip() - meta['title'] = series_name + if series_name and meta.get('title') != series_name: + if meta['debug']: + console.print(f"[yellow]tvdb series name: {series_name}") + year_match = re.search(r'\b(19|20)\d{2}\b', series_name) + if year_match: + extracted_year = year_match.group(0) + meta['search_year'] = extracted_year + series_name = re.sub(r'\s*\b(19|20)\d{2}\b\s*', '', series_name).strip() + series_name = series_name.replace('(', '').replace(')', '').strip() + if series_name and year_match: # Only set if not empty and year was found + meta['title'] = series_name # bluray.com data if config get_bluray_info = self.config['DEFAULT'].get('get_bluray_info', False) meta['bluray_score'] = int(float(self.config['DEFAULT'].get('bluray_score', 100))) meta['bluray_single_score'] = int(float(self.config['DEFAULT'].get('bluray_single_score', 100))) meta['use_bluray_images'] = self.config['DEFAULT'].get('use_bluray_images', False) - if meta.get('is_disc') == "BDMV" and get_bluray_info and (meta.get('distributor') is None or meta.get('region') is None) and meta.get('imdb_id') != 0: - await get_bluray_releases(meta) - - # and if we getting bluray images, we'll rehost them - if meta.get('is_disc') == "BDMV" and meta.get('use_bluray_images', False): - from src.rehostimages import check_hosts - url_host_mapping = { - "ibb.co": "imgbb", - "pixhost.to": "pixhost", - "imgbox.com": "imgbox", - } - - approved_image_hosts = ['imgbox', 'imgbb', 'pixhost'] - await check_hosts(meta, "covers", url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) + if meta.get('is_disc') in ("BDMV", "DVD") and get_bluray_info and (meta.get('distributor') is None or meta.get('region') is None) and meta.get('imdb_id') != 0 and not meta.get('emby', False) and not meta.get('edit', False) and not meta.get('site_check', False): + releases = await get_bluray_releases(meta) + + if releases: + # and if we getting bluray/dvd images, we'll rehost them + if meta.get('is_disc') in ("BDMV", "DVD") and meta.get('use_bluray_images', False): + from src.rehostimages import check_hosts + url_host_mapping = { + "ibb.co": "imgbb", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", + } + + approved_image_hosts = ['imgbox', 'imgbb', 'pixhost'] + await check_hosts(meta, "covers", url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) # user override check that only sets data after metadata setting - if user_overrides and not meta.get('no_override', False): + if user_overrides and not meta.get('no_override', False) and not meta.get('emby', False): meta = await get_source_override(meta) - if meta.get('tag') == "-SubsPlease": # SubsPlease-specific - tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) # Get all tracks - bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 and not isinstance(tracks[1].get('BitRate', ''), dict) else '' # Check that bitrate is not a dict - bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 and not isinstance(tracks[0].get('OverallBitRate', ''), dict) else '' # 
Check for old MediaInfo - meta['episode_title'] = "" - if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000) and meta.get('resolution') == "1080p": # 8Mbps for 1080p - meta['service'] = "CR" - elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()) and meta.get('resolution') == "1080p": # Only assign if at least one bitrate is present, otherwise leave it to user - meta['service'] = "HIDI" - elif (bitrate.isdigit() and int(bitrate) >= 4000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 4000000) and meta.get('resolution') == "720p": # 4Mbps for 720p - meta['service'] = "CR" - elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()) and meta.get('resolution') == "720p": - meta['service'] = "HIDI" - meta['video'] = video - meta['audio'], meta['channels'], meta['has_commentary'] = await get_audio_v2(mi, meta, bdinfo) - - meta['3D'] = await is_3d(mi, bdinfo) - - meta['source'], meta['type'] = await get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) + if not meta.get('emby', False): + meta['container'] = await get_container(meta) - meta['uhd'] = await get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) - meta['hdr'] = await get_hdr(mi, bdinfo) + meta['audio'], meta['channels'], meta['has_commentary'] = await get_audio_v2(mi, meta, bdinfo) - meta['distributor'] = await get_distributor(meta['distributor']) + meta['3D'] = await is_3d(mi, bdinfo) - if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific - meta['region'] = await get_region(bdinfo, meta.get('region', None)) - meta['video_codec'] = await get_video_codec(bdinfo) - else: - meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = await get_video_encode(mi, meta['type'], bdinfo) + meta['source'], meta['type'] = await get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) - if meta.get('no_edition') is False: - meta['edition'], meta['repack'], meta['webdv'] = await get_edition(meta['uuid'], bdinfo, meta['filelist'], meta.get('manual_edition'), meta) - if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] - meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') - else: - meta['edition'] = "" + meta['uhd'] = await get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) + meta['hdr'] = await get_hdr(mi, bdinfo) - meta.get('stream', False) - meta['stream'] = await self.stream_optimized(meta['stream']) + meta['distributor'] = await get_distributor(meta['distributor']) - if meta.get('tag', None) is None: - if meta.get('we_need_tag', False): - meta['tag'] = await get_tag(meta['scene_name'], meta) + if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific + meta['region'] = await get_region(bdinfo, meta.get('region', None)) + meta['video_codec'] = await get_video_codec(bdinfo) else: - meta['tag'] = await get_tag(video, meta) - # all lowercase filenames will have bad group tag, it's probably a scene release. 
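
The comments here introduce the re-check for releases whose filenames were lowercased and would otherwise yield a bad group tag. A self-contained sketch of just the detection step, with made-up filenames for illustration:

import os
import re

def looks_like_lowercase_scene(video_path):
    # Strip a 3-character extension the same way the check in this hunk does,
    # then treat an all-lowercase basename as a hint that the original scene
    # release name (and its group tag) was lost in extraction.
    base = os.path.basename(video_path)
    match = re.match(r"^(.+)\.[a-zA-Z0-9]{3}$", base)
    if match:
        base = match.group(1)
    return base.islower()

print(looks_like_lowercase_scene("some.show.s01e01.720p.hdtv.x264-grp.mkv"))  # True
print(looks_like_lowercase_scene("Some.Show.S01E01.720p.HDTV.x264-GRP.mkv"))  # False
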
- # some extracted files do not match release name so lets double check if it really is a scene release - if not meta.get('scene') and meta['tag']: - base = os.path.basename(video) - match = re.match(r"^(.+)\.[a-zA-Z0-9]{3}$", os.path.basename(video)) - if match and (not meta['is_disc'] or meta.get('keep_folder', False)): - base = match.group(1) - is_all_lowercase = base.islower() - if is_all_lowercase: - release_name = await is_scene(videopath, meta, meta.get('imdb_id', 0), lower=True) - if release_name is not None: - try: - meta['scene_name'] = release_name - meta['tag'] = await self.get_tag(release_name, meta) - except Exception: - console.print("[red]Error getting tag from scene name, check group tag.[/red]") - - else: - if not meta['tag'].startswith('-') and meta['tag'] != "": - meta['tag'] = f"-{meta['tag']}" - - meta = await tag_override(meta) - - if meta['tag'][1:].startswith(meta['channels']): - meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') + meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = await get_video_encode(mi, meta['type'], bdinfo) - if meta.get('no_tag', False): - meta['tag'] = "" + if meta.get('no_edition') is False: + meta['edition'], meta['repack'], meta['webdv'] = await get_edition(meta['uuid'], bdinfo, meta['filelist'], meta.get('manual_edition'), meta) + if "REPACK" in meta.get('edition', ""): + meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] + meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + else: + meta['edition'] = "" + + meta['valid_mi_settings'] = True + if not meta['is_disc'] and meta['type'] in ["ENCODE"] and meta['video_codec'] not in ["AV1"]: + valid_mi_settings = validate_mediainfo(meta, debug=meta['debug'], settings=True) + if not valid_mi_settings: + console.print("[red]MediaInfo validation failed. This file does not contain encode settings.") + meta['valid_mi_settings'] = False + await asyncio.sleep(2) + + meta.get('stream', False) + meta['stream'] = await self.stream_optimized(meta['stream']) + + if meta.get('tag', None) is None: + if meta.get('we_need_tag', False): + meta['tag'] = await get_tag(meta['scene_name'], meta) + else: + meta['tag'] = await get_tag(video, meta) + # all lowercase filenames will have bad group tag, it's probably a scene release. 
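
One note on the SubsPlease service-detection branch that appears a little further down in this hunk (in both the removed and the re-added version): because Python's "and" binds tighter than "or", the trailing resolution comparison only applies to the second bitrate clause. Whether that is intended is not clear from the diff; the sketch below only illustrates the grouping with plain booleans so the behaviour is easy to verify.

# Conditions of the shape
#     (A and B) or (C and D) and E
# group as
#     (A and B) or ((C and D) and E)
new_mi_fast_enough = True   # (A and B): modern MediaInfo bitrate over threshold
old_mi_fast_enough = False  # (C and D): legacy OverallBitRate over threshold
is_1080p = False            # E: resolution check

as_written = new_mi_fast_enough or old_mi_fast_enough and is_1080p
fully_guarded = (new_mi_fast_enough or old_mi_fast_enough) and is_1080p

print(as_written)     # True  - first clause passes regardless of resolution
print(fully_guarded)  # False - would also require the resolution to match
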
+ # some extracted files do not match release name so lets double check if it really is a scene release + if not meta.get('scene') and meta['tag']: + base = os.path.basename(video) + match = re.match(r"^(.+)\.[a-zA-Z0-9]{3}$", os.path.basename(video)) + if match and (not meta['is_disc'] or meta.get('keep_folder', False)): + base = match.group(1) + is_all_lowercase = base.islower() + if is_all_lowercase: + release_name = await is_scene(videopath, meta, meta.get('imdb_id', 0), lower=True) + if release_name is not None: + try: + meta['scene_name'] = release_name + meta['tag'] = await self.get_tag(release_name, meta) + except Exception: + console.print("[red]Error getting tag from scene name, check group tag.[/red]") - if meta.get('service', None) in (None, ''): - meta['service'], meta['service_longname'] = await get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) - elif meta.get('service'): - services = await get_service(get_services_only=True) - meta['service_longname'] = max((k for k, v in services.items() if v == meta['service']), key=len, default=meta['service']) + else: + if not meta['tag'].startswith('-') and meta['tag'] != "": + meta['tag'] = f"-{meta['tag']}" + + meta = await tag_override(meta) + + if meta['tag'][1:].startswith(meta['channels']): + meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') + + if meta.get('no_tag', False): + meta['tag'] = "" + + if meta.get('tag') == "-SubsPlease": # SubsPlease-specific + tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) # Get all tracks + bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 and not isinstance(tracks[1].get('BitRate', ''), dict) else '' # Check that bitrate is not a dict + bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 and not isinstance(tracks[0].get('OverallBitRate', ''), dict) else '' # Check for old MediaInfo + meta['episode_title'] = "" + if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000) and meta.get('resolution') == "1080p": # 8Mbps for 1080p + meta['service'] = "CR" + elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()) and meta.get('resolution') == "1080p": # Only assign if at least one bitrate is present, otherwise leave it to user + meta['service'] = "HIDI" + elif (bitrate.isdigit() and int(bitrate) >= 4000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 4000000) and meta.get('resolution') == "720p": # 4Mbps for 720p + meta['service'] = "CR" + elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()) and meta.get('resolution') == "720p": + meta['service'] = "HIDI" + + if meta.get('service', None) in (None, ''): + meta['service'], meta['service_longname'] = await get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) + elif meta.get('service'): + services = await get_service(get_services_only=True) + meta['service_longname'] = max((k for k, v in services.items() if v == meta['service']), key=len, default=meta['service']) + + # Parse NFO for scene releases to get service + if meta['scene'] and not meta.get('service') and meta['category'] == "TV": + await self.parse_scene_nfo(meta) + + # Combine genres from TMDB and IMDb + tmdb_genres = meta.get('genres', '') or '' + imdb_genres = meta.get('imdb_info', {}).get('genres', '') or '' + + all_genres = [] + if tmdb_genres: + all_genres.extend([g.strip() for g in tmdb_genres.split(',') if g.strip()]) + if imdb_genres: + all_genres.extend([g.strip() for g in 
imdb_genres.split(',') if g.strip()]) + + seen = set() + unique_genres = [] + for genre in all_genres: + genre_lower = genre.lower() + if genre_lower not in seen: + seen.add(genre_lower) + unique_genres.append(genre) + + meta['combined_genres'] = ', '.join(unique_genres) if unique_genres else '' # return duplicate ids so I don't have to catch every site file - # this has the other adavantage of stringifying immb for this object + # this has the other advantage of stringing imdb for this object meta['tmdb'] = meta.get('tmdb_id') if int(meta.get('imdb_id')) != 0: imdb_str = str(meta['imdb_id']).zfill(7) @@ -901,22 +1108,23 @@ def extract_title_and_year(filename): return meta async def get_cat(self, video, meta): - if meta.get('category'): - return meta.get('category') + if meta.get('manual_category'): + return meta.get('manual_category').upper() path_patterns = [ r'(?i)[\\/](?:tv|tvshows|tv.shows|series|shows)[\\/]', - r'(?i)[\\/](?:season\s*\d+|s\d+|complete)[\\/]', + r'(?i)[\\/](?:season\s*\d+|s\d+)[\\/]', r'(?i)[\\/](?:s\d{1,2}e\d{1,2}|s\d{1,2}|season\s*\d+)', - r'(?i)(?:complete series|tv pack|season\s*\d+\s*complete)' + r'(?i)(?:tv pack|season\s*\d+)' ] filename_patterns = [ r'(?i)s\d{1,2}e\d{1,2}', + r'(?i)s\d{1,2}', r'(?i)\d{1,2}x\d{2}', r'(?i)(?:season|series)\s*\d+', r'(?i)e\d{2,3}\s*\-', - r'(?i)(?:complete|full)\s*(?:season|series)' + r'(?i)\d{4}\.\d{1,2}\.\d{1,2}' ] path = meta.get('path', '') @@ -930,6 +1138,11 @@ async def get_cat(self, video, meta): if re.search(pattern, uuid) or re.search(pattern, os.path.basename(path)): return "TV" + if "subsplease" in path.lower() or "subsplease" in uuid.lower(): + anime_pattern = r'(?:\s-\s)?(\d{1,3})\s*\((?:\d+p|480p|480i|576i|576p|720p|1080i|1080p|2160p)\)' + if re.search(anime_pattern, path.lower()) or re.search(anime_pattern, uuid.lower()): + return "TV" + return "MOVIE" async def stream_optimized(self, stream_opt): @@ -938,3 +1151,41 @@ async def stream_optimized(self, stream_opt): else: stream = 0 return stream + + async def parse_scene_nfo(self, meta): + try: + nfo_file = meta.get('scene_nfo_file', '') + + if not nfo_file: + if meta['debug']: + console.print("[yellow]No NFO file found for scene release[/yellow]") + return + + if meta['debug']: + console.print(f"[cyan]Parsing NFO file: {nfo_file}[/cyan]") + + with open(nfo_file, 'r', encoding='utf-8', errors='ignore') as f: + nfo_content = f.read() + + # Parse Source field + source_match = re.search(r'^Source\s*:\s*(.+?)$', nfo_content, re.MULTILINE | re.IGNORECASE) + if source_match: + nfo_source = source_match.group(1).strip() + if meta['debug']: + console.print(f"[cyan]Found source in NFO: {nfo_source}[/cyan]") + + # Check if source matches any service + services = await get_service(get_services_only=True) + + # Exact match + for service_name, service_code in services.items(): + if nfo_source.upper() == service_name.upper() or nfo_source.upper() == service_code.upper(): + meta['service'] = service_code + meta['service_longname'] = service_name + if meta['debug']: + console.print(f"[green]Matched service: {service_code} ({service_name})[/green]") + break + + except Exception as e: + if meta['debug']: + console.print(f"[red]Error parsing NFO file: {e}[/red]") diff --git a/src/queuemanage.py b/src/queuemanage.py index 69bd7f9ec..afe51caf5 100644 --- a/src/queuemanage.py +++ b/src/queuemanage.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import json import glob @@ -9,6 +10,106 @@ from rich.style import Style +async def 
process_site_upload_queue(meta, base_dir): + site_upload = meta.get('site_upload') + if not site_upload: + return [], None + + # Get the search results file path + search_results_file = os.path.join(base_dir, "tmp", f"{site_upload}_search_results.json") + + if not os.path.exists(search_results_file): + console.print(f"[red]Search results file not found: {search_results_file}[/red]") + return [], None + + try: + with open(search_results_file, 'r', encoding='utf-8') as f: + search_results = json.load(f) + except (json.JSONDecodeError, IOError) as e: + console.print(f"[red]Error loading search results file: {e}[/red]") + return [], None + + # Get processed files log + processed_files_log = os.path.join(base_dir, "tmp", f"{site_upload}_processed_paths.log") + processed_paths = set() + + if os.path.exists(processed_files_log): + try: + with open(processed_files_log, 'r', encoding='utf-8') as f: + processed_paths = set(json.load(f)) + except (json.JSONDecodeError, IOError) as e: + console.print(f"[yellow]Warning: Could not load processed files log: {e}[/yellow]") + + # Extract paths and IMDb IDs, filtering out processed paths + queue = [] + for item in search_results: + path = item.get('path') + try: + imdb_id = item.get('imdb_id') + except KeyError: + imdb_id = 0 + + if path and imdb_id is not None and path not in processed_paths: + # Set tracker and imdb_id in meta for this queue item + queue_item = { + 'path': path, + 'imdb_id': imdb_id, + 'tracker': site_upload + } + queue.append(queue_item) + + console.print(f"[cyan]Found {len(queue)} unprocessed items for {site_upload} upload[/cyan]") + + if queue: + # Display the queue + paths_only = [item['path'] for item in queue] + md_text = "\n - ".join(paths_only) + console.print("\n[bold green]Queuing these files for site upload:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print(f"[yellow]Tracker: {site_upload}[/yellow]") + console.print("\n\n") + + return queue, processed_files_log + + +async def process_site_upload_item(queue_item, meta): + # Set the tracker argument (-tk XXX) + meta['trackers'] = [queue_item['tracker']] + + # Set the IMDb ID + try: + imdb = queue_item['imdb_id'] + except KeyError: + imdb = 0 + meta['imdb_id'] = imdb + + # Return the path for processing + return queue_item['path'] + + +async def save_processed_path(processed_files_log, path): + processed_paths = set() + + # Load existing processed paths + if os.path.exists(processed_files_log): + try: + with open(processed_files_log, 'r', encoding='utf-8') as f: + processed_paths = set(json.load(f)) + except (json.JSONDecodeError, IOError): + pass + + # Add the new path + processed_paths.add(path) + + # Save back to file + try: + os.makedirs(os.path.dirname(processed_files_log), exist_ok=True) + with open(processed_files_log, 'w', encoding='utf-8') as f: + json.dump(list(processed_paths), f, indent=4) + except IOError as e: + console.print(f"[red]Error saving processed path: {e}[/red]") + + async def get_log_file(base_dir, queue_name): """ Returns the path to the log file for the given base directory and queue name. @@ -22,7 +123,7 @@ async def load_processed_files(log_file): Loads the list of processed files from the log file. 
""" if os.path.exists(log_file): - with open(log_file, "r") as f: + with open(log_file, 'r', encoding='utf-8') as f: return set(json.load(f)) return set() @@ -34,19 +135,59 @@ async def gather_files_recursive(path, allowed_extensions=None): Skip folders that don't contain allowed extensions or disc structures (VIDEO_TS/BDMV). """ queue = [] + + # Normalize the path to handle Unicode characters properly + try: + if isinstance(path, bytes): + path = path.decode('utf-8', errors='replace') + + # Normalize Unicode characters + import unicodedata + path = unicodedata.normalize('NFC', path) + + # Ensure proper path format + path = os.path.normpath(path) + + except Exception as e: + console.print(f"[yellow]Warning: Path normalization failed for {path}: {e}[/yellow]") + if os.path.isdir(path): - for entry in os.scandir(path): - if entry.is_dir(): - # Check if this directory should be included - if await should_include_directory(entry.path, allowed_extensions): - queue.append(entry.path) - elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): - queue.append(entry.path) + try: + for entry in os.scandir(path): + try: + # Get the full path and normalize it + entry_path = os.path.normpath(entry.path) + + if entry.is_dir(): + # Check if this directory should be included + if await should_include_directory(entry_path, allowed_extensions): + queue.append(entry_path) + elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): + queue.append(entry_path) + + except (OSError, UnicodeDecodeError, UnicodeError) as e: + console.print(f"[yellow]Warning: Skipping entry due to encoding issue: {e}[/yellow]") + # Try to get the path in a different way + try: + alt_path = os.path.join(path, entry.name) + if os.path.exists(alt_path): + if os.path.isdir(alt_path) and await should_include_directory(alt_path, allowed_extensions): + queue.append(alt_path) + elif os.path.isfile(alt_path) and (allowed_extensions is None or alt_path.lower().endswith(tuple(allowed_extensions))): + queue.append(alt_path) + except Exception: + continue + + except (OSError, PermissionError) as e: + console.print(f"[red]Error scanning directory {path}: {e}[/red]") + return [] + elif os.path.isfile(path): if allowed_extensions is None or path.lower().endswith(tuple(allowed_extensions)): queue.append(path) else: - console.print(f"[red]Invalid path: {path}") + console.print(f"[red]Invalid path: {path}[/red]") + return queue @@ -58,6 +199,9 @@ async def should_include_directory(dir_path, allowed_extensions=None): - A subfolder named 'VIDEO_TS' or 'BDMV' (disc structures) """ try: + # Normalize the path + dir_path = os.path.normpath(dir_path) + # Check for disc structures first (VIDEO_TS or BDMV subfolders) for entry in os.scandir(dir_path): if entry.is_dir() and entry.name.upper() in ('VIDEO_TS', 'BDMV'): @@ -76,11 +220,32 @@ async def should_include_directory(dir_path, allowed_extensions=None): return False - except (OSError, PermissionError) as e: - console.print(f"[yellow]Warning: Could not scan directory {dir_path}: {e}") + except (OSError, PermissionError, UnicodeError) as e: + console.print(f"[yellow]Warning: Could not scan directory {dir_path}: {e}[/yellow]") return False +async def _resolve_split_path(path): + queue = [] + split_path = path.split() + p1 = split_path[0] + + for i, _ in enumerate(split_path): + try: + if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): + queue.append(p1) + p1 = split_path[i + 1] 
+ else: + p1 += f" {split_path[i + 1]}" + except IndexError: + if os.path.exists(p1): + queue.append(p1) + else: + console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") + + return queue + + async def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): """ Handle glob patterns and split path resolution. @@ -103,7 +268,7 @@ async def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None) await display_queue(queue) elif not os.path.exists(os.path.dirname(path)): queue = [ - file for file in resolve_split_path(path) # noqa F821 + file for file in await _resolve_split_path(path) if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) ] await display_queue(queue) @@ -120,7 +285,7 @@ async def extract_safe_file_locations(log_file): safe_section = False safe_file_locations = [] - with open(log_file, 'r') as f: + with open(log_file, 'r', encoding='utf-8') as f: for line in f: line = line.strip() @@ -153,7 +318,7 @@ async def display_queue(queue, base_dir, queue_name, save_to_log=True): log_file = os.path.join(tmp_dir, f"{queue_name}_queue.log") try: - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue successfully saved to log file: {log_file}") except Exception as e: @@ -164,8 +329,21 @@ async def handle_queue(path, meta, paths, base_dir): allowed_extensions = ['.mkv', '.mp4', '.ts'] queue = [] - log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") - allowed_extensions = ['.mkv', '.mp4', '.ts'] + if meta.get('site_upload'): + console.print(f"[bold yellow]Processing site upload queue for tracker: {meta['site_upload']}[/bold yellow]") + site_queue, processed_log = await process_site_upload_queue(meta, base_dir) + + if site_queue: + meta['queue'] = f"{meta['site_upload']}_upload" + meta['site_upload_queue'] = True + + # Return the structured queue and log file + return site_queue, processed_log + else: + console.print(f"[yellow]No unprocessed items found for {meta['site_upload']} upload[/yellow]") + return [], None + + log_file = os.path.join(base_dir, "tmp", f"{meta.get('queue', 'default')}_queue.log") if path.endswith('.txt') and meta.get('unit3d'): console.print(f"[bold yellow]Detected a text file for queue input: {path}[/bold yellow]") @@ -178,7 +356,7 @@ async def handle_queue(path, meta, paths, base_dir): # Save the queue to the log file try: - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue log file saved successfully: {log_file}[/bold green]") except IOError as e: @@ -205,7 +383,7 @@ async def handle_queue(path, meta, paths, base_dir): elif meta.get('queue'): if os.path.exists(log_file): - with open(log_file, 'r') as f: + with open(log_file, 'r', encoding='utf-8') as f: existing_queue = json.load(f) if os.path.exists(path): @@ -242,7 +420,7 @@ async def handle_queue(path, meta, paths, base_dir): if edit_choice == 'u': queue = current_files console.print(f"[bold green]Queue updated with current files ({len(queue)} items).") - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue log file updated: {log_file}[/bold green]") elif edit_choice == 'a': @@ -255,7 +433,7 @@ async def handle_queue(path, meta, paths, base_dir): selected_files = [file for i, file in 
enumerate(sorted(new_files), 1) if i in indices] queue = list(existing_queue) + selected_files console.print(f"[bold green]Queue updated with selected new files ({len(queue)} items).") - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue log file updated: {log_file}[/bold green]") except Exception as e: @@ -267,7 +445,7 @@ async def handle_queue(path, meta, paths, base_dir): try: queue = json.loads(edited_content.strip()) console.print("[bold green]Successfully updated the queue from the editor.") - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) except json.JSONDecodeError as e: console.print(f"[bold red]Failed to parse the edited content: {e}. Using the current files.") @@ -278,7 +456,7 @@ async def handle_queue(path, meta, paths, base_dir): elif edit_choice == 'd': console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") queue = current_files - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]New queue log file created: {log_file}[/bold green]") else: @@ -291,7 +469,7 @@ async def handle_queue(path, meta, paths, base_dir): else: # No changes detected console.print("[green]No changes detected in the queue.[/green]") - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print("[yellow]Do you want to edit, discard, or keep the existing queue?[/yellow]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() @@ -301,7 +479,7 @@ async def handle_queue(path, meta, paths, base_dir): try: queue = json.loads(edited_content.strip()) console.print("[bold green]Successfully updated the queue from the editor.") - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) except json.JSONDecodeError as e: console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") @@ -312,7 +490,7 @@ async def handle_queue(path, meta, paths, base_dir): elif edit_choice == 'd': console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") queue = current_files - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]New queue log file created: {log_file}[/bold green]") else: @@ -344,7 +522,7 @@ async def handle_queue(path, meta, paths, base_dir): console.print("[bold red]No changes were made. 
Using the original queue.") # Save the queue to the log file - with open(log_file, 'w') as f: + with open(log_file, 'w', encoding='utf-8') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") diff --git a/src/radarr.py b/src/radarr.py index 2119a5106..205061fc1 100644 --- a/src/radarr.py +++ b/src/radarr.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import httpx from data.config import config from src.console import console diff --git a/src/region.py b/src/region.py index a63d98b25..19ba6ba4c 100644 --- a/src/region.py +++ b/src/region.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import re from guessit import guessit @@ -90,11 +91,11 @@ async def get_service(video=None, tag=None, audio=None, guess_title=None, get_se 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'CNGO': 'CNGO', 'Cinego': 'CNGO', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', - 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', + 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'Chorki': 'CRKI', 'CRKI': 'CRKI', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', 'Deadhouse Films': 'DHF', - 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', + 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', 'DOCPLAY': 'DOCPLAY', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', @@ -104,13 +105,13 @@ async def get_service(video=None, tag=None, audio=None, guess_title=None, get_se 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', 'Global': 'GLBL', 'GLBO': 'GLBO', 'Globoplay': 'GLBO', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HBOMAX': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', 'JOYN': 'JOYN', 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', - 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Looke': 'LOOKE', 'LOOKE': 'LOOKE', 'Movies Anywhere': 'MA', + 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'Kocowa+': 
'KCW', 'Kocowa': 'KCW', 'KCW': 'KCW', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Looke': 'LOOKE', 'LOOKE': 'LOOKE', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', - 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', + 'NBLA': 'NBLA', 'Nebula': 'NBLA', 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NOWTV': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLTV': 'PLTV', 'Pluto TV': 'PLTV', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', @@ -127,7 +128,7 @@ async def get_service(video=None, tag=None, audio=None, guess_title=None, get_se 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'Vivamax': 'VMAX', 'VMAX': 'VMAX', 'Vivaone': 'VONE', 'VONE': 'VONE', 'VLCT': 'VLCT', 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', + 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'XUMO': 'XUMO', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' } diff --git a/src/rehostimages.py b/src/rehostimages.py index 13a9b3477..701ead3fe 100644 --- a/src/rehostimages.py +++ b/src/rehostimages.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import glob import os import json @@ -9,9 +10,10 @@ from src.takescreens import disc_screenshots, dvd_screenshots, screenshots from src.uploadscreens import upload_screens from data.config import config +from aiofiles import os as aio_os -def match_host(hostname, approved_hosts): +async def match_host(hostname, approved_hosts): for approved_host in approved_hosts: if hostname == approved_host or hostname.endswith(f".{approved_host}"): return approved_host @@ -28,8 +30,6 @@ async def check_hosts(meta, tracker, url_host_mapping, img_host_index=1, approve if meta['debug']: console.print(f"[yellow]Skipping image host upload for {tracker} as per meta['skip_imghost_upload'] setting.") return - if meta['debug']: - console.print(f"[yellow]Checking existing image hosts for {tracker}...") new_images_key = f'{tracker}_images_key' if new_images_key not in meta: meta[new_images_key] = [] @@ -49,7 +49,7 @@ async def check_hosts(meta, tracker, url_host_mapping, img_host_index=1, approve parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = match_host(hostname, url_host_mapping.keys()) + mapped_host = await match_host(hostname, url_host_mapping.keys()) if mapped_host: mapped_host = url_host_mapping.get(mapped_host, mapped_host) @@ -103,7 +103,7 @@ async def check_hosts(meta, tracker, 
url_host_mapping, img_host_index=1, approve parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = match_host(hostname, url_host_mapping.keys()) + mapped_host = await match_host(hostname, url_host_mapping.keys()) if mapped_host: mapped_host = url_host_mapping.get(mapped_host, mapped_host) @@ -123,14 +123,15 @@ async def check_hosts(meta, tracker, url_host_mapping, img_host_index=1, approve # Check if the tracker-specific key has valid images has_valid_images = False if meta.get(new_images_key): - all_images_valid = all( - url_host_mapping.get( - match_host(urlparse(image.get('raw_url', '')).netloc, url_host_mapping.keys()), - None - ) in approved_image_hosts for image in meta[new_images_key] - ) - - if all_images_valid and meta[new_images_key]: + valid_hosts = [] + for image in meta[new_images_key]: + netloc = urlparse(image.get('raw_url', '')).netloc + matched_host = await match_host(netloc, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(matched_host, None) + valid_hosts.append(mapped_host in approved_image_hosts) + + # Then check if all are valid + if all(valid_hosts) and meta[new_images_key]: has_valid_images = True if has_valid_images: @@ -194,7 +195,7 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho # First check if there are any saved screenshots matching those in the image_list if meta.get('image_list') and isinstance(meta['image_list'], list): # Get all PNG files in the screenshots directory - all_png_files = await asyncio.to_thread(glob.glob, os.path.join(screenshots_dir, "*.png")) + all_png_files = [file for file in await aio_os.listdir(screenshots_dir) if file.endswith('.png')] if all_png_files and meta.get('debug'): console.print(f"[cyan]Found {len(all_png_files)} PNG files in screenshots directory") @@ -261,17 +262,20 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho image_patterns = ["*.png", ".[!.]*.png"] image_glob = [] for pattern in image_patterns: - image_glob.extend(glob.glob(pattern)) + glob_results = await asyncio.to_thread(glob.glob, pattern) + image_glob.extend(glob_results) if meta['debug']: console.print(f"[cyan]Found {len(image_glob)} files matching pattern: {pattern}") unwanted_patterns = ["FILE*", "PLAYLIST*", "POSTER*"] unwanted_files = set() for pattern in unwanted_patterns: - unwanted_files.update(glob.glob(pattern)) + glob_results = await asyncio.to_thread(glob.glob, pattern) + unwanted_files.update(glob_results) if pattern.startswith("FILE") or pattern.startswith("PLAYLIST") or pattern.startswith("POSTER"): hidden_pattern = "." 
+ pattern - unwanted_files.update(glob.glob(hidden_pattern)) + hidden_glob_results = await asyncio.to_thread(glob.glob, hidden_pattern) + unwanted_files.update(hidden_glob_results) # Remove unwanted files image_glob = [file for file in image_glob if file not in unwanted_files] @@ -329,6 +333,8 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho else: # Use a more generic pattern to find any PNG files that aren't already in all_screenshots new_screens = await asyncio.to_thread(glob.glob, os.path.join(screenshots_dir, "*.png")) + indexed_pattern = re.compile(r".*-\d+\.png$") + new_screens = [s for s in new_screens if indexed_pattern.match(os.path.basename(s))] # Filter out files we already have new_screens = [screen for screen in new_screens if screen not in all_screenshots] @@ -411,7 +417,8 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho continue else: meta['imghost'] = current_img_host - console.print(f"[green]Uploading to approved host '{current_img_host}'.") + if meta['debug']: + console.print(f"[green]Uploading to approved host '{current_img_host}'.") break uploaded_images, _ = await upload_screens( @@ -430,7 +437,7 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho raw_url = image['raw_url'] parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = match_host(hostname, url_host_mapping.keys()) + mapped_host = await match_host(hostname, url_host_mapping.keys()) mapped_host = url_host_mapping.get(mapped_host, mapped_host) if mapped_host not in approved_image_hosts: @@ -440,68 +447,67 @@ async def handle_image_upload(meta, tracker, url_host_mapping, approved_image_ho return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts # Ensure all uploaded images are valid - if all( - url_host_mapping.get( - match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - ) in approved_image_hosts - for image in meta[new_images_key] - ): - if new_images_key in meta and isinstance(meta[new_images_key], list): - if tracker == "covers": - output_file = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], "covers.json") - else: - output_file = os.path.join(screenshots_dir, "reuploaded_images.json") - - try: - async with aiofiles.open(output_file, 'r', encoding='utf-8') as f: - existing_data = await f.read() - existing_data = json.loads(existing_data) if existing_data else [] - if not isinstance(existing_data, list): - console.print(f"[red]Existing data in {output_file} is not a list. 
Resetting to an empty list.") - existing_data = [] - except Exception: - existing_data = [] - - updated_data = existing_data + meta[new_images_key] - updated_data = [dict(s) for s in {tuple(d.items()) for d in updated_data}] - - if tracker == "covers" and "release_url" in meta: - for image in updated_data: - if "release_url" not in image: - image["release_url"] = meta["release_url"] - console.print(f"[green]Added release URL to {len(updated_data)} cover images: {meta['release_url']}") - - try: - async with aiofiles.open(output_file, 'w', encoding='utf-8') as f: - await f.write(json.dumps(updated_data, indent=4)) - if meta['debug']: - console.print(f"[green]Successfully updated reuploaded images in {output_file}.") - - if tracker == "covers": - deleted_count = 0 - for screenshot in all_screenshots: - try: - if os.path.exists(screenshot): - os.remove(screenshot) - deleted_count += 1 - if meta.get('debug'): - console.print(f"[dim]Deleted cover image file: {screenshot}[/dim]") - except Exception as e: - console.print(f"[yellow]Failed to delete cover image file {screenshot}: {str(e)}[/yellow]") - - if deleted_count > 0: - if meta['debug']: - console.print(f"[green]Cleaned up {deleted_count} cover image files after successful upload[/green]") - - except Exception as e: - console.print(f"[red]Failed to save reuploaded images: {e}") + valid_hosts = [] + for image in meta[new_images_key]: + netloc = urlparse(image['raw_url']).netloc + matched_host = await match_host(netloc, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(matched_host, matched_host) + valid_hosts.append(mapped_host in approved_image_hosts) + if all(valid_hosts) and new_images_key in meta and isinstance(meta[new_images_key], list): + if tracker == "covers": + output_file = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], "covers.json") else: - console.print("[red]new_images_key is not a valid key in meta or is not a list.") + output_file = os.path.join(screenshots_dir, "reuploaded_images.json") + + try: + async with aiofiles.open(output_file, 'r', encoding='utf-8') as f: + existing_data = await f.read() + existing_data = json.loads(existing_data) if existing_data else [] + if not isinstance(existing_data, list): + console.print(f"[red]Existing data in {output_file} is not a list. 
Resetting to an empty list.") + existing_data = [] + except Exception: + existing_data = [] + + updated_data = existing_data + meta[new_images_key] + updated_data = [dict(s) for s in {tuple(d.items()) for d in updated_data}] + + if tracker == "covers" and "release_url" in meta: + for image in updated_data: + if "release_url" not in image: + image["release_url"] = meta["release_url"] + console.print(f"[green]Added release URL to {len(updated_data)} cover images: {meta['release_url']}") - if original_imghost: - meta['imghost'] = original_imghost - return meta[new_images_key], False, images_reuploaded + try: + async with aiofiles.open(output_file, 'w', encoding='utf-8') as f: + await f.write(json.dumps(updated_data, indent=4)) + if meta['debug']: + console.print(f"[green]Successfully updated reuploaded images in {output_file}.") + + if tracker == "covers": + deleted_count = 0 + for screenshot in all_screenshots: + try: + if os.path.exists(screenshot): + os.remove(screenshot) + deleted_count += 1 + if meta.get('debug'): + console.print(f"[dim]Deleted cover image file: {screenshot}[/dim]") + except Exception as e: + console.print(f"[yellow]Failed to delete cover image file {screenshot}: {str(e)}[/yellow]") + + if deleted_count > 0: + if meta['debug']: + console.print(f"[green]Cleaned up {deleted_count} cover image files after successful upload[/green]") + + except Exception as e: + console.print(f"[red]Failed to save reuploaded images: {e}") + else: + console.print("[red]new_images_key is not a valid key in meta or is not a list.") + + if original_imghost: + meta['imghost'] = original_imghost + return meta[new_images_key], False, images_reuploaded else: if original_imghost: meta['imghost'] = original_imghost diff --git a/src/search.py b/src/search.py index b8510fe63..c7c9f13c1 100644 --- a/src/search.py +++ b/src/search.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import platform import os from src.console import console @@ -7,6 +8,7 @@ class Search(): """ Logic for searching """ + def __init__(self, config): self.config = config pass diff --git a/src/sonarr.py b/src/sonarr.py index 09a75c2bb..8c4588cf3 100644 --- a/src/sonarr.py +++ b/src/sonarr.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import httpx from data.config import config from src.console import console diff --git a/src/tags.py b/src/tags.py index 627390180..fcb343a02 100644 --- a/src/tags.py +++ b/src/tags.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import re import json @@ -9,6 +10,7 @@ async def get_tag(video, meta): # Using regex from cross-seed (https://github.com/cross-seed/cross-seed/tree/master?tab=Apache-2.0-1-ov-file) release_group = None basename = os.path.basename(video) + matched_anime = False # Try specialized regex patterns first if meta.get('anime', False): @@ -16,18 +18,27 @@ async def get_tag(video, meta): basename_stripped = os.path.splitext(basename)[0] anime_match = re.search(r'^\s*\[(.+?)\]', basename_stripped) if anime_match: + matched_anime = True release_group = anime_match.group(1) if meta['debug']: console.print(f"Anime regex match: {release_group}") - else: + if not meta.get('anime', False) or not matched_anime: if not meta.get('is_disc') == "BDMV": # Non-anime pattern: group at the end after last hyphen, avoiding resolutions and numbers if os.path.isdir(video): # If video is a directory, use the directory name as basename basename_stripped = 
os.path.basename(os.path.normpath(video)) + elif meta.get('tv_pack', False) or meta.get('keep_folder', False): + basename_stripped = meta['uuid'] else: # If video is a file, use the filename without extension - basename_stripped = os.path.splitext(os.path.basename(video))[0] + basename_no_path = os.path.basename(video) + name, ext = os.path.splitext(basename_no_path) + # If the extension contains a hyphen, it's not a real extension + if ext and '-' in ext: + basename_stripped = basename_no_path + else: + basename_stripped = name non_anime_match = re.search(r'(?<=-)((?!\s*(?:WEB-DL|Blu-ray|H-264|H-265))(?:\W|\b)(?!(?:\d{3,4}[ip]))(?!\d+\b)(?:\W|\b)([\w .]+?))(?:\[.+\])?(?:\))?(?:\s\[.+\])?$', basename_stripped) if non_anime_match: release_group = non_anime_match.group(1).strip() @@ -40,7 +51,7 @@ async def get_tag(video, meta): console.print(f"Non-anime regex match: {release_group}") # If regex patterns didn't work, fall back to guessit - if not release_group: + if not release_group and meta.get('is_disc'): try: parsed = guessit(video) release_group = parsed.get('release_group') @@ -64,7 +75,7 @@ async def get_tag(video, meta): tag = "" # Remove generic "no group" tags - if tag and tag[1:].lower() in ["nogroup", "nogrp", "hd.ma.5.1", "untouched"]: + if tag and tag[1:].lower() in ["hd.ma.5.1", "untouched"]: tag = "" return tag @@ -90,7 +101,7 @@ async def tag_override(meta): elif key == 'personalrelease': meta[key] = _is_true(value.get(key, "False")) elif key == 'template': - meta['desc_template'] = value.get(key) + meta['description_template'] = value.get(key) else: meta[key] = value.get(key) except Exception as e: diff --git a/src/takescreens.py b/src/takescreens.py index 9019aa8f4..ac5efcac8 100644 --- a/src/takescreens.py +++ b/src/takescreens.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import os import re import glob @@ -7,11 +8,8 @@ import json import platform import asyncio -import oxipng import psutil import sys -import concurrent.futures -import signal import gc import traceback from pymediainfo import MediaInfo @@ -19,25 +17,21 @@ from data.config import config from src.cleanup import cleanup, reset_terminal -img_host = [ - config["DEFAULT"][key].lower() - for key in sorted(config["DEFAULT"].keys()) - if key.startswith("img_host_1") and not key.endswith("0") -] task_limit = int(config['DEFAULT'].get('process_limit', 1)) threads = str(config['DEFAULT'].get('threads', '1')) cutoff = int(config['DEFAULT'].get('cutoff_screens', 1)) ffmpeg_limit = config['DEFAULT'].get('ffmpeg_limit', False) +ffmpeg_is_good = config['DEFAULT'].get('ffmpeg_is_good', False) +use_libplacebo = config['DEFAULT'].get('use_libplacebo', True) try: task_limit = int(task_limit) # Convert to integer except ValueError: task_limit = 1 tone_map = config['DEFAULT'].get('tone_map', False) -optimize_images = config['DEFAULT'].get('optimize_images', True) +ffmpeg_compression = str(config['DEFAULT'].get('ffmpeg_compression', '6')) algorithm = config['DEFAULT'].get('algorithm', 'mobius').strip() desat = float(config['DEFAULT'].get('desat', 10.0)) -frame_overlay = config['DEFAULT'].get('frame_overlay', False) async def run_ffmpeg(command): @@ -70,6 +64,7 @@ async def sanitize_filename(filename): async def disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None, force_screenshots=False): + img_host = await get_image_host(meta) screens = meta['screens'] if meta['debug']: start_time = time.time() @@ -136,7 +131,7 @@ async def 
disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, ss_times = await valid_ss_time([], num_screens, length, frame_rate, meta, retake=force_screenshots) - if frame_overlay: + if meta.get('frame_overlay', False): console.print("[yellow]Getting frame information for overlays...") frame_info_tasks = [ get_frame_info(file, ss_times[i], meta) @@ -154,6 +149,11 @@ async def disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, if meta['debug']: console.print(f"[cyan]Collected frame information for {len(frame_info_results)} frames") + num_workers = min(num_screens, task_limit) + + if meta['debug']: + console.print(f"Using {num_workers} worker(s) for {num_screens} image(s)") + capture_tasks = [] capture_results = [] if use_vs: @@ -166,8 +166,16 @@ async def disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, loglevel = 'quiet' existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} + + # Create semaphore to limit concurrent tasks + semaphore = asyncio.Semaphore(task_limit) + + async def capture_disc_with_semaphore(*args): + async with semaphore: + return await capture_disc_task(*args) + capture_tasks = [ - capture_disc_task( + capture_disc_with_semaphore( i, file, ss_times[i], @@ -202,86 +210,41 @@ async def disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, if not force_screenshots and meta['debug']: console.print(f"[green]Successfully captured {len(capture_results)} screenshots.") - optimized_results = [] - valid_images = [image for image in capture_results if os.path.exists(image)] - - if not valid_images: - console.print("[red]No valid images found for optimization.[/red]") - return [] - - # Dynamically determine the number of processes - num_tasks = len(valid_images) - num_workers = min(num_tasks, task_limit) - if optimize_images: - if meta['debug']: - console.print("[yellow]Now optimizing images...[/yellow]") - - loop = asyncio.get_running_loop() - stop_event = asyncio.Event() - - def handle_sigint(sig, frame): - console.print("\n[red]CTRL+C detected. Cancelling optimization...[/red]") - executor.shutdown(wait=False) - stop_event.set() - for task in asyncio.all_tasks(loop): - task.cancel() - - signal.signal(signal.SIGINT, handle_sigint) - - try: - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: - tasks = [asyncio.create_task(worker_wrapper(image, optimize_image_task, executor)) for image in valid_images] - - optimized_results = await asyncio.gather(*tasks, return_exceptions=True) - - except KeyboardInterrupt: - console.print("\n[red]CTRL+C detected. Cancelling tasks...[/red]") - executor.shutdown(wait=False) - await kill_all_child_processes() - console.print("[red]All tasks cancelled. 
Exiting.[/red]") - sys.exit(1) - - finally: - if meta['debug']: - console.print("[yellow]Shutting down optimization workers...[/yellow]") - executor.shutdown(wait=False) - await asyncio.sleep(0.1) - await kill_all_child_processes() - gc.collect() - - optimized_results = [res for res in optimized_results if not isinstance(res, str) or not res.startswith("Error")] - if meta['debug']: - console.print("Optimized results:", optimized_results) - - if not force_screenshots and meta['debug']: - console.print(f"[green]Successfully optimized {len(optimized_results)} images.[/green]") - else: - optimized_results = valid_images - valid_results = [] remaining_retakes = [] - for image_path in optimized_results: + for image_path in capture_results: if "Error" in image_path: console.print(f"[red]{image_path}") continue retake = False image_size = os.path.getsize(image_path) + if meta['debug']: + console.print(f"[yellow]Checking image {image_path} (size: {image_size} bytes) for image host: {img_host}[/yellow]") if image_size <= 75000: console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") retake = True - elif "imgbb" in img_host and image_size <= 31000000: - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") - elif any(host in ["imgbox", "pixhost"] for host in img_host) and image_size <= 10000000: - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") - elif any(host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage"] for host in img_host): - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") else: - console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size requirements for {img_host}, retaking.") - retake = True + if "imgbb" in img_host: + if image_size <= 31000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") + else: + console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size requirements for imgbb, retaking.") + retake = True + elif img_host in ["imgbox", "pixhost"]: + if 75000 < image_size <= 10000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size requirements for {img_host}, retaking.") + retake = True + elif img_host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage", "seedpool_cdn"]: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print(f"[red]Unknown image host or image doesn't meet requirements for host: {img_host}, retaking.") + retake = True if retake: retry_attempts = 3 @@ -296,20 +259,21 @@ def handle_sigint(sig, frame): screenshot_response = await capture_disc_task( index, file, random_time, image_path, keyframe, loglevel, hdr_tonemap, meta ) - if optimize_images: - optimize_image_task(screenshot_response) new_size = os.path.getsize(screenshot_response) valid_image = False - if "imgbb" in img_host and new_size > 75000 and new_size <= 31000000: - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and new_size <= 10000000 and any(host in ["imgbox", 
"pixhost"] for host in img_host): - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and any(host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage"] for host in img_host): - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True + if "imgbb" in img_host: + if new_size > 75000 and new_size <= 31000000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True + elif img_host in ["imgbox", "pixhost"]: + if new_size > 75000 and new_size <= 10000000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True + elif img_host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage", "seedpool_cdn"]: + if new_size > 75000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True if valid_image: valid_results.append(screenshot_response) @@ -333,22 +297,33 @@ def handle_sigint(sig, frame): if meta['debug']: finish_time = time.time() console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") - await cleanup() + + multi_screens = int(config['DEFAULT'].get('multiScreens', 2)) + discs = meta.get('discs', []) + one_disc = True + if discs and len(discs) == 1: + one_disc = True + elif discs and len(discs) > 1: + one_disc = False + + if (not meta.get('tv_pack') and one_disc) or multi_screens == 0: + await cleanup() async def capture_disc_task(index, file, ss_time, image_path, keyframe, loglevel, hdr_tonemap, meta): try: - ff = ffmpeg.input(file, ss=ss_time, skip_frame=keyframe) - if hdr_tonemap: - ff = ( - ff - .filter('zscale', transfer='linear') - .filter('tonemap', tonemap=algorithm, desat=desat) - .filter('zscale', transfer='bt709') - .filter('format', 'rgb24') - ) + # Build filter chain + vf_filters = [] - if frame_overlay: + if hdr_tonemap: + vf_filters.extend([ + "zscale=transfer=linear", + f"tonemap=tonemap={algorithm}:desat={desat}", + "zscale=transfer=bt709", + "format=rgb24" + ]) + + if meta.get('frame_overlay', False): # Get frame info from pre-collected data if available frame_info = meta.get('frame_info_map', {}).get(ss_time, {}) @@ -377,54 +352,63 @@ async def capture_disc_task(index, file, ss_time, image_path, keyframe, loglevel y_type = y_number + line_spacing y_hdr = y_type + line_spacing - # Use the filtered output with frame info - base_text = ff - # Frame number - base_text = base_text.filter('drawtext', - text=f"Frame Number: {frame_number}", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_number, - box=1, - boxcolor='black@0.5' - ) + vf_filters.append( + f"drawtext=text='Frame Number\\: {frame_number}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_number}:box=1:boxcolor=black@0.5" + ) # Frame type - base_text = base_text.filter('drawtext', - text=f"Frame Type: {frame_type}", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_type, - box=1, - boxcolor='black@0.5' - ) + vf_filters.append( + f"drawtext=text='Frame Type\\: {frame_type}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_type}:box=1:boxcolor=black@0.5" + ) # HDR status if hdr_tonemap: - base_text = base_text.filter('drawtext', - text="Tonemapped HDR", - fontcolor='white', - fontsize=font_size, - 
x=x_all, - y=y_hdr, - box=1, - boxcolor='black@0.5' - ) - - # Use the filtered output with frame info - ff = base_text - - command = ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) + vf_filters.append( + f"drawtext=text='Tonemapped HDR':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_hdr}:box=1:boxcolor=black@0.5" + ) + + # Build command + # Always ensure at least format filter is present for PNG compression to work + if not vf_filters: + vf_filters.append("format=rgb24") + vf_chain = ",".join(vf_filters) + + cmd = [ + "ffmpeg", + "-y", + "-loglevel", loglevel, + "-hide_banner", + "-ss", str(ss_time), + "-skip_frame", keyframe, + "-i", file, + "-vframes", "1", + "-vf", vf_chain, + "-compression_level", ffmpeg_compression, + "-pred", "mixed", + image_path + ] + + # Print the command for debugging + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]FFmpeg command: {' '.join(cmd)}[/cyan]") + + # Run command + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE ) + stdout, stderr = await process.communicate() + returncode = process.returncode + + # Print stdout and stderr if in verbose mode + if loglevel == 'verbose': + if stdout: + console.print(f"[blue]FFmpeg stdout:[/blue]\n{stdout.decode('utf-8', errors='replace')}") + if stderr: + console.print(f"[yellow]FFmpeg stderr:[/yellow]\n{stderr.decode('utf-8', errors='replace')}") - returncode, stdout, stderr = await run_ffmpeg(command) if returncode == 0: return (index, image_path) else: @@ -548,8 +532,9 @@ async def _is_vob_good(n, loops, num_screens): image_paths.append(image) input_files.append(input_file) - if frame_overlay: - console.print("[yellow]Getting frame information for overlays...") + if meta.get('frame_overlay', False): + if meta['debug']: + console.print("[yellow]Getting frame information for overlays...") frame_info_tasks = [ get_frame_info(input_files[i], ss_times[i], meta) for i in range(num_screens + 1) @@ -565,10 +550,22 @@ async def _is_vob_good(n, loops, num_screens): if meta['debug']: console.print(f"[cyan]Collected frame information for {len(frame_info_results)} frames") + num_workers = min(num_screens + 1, task_limit) + + if meta['debug']: + console.print(f"Using {num_workers} worker(s) for {num_screens} image(s)") + + # Create semaphore to limit concurrent tasks + semaphore = asyncio.Semaphore(task_limit) + + async def capture_dvd_with_semaphore(args): + async with semaphore: + return await capture_dvd_screenshot(args) + for i in range(num_screens + 1): if not os.path.exists(image_paths[i]) or meta.get('retake', False): capture_tasks.append( - capture_dvd_screenshot( + capture_dvd_with_semaphore( (i, input_files[i], image_paths[i], ss_times[i], meta, width, height, w_sar, h_sar) ) ) @@ -601,71 +598,12 @@ async def _is_vob_good(n, loops, num_screens): if meta['debug']: console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") os.remove(smallest) - - optimized_results = [] - - # Filter out non-existent files first - valid_images = [image for image in capture_results if os.path.exists(image)] - - # Dynamically determine the number of processes - num_tasks = len(valid_images) - num_workers = min(num_tasks, task_limit) - - if optimize_images: - if num_workers == 0: - console.print("[red]No valid images found for optimization.[/red]") - return - if meta['debug']: - console.print("[yellow]Now optimizing 
images...[/yellow]") - - loop = asyncio.get_running_loop() - stop_event = asyncio.Event() - - def handle_sigint(sig, frame): - console.print("\n[red]CTRL+C detected. Cancelling optimization...[/red]") - executor.shutdown(wait=False) - stop_event.set() - for task in asyncio.all_tasks(loop): - task.cancel() - - signal.signal(signal.SIGINT, handle_sigint) - - try: - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: - # Start all tasks in parallel using worker_wrapper() - tasks = [asyncio.create_task(worker_wrapper(image, optimize_image_task, executor)) for image in valid_images] - - # Wait for all tasks to complete - optimized_results = await asyncio.gather(*tasks, return_exceptions=True) - except KeyboardInterrupt: - console.print("\n[red]CTRL+C detected. Cancelling tasks...[/red]") - executor.shutdown(wait=False) - await kill_all_child_processes() - console.print("[red]All tasks cancelled. Exiting.[/red]") - sys.exit(1) - finally: - if meta['debug']: - console.print("[yellow]Shutting down optimization workers...[/yellow]") - await asyncio.sleep(0.1) - await kill_all_child_processes() - executor.shutdown(wait=False) - gc.collect() - - optimized_results = [res for res in optimized_results if not isinstance(res, str) or not res.startswith("Error")] - - if meta['debug']: - console.print("Optimized results:", optimized_results) - if not retry_cap and meta['debug']: - console.print(f"[green]Successfully optimized {len(optimized_results)} images.") - - executor.shutdown(wait=True) # Ensure cleanup - else: - optimized_results = valid_images + capture_results.remove(smallest) valid_results = [] remaining_retakes = [] - for image in optimized_results: + for image in capture_results: if "Error" in image: console.print(f"[red]{image}") continue @@ -709,9 +647,6 @@ def handle_sigint(sig, frame): console.print(f"[red]Failed to capture screenshot for {image}. 
Retrying...[/red]") continue - if optimize_images: - optimize_image_task(screenshot_result) - retaken_size = os.path.getsize(screenshot_result) if retaken_size > 75000: console.print(f"[green]Successfully retaken screenshot for: {screenshot_result} ({retaken_size} bytes)[/green]") @@ -732,7 +667,17 @@ def handle_sigint(sig, frame): if not retry_cap and meta['debug']: console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") - await cleanup() + + multi_screens = int(config['DEFAULT'].get('multiScreens', 2)) + discs = meta.get('discs', []) + one_disc = True + if discs and len(discs) == 1: + one_disc = True + elif discs and len(discs) > 1: + one_disc = False + + if (not meta.get('tv_pack') and one_disc) or multi_screens == 0: + await cleanup() async def capture_dvd_screenshot(task): @@ -746,12 +691,14 @@ async def capture_dvd_screenshot(task): if video_duration and seek_time > video_duration: seek_time = max(0, video_duration - 1) - # Construct ffmpeg command - ff = ffmpeg.input(input_file, ss=seek_time) + # Build filter chain + vf_filters = [] if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + scaled_w = int(round(width * w_sar)) + scaled_h = int(round(height * h_sar)) + vf_filters.append(f"scale={scaled_w}:{scaled_h}") - if frame_overlay: + if meta.get('frame_overlay', False): # Get frame info from pre-collected data if available frame_info = meta.get('frame_info_map', {}).get(seek_time, {}) @@ -779,35 +726,50 @@ async def capture_dvd_screenshot(task): y_number = x_all y_type = y_number + line_spacing - # Use the filtered output with frame info - base_text = ff - # Frame number - base_text = base_text.filter('drawtext', - text=f"Frame Number: {frame_number}", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_number, - box=1, - boxcolor='black@0.5' - ) + vf_filters.append( + f"drawtext=text='Frame Number\\: {frame_number}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_number}:box=1:boxcolor=black@0.5" + ) # Frame type - base_text = base_text.filter('drawtext', - text=f"Frame Type: {frame_type}", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_type, - box=1, - boxcolor='black@0.5' - ) - - # Use the filtered output with frame info - ff = base_text - - returncode, _, stderr = await run_ffmpeg(ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek')) + vf_filters.append( + f"drawtext=text='Frame Type\\: {frame_type}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_type}:box=1:boxcolor=black@0.5" + ) + + # Build command + # Always ensure at least format filter is present for PNG compression to work + if not vf_filters: + vf_filters.append("format=rgb24") + vf_chain = ",".join(vf_filters) + + cmd = [ + "ffmpeg", + "-y", + "-loglevel", loglevel, + "-hide_banner", + "-ss", str(seek_time), + "-accurate_seek", + "-i", input_file, + "-vframes", "1", + "-vf", vf_chain, + "-compression_level", ffmpeg_compression, + "-pred", "mixed", + image + ] + + # Print the command for debugging + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]FFmpeg command: {' '.join(cmd)}[/cyan]", emoji=False) + + # Run command + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + stdout, stderr = await process.communicate() + returncode = process.returncode + if returncode != 0: console.print(f"[red]Error capturing screenshot for 
{input_file} at {seek_time}s:[/red]\n{stderr.decode()}") return (index, None) @@ -824,6 +786,7 @@ async def capture_dvd_screenshot(task): async def screenshots(path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): + img_host = await get_image_host(meta) screens = meta['screens'] if meta['debug']: start_time = time.time() @@ -919,12 +882,6 @@ def safe_float(value, default=0.0, field_name=""): sanitized_filename = await sanitize_filename(filename) - if tone_map and "HDR" in meta['hdr']: - hdr_tonemap = True - meta['tonemapped'] = True - else: - hdr_tonemap = False - existing_images_count = 0 existing_image_paths = [] for i in range(num_screens): @@ -942,8 +899,9 @@ def safe_float(value, default=0.0, field_name=""): if not ss_times: ss_times = await valid_ss_time([], num_capture, length, frame_rate, meta, retake=force_screenshots) - if frame_overlay: - console.print("[yellow]Getting frame information for overlays...") + if meta.get('frame_overlay', False): + if meta['debug']: + console.print("[yellow]Getting frame information for overlays...") frame_info_tasks = [ get_frame_info(path, ss_times[i], meta) for i in range(num_capture) @@ -963,16 +921,60 @@ def safe_float(value, default=0.0, field_name=""): num_tasks = num_capture num_workers = min(num_tasks, task_limit) + meta['libplacebo'] = False + if tone_map and ("HDR" in meta['hdr'] or "DV" in meta['hdr'] or "HLG" in meta['hdr']): + if use_libplacebo and not meta.get('frame_overlay', False): + if not ffmpeg_is_good: + test_time = ss_times[0] if ss_times else 0 + test_image = image_path if isinstance(image_path, str) else ( + image_path[0] if isinstance(image_path, list) and image_path else None + ) + libplacebo, compatible = await check_libplacebo_compatibility( + w_sar, h_sar, width, height, path, test_time, test_image, loglevel, meta + ) + if compatible: + hdr_tonemap = True + meta['tonemapped'] = True + if libplacebo: + hdr_tonemap = True + meta['tonemapped'] = True + meta['libplacebo'] = True + if not compatible and not libplacebo: + hdr_tonemap = False + console.print("[yellow]FFMPEG failed tonemap checking.[/yellow]") + await asyncio.sleep(2) + if not libplacebo and "HDR" not in meta.get('hdr'): + hdr_tonemap = False + else: + hdr_tonemap = True + meta['tonemapped'] = True + meta['libplacebo'] = True + else: + if "HDR" not in meta.get('hdr'): + hdr_tonemap = False + else: + hdr_tonemap = True + meta['tonemapped'] = True + else: + hdr_tonemap = False + if meta['debug']: console.print(f"Using {num_workers} worker(s) for {num_capture} image(s)") + # Create semaphore to limit concurrent tasks + semaphore = asyncio.Semaphore(num_workers) + + async def capture_with_semaphore(args): + async with semaphore: + return await capture_screenshot(args) + capture_tasks = [] for i in range(num_capture): image_index = existing_images_count + i image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{image_index}.png") if not os.path.exists(image_path) or meta.get('retake', False): capture_tasks.append( - capture_screenshot( # Direct async function call + capture_with_semaphore( (i, path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap, meta) ) ) @@ -1012,59 +1014,9 @@ def safe_float(value, default=0.0, field_name=""): if not force_screenshots and meta['debug']: console.print(f"[green]Successfully captured {len(capture_results)} screenshots.") - optimized_results = [] - valid_images = [image for image in capture_results if os.path.exists(image)] - 
num_workers = min(task_limit, len(valid_images)) - if optimize_images: - if meta['debug']: - console.print("[yellow]Now optimizing images...[/yellow]") - console.print(f"Using {num_workers} worker(s) for {len(valid_images)} image(s)") - - executor = concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) - try: - with executor: - # Start all tasks in parallel using worker_wrapper() - tasks = [asyncio.create_task(worker_wrapper(image, optimize_image_task, executor)) for image in valid_images] - - # Wait for all tasks to complete - optimized_results = await asyncio.gather(*tasks, return_exceptions=True) - except KeyboardInterrupt: - console.print("\n[red]CTRL+C detected. Cancelling optimization tasks...[/red]") - await asyncio.sleep(0.1) - executor.shutdown(wait=True, cancel_futures=True) - await kill_all_child_processes() - console.print("[red]All tasks cancelled. Exiting.[/red]") - gc.collect() - reset_terminal() - sys.exit(1) - except Exception as e: - console.print(f"[red]Error during image optimization: {e}[/red]") - await asyncio.sleep(0.1) - executor.shutdown(wait=True, cancel_futures=True) - await kill_all_child_processes() - gc.collect() - reset_terminal() - sys.exit(1) - finally: - if meta['debug']: - console.print("[yellow]Shutting down optimization workers...[/yellow]") - await asyncio.sleep(0.1) - executor.shutdown(wait=True, cancel_futures=True) - for task in tasks: - task.cancel() - await kill_all_child_processes() - gc.collect() - - # Filter out failed results - optimized_results = [res for res in optimized_results if isinstance(res, str) and "Error" not in res] - if not force_screenshots and meta['debug']: - console.print(f"[green]Successfully optimized {len(optimized_results)} images.[/green]") - else: - optimized_results = valid_images - valid_results = [] remaining_retakes = [] - for image_path in optimized_results: + for image_path in capture_results: if "Error" in image_path: console.print(f"[red]{image_path}") continue @@ -1077,18 +1029,27 @@ def safe_float(value, default=0.0, field_name=""): if image_size <= 75000: console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") retake = True - elif "imgbb" in img_host and image_size <= 31000000: - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") - elif any(host in ["imgbox", "pixhost"] for host in img_host) and image_size <= 10000000: - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") - elif any(host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage"] for host in img_host): - if meta['debug']: - console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") else: - console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size requirements for {img_host}, retaking.") - retake = True + if "imgbb" in img_host: + if image_size <= 31000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") + else: + console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size requirements for imgbb, retaking.") + retake = True + elif img_host in ["imgbox", "pixhost"]: + if 75000 < image_size <= 10000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print(f"[red]Image {image_path} with size {image_size} bytes: does not meet size 
requirements for {img_host}, retaking.") + retake = True + elif img_host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage", "seedpool_cdn"]: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print(f"[red]Unknown image host or image doesn't meet requirements for host: {img_host}, retaking.") + retake = True if retake: retry_attempts = 5 @@ -1118,20 +1079,21 @@ def safe_float(value, default=0.0, field_name=""): if not screenshot_path or not os.path.exists(screenshot_path): continue - if optimize_images: - optimize_image_task(screenshot_path) new_size = os.path.getsize(screenshot_path) valid_image = False - if "imgbb" in img_host and 75000 < new_size <= 31000000: - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True - elif 75000 < new_size <= 10000000 and any(host in ["imgbox", "pixhost"] for host in img_host): - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and any(host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage"] for host in img_host): - console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") - valid_image = True + if "imgbb" in img_host: + if 75000 < new_size <= 31000000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True + elif img_host in ["imgbox", "pixhost"]: + if 75000 < new_size <= 10000000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True + elif img_host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage", "seedpool_cdn"]: + if new_size > 75000: + console.print(f"[green]Successfully retaken screenshot for: {screenshot_response} ({new_size} bytes)[/green]") + valid_image = True if valid_image: valid_results.append(screenshot_response) @@ -1161,17 +1123,18 @@ def safe_float(value, default=0.0, field_name=""): if not screenshot_path or not os.path.exists(screenshot_path): continue - if optimize_images: - optimize_image_task(screenshot_path) new_size = os.path.getsize(screenshot_path) valid_image = False - if "imgbb" in img_host and 75000 < new_size <= 31000000: - valid_image = True - elif 75000 < new_size <= 10000000 and any(host in ["imgbox", "pixhost"] for host in img_host): - valid_image = True - elif new_size > 75000 and any(host in ["ptpimg", "lensdump", "ptscreens", "onlyimage"] for host in img_host): - valid_image = True + if "imgbb" in img_host: + if 75000 < new_size <= 31000000: + valid_image = True + elif img_host in ["imgbox", "pixhost"]: + if 75000 < new_size <= 10000000: + valid_image = True + elif img_host in ["ptpimg", "lensdump", "ptscreens", "onlyimage", "dalexni", "zipline", "passtheimage", "seedpool_cdn"]: + if new_size > 75000: + valid_image = True if valid_image: valid_results.append(screenshot_response) @@ -1195,7 +1158,17 @@ def safe_float(value, default=0.0, field_name=""): if meta['debug']: finish_time = time.time() console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") - await cleanup() + + multi_screens = int(config['DEFAULT'].get('multiScreens', 2)) + discs = meta.get('discs', []) + one_disc = True + if discs and len(discs) == 
1: + one_disc = True + elif discs and len(discs) > 1: + one_disc = False + + if (not meta.get('tv_pack') and one_disc) or multi_screens == 0: + await cleanup() async def capture_screenshot(args): @@ -1238,30 +1211,170 @@ def set_ffmpeg_threads(): if loglevel == 'verbose' or (meta and meta.get('debug', False)): console.print(f"[cyan]Processing file: {path}[/cyan]") + if not meta.get('frame_overlay', False): + # Warm-up (only for first screenshot index or if not warmed) + if use_libplacebo: + warm_up = config['DEFAULT'].get('ffmpeg_warmup', False) + if warm_up: + meta['_libplacebo_warmed'] = False + else: + meta['_libplacebo_warmed'] = True + if "_libplacebo_warmed" not in meta: + meta['_libplacebo_warmed'] = False + if hdr_tonemap and meta.get('libplacebo') and not meta.get('_libplacebo_warmed'): + await libplacebo_warmup(path, meta, loglevel) + + threads_value = set_ffmpeg_threads() + threads_val = threads_value[1] + vf_filters = [] + + if w_sar != 1 or h_sar != 1: + scaled_w = int(round(width * w_sar)) + scaled_h = int(round(height * h_sar)) + vf_filters.append(f"scale={scaled_w}:{scaled_h}") + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]Applied PAR scale -> {scaled_w}x{scaled_h}[/cyan]") + + if hdr_tonemap: + if meta.get('libplacebo', False): + vf_filters.append( + "libplacebo=tonemapping=hable:colorspace=bt709:" + "color_primaries=bt709:color_trc=bt709:range=tv" + ) + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print("[cyan]Using libplacebo tonemapping[/cyan]") + else: + vf_filters.extend([ + "zscale=transfer=linear", + f"tonemap=tonemap={algorithm}:desat={desat}", + "zscale=transfer=bt709" + ]) + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]Using zscale tonemap chain (algo={algorithm}, desat={desat})[/cyan]") + + vf_filters.append("format=rgb24") + vf_chain = ",".join(vf_filters) if vf_filters else "format=rgb24" + + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]Final -vf chain: {vf_chain}[/cyan]") + + threads_value = ['-threads', '1'] + threads_val = threads_value[1] + + def build_cmd(use_libplacebo=True): + cmd_local = [ + "ffmpeg", + "-y", + "-ss", str(ss_time), + "-i", path, + "-map", "0:v:0", + "-an", + "-sn", + ] + if use_libplacebo and meta.get('libplacebo', False): + cmd_local += ["-init_hw_device", "vulkan"] + cmd_local += [ + "-vframes", "1", + "-vf", vf_chain, + "-compression_level", ffmpeg_compression, + "-pred", "mixed", + "-loglevel", loglevel, + ] + if ffmpeg_limit: + cmd_local += ["-threads", threads_val] + cmd_local.append(image_path) + return cmd_local + + cmd = build_cmd(use_libplacebo=True) + + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + # Disable emoji translation so 0:v:0 stays literal + console.print(f"[cyan]FFmpeg command: {' '.join(cmd)}[/cyan]", emoji=False) + + # --- Execute with retry/fallback if libplacebo fails --- + async def run_cmd(run_cmd_list, timeout_sec): + proc = await asyncio.create_subprocess_exec( + *run_cmd_list, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + try: + stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout_sec) + except asyncio.TimeoutError: + proc.kill() + try: + await proc.wait() + except Exception: + pass + return -1, b"", b"Timeout" + return proc.returncode, stdout, stderr + + returncode, stdout, stderr = await run_cmd(cmd, 140) # a bit longer for first pass + if returncode != 0 and hdr_tonemap and 
meta.get('libplacebo'): + # Retry once (shader compile might have delayed first invocation) + if loglevel == 'verbose' or meta.get('debug', False): + console.print("[yellow]First libplacebo attempt failed; retrying once...[/yellow]") + await asyncio.sleep(1.0) + returncode, stdout, stderr = await run_cmd(cmd, 160) + + if returncode != 0 and hdr_tonemap and meta.get('libplacebo'): + # Fallback: switch to zscale tonemap chain + if loglevel == 'verbose' or meta.get('debug', False): + console.print("[red]libplacebo failed twice; falling back to zscale tonemap[/red]") + meta['libplacebo'] = False + # Rebuild chain with zscale + z_vf_filters = [] + if w_sar != 1 or h_sar != 1: + z_vf_filters.append(f"scale={scaled_w}:{scaled_h}") + z_vf_filters.extend([ + "zscale=transfer=linear", + f"tonemap=tonemap={algorithm}:desat={desat}", + "zscale=transfer=bt709", + "format=rgb24" + ]) + vf_chain = ",".join(z_vf_filters) + fallback_cmd = build_cmd(use_libplacebo=False) + # Replace the -vf argument with new chain + for i, tok in enumerate(fallback_cmd): + if tok == "-vf": + fallback_cmd[i+1] = vf_chain + break + if loglevel == 'verbose' or meta.get('debug', False): + console.print(f"[cyan]Fallback FFmpeg command: {' '.join(fallback_cmd)}[/cyan]", emoji=False) + returncode, stdout, stderr = await run_cmd(fallback_cmd, 140) + cmd = fallback_cmd # for logging below + + if returncode == 0 and os.path.exists(image_path): + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[green]Screenshot captured successfully: {image_path}[/green]") + return (index, image_path) + else: + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + err_txt = (stderr or b"").decode(errors='replace').strip() + console.print(f"[red]FFmpeg process failed (final): {err_txt}[/red]") + return (index, None) + # Proceed with screenshot capture threads_value = set_ffmpeg_threads() threads_val = threads_value[1] - if ffmpeg_limit: - ff = ( - ffmpeg - .input(path, ss=ss_time, threads=threads_val) - ) - else: - ff = ffmpeg.input(path, ss=ss_time) - ff = ff['v'] + + # Build filter chain + vf_filters = [] + if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + scaled_w = int(round(width * w_sar)) + scaled_h = int(round(height * h_sar)) + vf_filters.append(f"scale={scaled_w}:{scaled_h}") if hdr_tonemap: - ff = ( - ff - .filter('zscale', transfer='linear') - .filter('tonemap', tonemap=algorithm, desat=desat) - .filter('zscale', transfer='bt709') - .filter('format', 'rgb24') - ) - - if frame_overlay: + vf_filters.extend([ + "zscale=transfer=linear", + f"tonemap=tonemap={algorithm}:desat={desat}", + "zscale=transfer=bt709", + "format=rgb24" + ]) + + if meta.get('frame_overlay', False): # Get frame info from pre-collected data if available frame_info = meta.get('frame_info_map', {}).get(ss_time, {}) @@ -1290,68 +1403,59 @@ def set_ffmpeg_threads(): y_type = y_number + line_spacing y_hdr = y_type + line_spacing - # Use the filtered output with frame info - base_text = ff - # Frame number - base_text = base_text.filter('drawtext', - text=f"Frame Number: {frame_number}", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_number, - box=1, - boxcolor='black@0.5' - ) + vf_filters.append( + f"drawtext=text='Frame Number\\: {frame_number}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_number}:box=1:boxcolor=black@0.5" + ) # Frame type - base_text = base_text.filter('drawtext', - text=f"Frame Type: {frame_type}", - fontcolor='white', - 
fontsize=font_size, - x=x_all, - y=y_type, - box=1, - boxcolor='black@0.5' - ) + vf_filters.append( + f"drawtext=text='Frame Type\\: {frame_type}':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_type}:box=1:boxcolor=black@0.5" + ) # HDR status if hdr_tonemap: - base_text = base_text.filter('drawtext', - text="Tonemapped HDR", - fontcolor='white', - fontsize=font_size, - x=x_all, - y=y_hdr, - box=1, - boxcolor='black@0.5' - ) - - # Use the filtered output with frame info - ff = base_text + vf_filters.append( + f"drawtext=text='Tonemapped HDR':fontcolor=white:fontsize={font_size}:x={x_all}:y={y_hdr}:box=1:boxcolor=black@0.5" + ) + + # Build command + # Always ensure at least format filter is present for PNG compression to work + vf_filters.append("format=rgb24") + vf_chain = ",".join(vf_filters) + + cmd = [ + "ffmpeg", + "-y", + "-loglevel", loglevel, + "-hide_banner", + "-ss", str(ss_time), + "-i", path, + "-vframes", "1", + "-vf", vf_chain, + "-compression_level", ffmpeg_compression, + "-pred", "mixed", + image_path + ] if ffmpeg_limit: - command = ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24", **{'threads': threads_val}) - .overwrite_output() - .global_args('-loglevel', loglevel) - ) - else: - command = ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - ) + # Insert threads before compression options + cmd.insert(-3, "-threads") + cmd.insert(-3, threads_val) # Print the command for debugging if loglevel == 'verbose' or (meta and meta.get('debug', False)): - cmd = command.compile() console.print(f"[cyan]FFmpeg command: {' '.join(cmd)}[/cyan]") try: - returncode, stdout, stderr = await run_ffmpeg(command) + # Run command + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + stdout, stderr = await process.communicate() + returncode = process.returncode # Print stdout and stderr if in verbose mode if loglevel == 'verbose': @@ -1437,73 +1541,35 @@ async def valid_ss_time(ss_times, num_screens, length, frame_rate, meta, retake= return result_times -async def worker_wrapper(image, optimize_image_task, executor): - """ Async wrapper to run optimize_image_task in a separate process """ - loop = asyncio.get_running_loop() - try: - return await loop.run_in_executor(executor, optimize_image_task, image) - except KeyboardInterrupt: - console.print(f"[red][{time.strftime('%X')}] Worker interrupted while processing {image}[/red]") - gc.collect() - return None - except Exception as e: - console.print(f"[red][{time.strftime('%X')}] Worker error on {image}: {e}[/red]") - gc.collect() - return f"Error: {e}" - finally: - gc.collect() - - async def kill_all_child_processes(): - """Ensures all child processes (e.g., ProcessPoolExecutor workers) are terminated.""" - current_process = psutil.Process() - children = current_process.children(recursive=True) # Get child processes once - - for child in children: - console.print(f"[red]Killing stuck worker process: {child.pid}[/red]") - child.terminate() - - gone, still_alive = psutil.wait_procs(children, timeout=3) # Wait for termination - for process in still_alive: - console.print(f"[red]Force killing stubborn process: {process.pid}[/red]") - process.kill() - - -def optimize_image_task(image): - """Optimizes an image using oxipng in a separate process.""" + """Ensures all child processes are terminated.""" try: - if optimize_images: - os.environ['RAYON_NUM_THREADS'] = threads - if not os.path.exists(image): - 
error_msg = f"ERROR: File not found - {image}" - console.print(f"[red]{error_msg}[/red]") - return error_msg - - pyver = platform.python_version_tuple() - if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - level = 6 if os.path.getsize(image) >= 16000000 else 2 - - # Run optimization directly in the process - oxipng.optimize(image, level=level) - - return image - else: - return image - + current_process = psutil.Process() + children = current_process.children(recursive=True) # Get child processes once + + for child in children: + console.print(f"[red]Killing stuck worker process: {child.pid}[/red]") + child.terminate() + + gone, still_alive = psutil.wait_procs(children, timeout=3) # Wait for termination + for process in still_alive: + console.print(f"[red]Force killing stubborn process: {process.pid}[/red]") + process.kill() + except (psutil.AccessDenied, PermissionError) as e: + # Handle restricted environments like Termux/Android where /proc/stat is inaccessible + console.print(f"[yellow]Warning: Unable to access process information (restricted environment): {e}[/yellow]") except Exception as e: - error_message = f"ERROR optimizing {image}: {e}" - console.print(f"[red]{error_message}[/red]") - console.print(traceback.format_exc()) # Print detailed traceback - return None + console.print(f"[yellow]Warning: Error during child process cleanup: {e}[/yellow]") async def get_frame_info(path, ss_time, meta): """Get frame information (type, exact timestamp) for a specific frame""" try: info_ff = ffmpeg.input(path, ss=ss_time) + # Use video stream selector and apply showinfo filter + filtered = info_ff['v:0'].filter('showinfo') info_command = ( - info_ff - .filter('showinfo') + filtered .output('-', format='null', vframes=1) .global_args('-loglevel', 'info') ) @@ -1511,7 +1577,7 @@ async def get_frame_info(path, ss_time, meta): # Print the actual FFmpeg command for debugging cmd = info_command.compile() if meta.get('debug', False): - console.print(f"[cyan]FFmpeg showinfo command: {' '.join(cmd)}[/cyan]") + console.print(f"[cyan]FFmpeg showinfo command: {' '.join(cmd)}[/cyan]", emoji=False) returncode, _, stderr = await run_ffmpeg(info_command) assert returncode is not None @@ -1553,3 +1619,163 @@ async def get_frame_info(path, ss_time, meta): 'frame_type': 'Unknown', 'frame_number': int(ss_time * meta.get('frame_rate', 24.0)) } + + +async def check_libplacebo_compatibility(w_sar, h_sar, width, height, path, ss_time, image_path, loglevel, meta): + test_image_path = image_path.replace('.png', '_test.png') + + async def run_check(w_sar, h_sar, width, height, path, ss_time, image_path, loglevel, meta, try_libplacebo=False, test_image_path=None): + filter_parts = [] + input_label = "[0:v]" + output_map = "0:v" # Default output mapping + + if w_sar != 1 or h_sar != 1: + filter_parts.append(f"{input_label}scale={int(round(width * w_sar))}:{int(round(height * h_sar))}[scaled]") + input_label = "[scaled]" + output_map = "[scaled]" + + # Add libplacebo filter with output label + if try_libplacebo: + filter_parts.append(f"{input_label}libplacebo=tonemapping=auto:colorspace=bt709:color_primaries=bt709:color_trc=bt709:range=tv[out]") + output_map = "[out]" + cmd = [ + "ffmpeg", + "-init_hw_device", "vulkan", + "-ss", str(ss_time), + "-i", path, + "-filter_complex", ",".join(filter_parts), + "-map", output_map, + "-vframes", "1", + "-pix_fmt", "rgb24", + "-y", + "-loglevel", "quiet", + test_image_path + ] + else: + # Use -vf for zscale/tonemap chain, no output label or -map needed + vf_chain = 
f"zscale=transfer=linear,tonemap=tonemap={algorithm}:desat={desat},zscale=transfer=bt709,format=rgb24" + cmd = [ + "ffmpeg", + "-ss", str(ss_time), + "-i", path, + "-vf", vf_chain, + "-vframes", "1", + "-pix_fmt", "rgb24", + "-y", + "-loglevel", "quiet", + test_image_path + ] + + if loglevel == 'verbose' or (meta and meta.get('debug', False)): + console.print(f"[cyan]libplacebo compatibility test command: {' '.join(cmd)}[/cyan]") + + # Add timeout to prevent hanging + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + + try: + stdout, stderr = await asyncio.wait_for( + process.communicate(), + timeout=30.0 # 30 second timeout for compatibility test + ) + return process.returncode + except asyncio.TimeoutError: + console.print("[red]libplacebo compatibility test timed out after 30 seconds[/red]") + process.kill() + try: + await process.wait() + except Exception: + pass + return False + + if not meta['is_disc']: + is_libplacebo_compatible = await run_check(w_sar, h_sar, width, height, path, ss_time, image_path, loglevel, meta, try_libplacebo=True, test_image_path=test_image_path) + if is_libplacebo_compatible == 0: + if meta['debug']: + console.print("[green]libplacebo compatibility test succeeded[/green]") + try: + if os.path.exists(test_image_path): + os.remove(test_image_path) + except Exception: + pass + return True, True + else: + can_hdr = await run_check(w_sar, h_sar, width, height, path, ss_time, image_path, loglevel, meta, try_libplacebo=False, test_image_path=test_image_path) + if can_hdr == 0: + if meta['debug']: + console.print("[yellow]libplacebo compatibility test failed, but zscale HDR tonemapping is compatible[/yellow]") + # Clean up the test image regardless of success/failure + try: + if os.path.exists(test_image_path): + os.remove(test_image_path) + except Exception: + pass + return False, True + return False, False + + +async def libplacebo_warmup(path, meta, loglevel): + if not meta.get('libplacebo') or meta.get('_libplacebo_warmed'): + return + if not os.path.exists(path): + return + # Use a very small seek (0.1s) to avoid issues at pts 0 + cmd = [ + "ffmpeg", + "-ss", "0.1", + "-i", path, + "-map", "0:v:0", + "-an", "-sn", + "-init_hw_device", "vulkan", + "-vf", "libplacebo=tonemapping=hable:colorspace=bt709:color_primaries=bt709:color_trc=bt709:range=tv,format=rgb24", + "-vframes", "1", + "-f", "null", + "-", + "-loglevel", "error" + ] + if loglevel == 'verbose' or meta.get('debug', False): + console.print("[cyan]Running libplacebo warm-up...[/cyan]", emoji=False) + try: + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + try: + await asyncio.wait_for(proc.communicate(), timeout=40) + except asyncio.TimeoutError: + proc.kill() + try: + await proc.wait() + except Exception: + pass + if loglevel == 'verbose' or meta.get('debug', False): + console.print("[yellow]libplacebo warm-up timed out (continuing anyway)[/yellow]") + meta['_libplacebo_warmed'] = True + except Exception as e: + if loglevel == 'verbose' or meta.get('debug', False): + console.print(f"[yellow]libplacebo warm-up failed: {e} (continuing)[/yellow]") + + +async def get_image_host(meta): + if meta.get('imghost') is not None: + host = meta['imghost'] + + if isinstance(host, str): + return host.lower().strip() + + elif isinstance(host, list): + for item in host: + if item and isinstance(item, str): + return item.lower().strip() + else: + img_host_config = [ 
+ config["DEFAULT"][key].lower() + for key in sorted(config["DEFAULT"].keys()) + if key.startswith("img_host_1") and not key.endswith("0") + ] + if img_host_config: + return str(img_host_config[0]) diff --git a/src/tmdb.py b/src/tmdb.py index cf8e3e528..5fcd40317 100644 --- a/src/tmdb.py +++ b/src/tmdb.py @@ -1,21 +1,35 @@ -from src.console import console -from src.imdb import get_imdb_aka_api, get_imdb_info_api -from src.args import Args -from data.config import config -import re -from guessit import guessit -import cli_ui +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles import anitopy -from datetime import datetime -from difflib import SequenceMatcher -import requests -import json -import httpx import asyncio +import cli_ui +import httpx +import json import os +import re +import requests +import sys + +from datetime import datetime +from difflib import SequenceMatcher +from guessit import guessit + +from data.config import config +from src.args import Args +from src.cleanup import cleanup, reset_terminal +from src.console import console +from src.imdb import get_imdb_info_api TMDB_API_KEY = config['DEFAULT'].get('tmdb_api', False) TMDB_BASE_URL = "https://api.themoviedb.org/3" +parser = Args(config=config) + +# Module-level dict to store async locks for cache keys to prevent race conditions +_cache_locks = {} + + +async def normalize_title(title): + return title.lower().replace('&', 'and').replace(' ', ' ').strip() async def get_tmdb_from_imdb(imdb_id, tvdb_id=None, search_year=None, filename=None, debug=False, mode="discord", category_preference=None, imdb_info=None): @@ -29,6 +43,7 @@ async def get_tmdb_from_imdb(imdb_id, tvdb_id=None, search_year=None, filename=N imdb_id = f"tt{int(imdb_id):07d}" elif isinstance(imdb_id, int): imdb_id = f"tt{imdb_id:07d}" + filename_search = False async def _tmdb_find_by_external_source(external_id, source): """Helper function to find a movie or TV show on TMDb by external ID.""" @@ -46,15 +61,7 @@ async def _tmdb_find_by_external_source(external_id, source): return {} - # Check TVDb for an ID first if present - if tvdb_id: - info_tvdb = await _tmdb_find_by_external_source(str(tvdb_id), "tvdb_id") - if debug: - console.print("TVDB INFO", info_tvdb) - if info_tvdb.get("tv_results"): - return "TV", info_tvdb['tv_results'][0]['id'], info_tvdb['tv_results'][0].get('original_language') - - # Use IMDb ID if no TVDb ID is provided + # Run a search by IMDb ID info = await _tmdb_find_by_external_source(imdb_id, "imdb_id") # Check if both movie and TV results exist @@ -66,24 +73,35 @@ async def _tmdb_find_by_external_source(external_id, source): if category_preference == "MOVIE" and has_movie_results: if debug: console.print("[green]Found both movie and TV results, using movie based on preference") - return "MOVIE", info['movie_results'][0]['id'], info['movie_results'][0].get('original_language') + return "MOVIE", info['movie_results'][0]['id'], info['movie_results'][0].get('original_language'), filename_search elif category_preference == "TV" and has_tv_results: if debug: console.print("[green]Found both movie and TV results, using TV based on preference") - return "TV", info['tv_results'][0]['id'], info['tv_results'][0].get('original_language') + return "TV", info['tv_results'][0]['id'], info['tv_results'][0].get('original_language'), filename_search # If no preference or preference doesn't match available results, proceed with normal logic if has_movie_results: if debug: console.print("Movie INFO", info) - 
return "MOVIE", info['movie_results'][0]['id'], info['movie_results'][0].get('original_language') + return "MOVIE", info['movie_results'][0]['id'], info['movie_results'][0].get('original_language'), filename_search elif has_tv_results: if debug: console.print("TV INFO", info) - return "TV", info['tv_results'][0]['id'], info['tv_results'][0].get('original_language') + return "TV", info['tv_results'][0]['id'], info['tv_results'][0].get('original_language'), filename_search - console.print("[yellow]TMDb was unable to find anything with that IMDb ID, checking TVDb...") + if debug: + console.print("[yellow]TMDb was unable to find anything with that IMDb ID, checking TVDb...") + + # Check TVDb for an ID if TVDb and still no results + if tvdb_id: + info_tvdb = await _tmdb_find_by_external_source(str(tvdb_id), "tvdb_id") + if debug: + console.print("TVDB INFO", info_tvdb) + if info_tvdb.get("tv_results"): + return "TV", info_tvdb['tv_results'][0]['id'], info_tvdb['tv_results'][0].get('original_language'), filename_search + + filename_search = True # If both TMDb and TVDb fail, fetch IMDb info and attempt a title search imdb_id = imdb_id.replace("tt", "") @@ -131,229 +149,555 @@ async def _tmdb_find_by_external_source(external_id, source): if tmdb_id in ('None', '', None, 0, '0') and mode == "cli": console.print('[yellow]Unable to find a matching TMDb entry[/yellow]') tmdb_id = console.input("Please enter TMDb ID (format: tv/12345 or movie/12345): ") - parser = Args(config=config) category, tmdb_id = parser.parse_tmdb_id(id=tmdb_id, category=category) - return category, tmdb_id, original_language + return category, tmdb_id, original_language, filename_search -async def get_tmdb_id(filename, search_year, category, untouched_filename="", attempted=0, debug=False, secondary_title=None, path=None, final_attempt=None): +async def get_tmdb_id(filename, search_year, category, untouched_filename="", attempted=0, debug=False, secondary_title=None, path=None, final_attempt=None, new_category=None, unattended=False): search_results = {"results": []} - secondary_results = {"results": []} + original_category = category + if new_category: + category = new_category + else: + category = original_category if final_attempt is None: final_attempt = False if attempted is None: attempted = 0 - - async with httpx.AsyncClient() as client: - try: - # Primary search attempt with year - if category == "MOVIE": - if debug: - console.print(f"[green]Searching TMDb for movie:[/] [cyan]{filename}[/cyan] (Year: {search_year})") - - params = { - "api_key": TMDB_API_KEY, - "query": filename, - "language": "en-US", - "include_adult": "true" - } - - if search_year: - params["year"] = search_year - - response = await client.get(f"{TMDB_BASE_URL}/search/movie", params=params) - try: - response.raise_for_status() - search_results = response.json() - except Exception: - console.print(f"[bold red]Failure with primary movie search: {response.status_code}[/bold red]") - - elif category == "TV": - if debug: - console.print(f"[green]Searching TMDb for TV show:[/] [cyan]{filename}[/cyan] (Year: {search_year})") - - params = { - "api_key": TMDB_API_KEY, - "query": filename, - "language": "en-US", - "include_adult": "true" - } - - if search_year: - params["first_air_date_year"] = search_year - - response = await client.get(f"{TMDB_BASE_URL}/search/tv", params=params) - try: - response.raise_for_status() - search_results = response.json() - except Exception: - console.print(f"[bold red]Failed with primary TV search: {response.status_code}[/bold 
red]") - - if debug: - console.print(f"[yellow]Search results (primary): {json.dumps(search_results.get('results', [])[:2], indent=2)}[/yellow]") - - # Check if results were found - if search_results.get('results'): - tmdb_id = search_results['results'][0]['id'] - return tmdb_id, category - - # If no results and we have a secondary title, try searching with that - if not search_results.get('results') and secondary_title and attempted < 3: - console.print(f"[yellow]No results found for primary title. Trying secondary title: {secondary_title}[/yellow]") - secondary_meta = await get_tmdb_id( - secondary_title, - search_year, - category, - untouched_filename, - attempted + 1, - debug=debug, - secondary_title=secondary_title - ) - - if secondary_meta.get('tmdb_id', 0) != 0: - tmdb_id = secondary_meta['tmdb_id'] - return tmdb_id, category - - except Exception as e: - console.print(f"[bold red]TMDb search error:[/bold red] {e}") - search_results = {"results": []} # Reset search_results on exception - - # Secondary attempt: Try searching without the year - console.print("[yellow]Retrying without year...[/yellow]") - try: - if category == "MOVIE": - if debug: - console.print(f"[green]Searching TMDb for movie:[/] [cyan]{filename}[/cyan] (Without year)") - - params = { - "api_key": TMDB_API_KEY, - "query": filename, - "language": "en-US", - "include_adult": "true" - } - - response = await client.get(f"{TMDB_BASE_URL}/search/movie", params=params) - try: - response.raise_for_status() - search_results = response.json() - except Exception: - console.print(f"[bold red]Failed with secondary movie search: {response.status_code}[/bold red]") - - elif category == "TV": - if debug: - console.print(f"[green]Searching TMDb for TV show:[/] [cyan]{filename}[/cyan] (Without year)") - - params = { - "api_key": TMDB_API_KEY, - "query": filename, - "language": "en-US", - "include_adult": "true" - } - - response = await client.get(f"{TMDB_BASE_URL}/search/tv", params=params) - try: - response.raise_for_status() - search_results = response.json() - except Exception: - console.print(f"[bold red]Failed secondary TV search: {response.status_code}[/bold red]") - - if debug: - console.print(f"[yellow]Search results (secondary): {json.dumps(search_results.get('results', [])[:2], indent=2)}[/yellow]") - - # Check if results were found - if search_results.get('results'): - tmdb_id = search_results['results'][0]['id'] - return tmdb_id, category - - # Try with secondary title without year - if not search_results.get('results') and secondary_title and attempted < 3: - console.print(f"[yellow]No results found for primary title without year. 
Trying secondary title: {secondary_title}[/yellow]") - + if attempted: + await asyncio.sleep(1) # Whoa baby, slow down + + async def search_tmdb_id(filename, search_year, category, untouched_filename="", attempted=0, debug=False, secondary_title=None, path=None, final_attempt=None, new_category=None, unattended=False): + search_results = {"results": []} + original_category = category + if new_category: + category = new_category + else: + category = original_category + if final_attempt is None: + final_attempt = False + if attempted is None: + attempted = 0 + if attempted: + await asyncio.sleep(1) # Whoa baby, slow down + async with httpx.AsyncClient() as client: + try: + # Primary search attempt with year if category == "MOVIE": if debug: - console.print(f"[green]Searching TMDb for movie with secondary title:[/] [cyan]{secondary_title}[/cyan] (Without year)") + console.print(f"[green]Searching TMDb for movie:[/] [cyan]{filename}[/cyan] (Year: {search_year})") params = { "api_key": TMDB_API_KEY, - "query": secondary_title, + "query": filename, "language": "en-US", "include_adult": "true" } + if search_year: + params["year"] = search_year + response = await client.get(f"{TMDB_BASE_URL}/search/movie", params=params) try: response.raise_for_status() - secondary_results = response.json() + search_results = response.json() except Exception: - console.print(f"[bold red]Failed with secondary title movie search: {response.status_code}[/bold red]") + console.print(f"[bold red]Failure with primary movie search: {response.status_code}[/bold red]") elif category == "TV": if debug: - console.print(f"[green]Searching TMDb for TV show with secondary title:[/] [cyan]{secondary_title}[/cyan] (Without year)") + console.print(f"[green]Searching TMDb for TV show:[/] [cyan]{filename}[/cyan] (Year: {search_year})") params = { "api_key": TMDB_API_KEY, - "query": secondary_title, + "query": filename, "language": "en-US", "include_adult": "true" } + if search_year: + params["first_air_date_year"] = search_year + response = await client.get(f"{TMDB_BASE_URL}/search/tv", params=params) try: response.raise_for_status() - secondary_results = response.json() + search_results = response.json() except Exception: - console.print(f"[bold red]Failed with secondary title TV search: {response.status_code}[/bold red]") + console.print(f"[bold red]Failed with primary TV search: {response.status_code}[/bold red]") if debug: - console.print(f"[yellow]Secondary title search results: {json.dumps(secondary_results.get('results', [])[:2], indent=2)}[/yellow]") + console.print(f"[yellow]TMDB search results (primary): {json.dumps(search_results.get('results', [])[:4], indent=2)}[/yellow]") + + # Check if results were found + results = search_results.get('results', []) + if results: + # Filter results by year if search_year is provided + if search_year: + def get_result_year(result): + return int((result.get('release_date') or result.get('first_air_date') or '0000')[:4] or 0) + filtered_results = [ + r for r in results + if abs(get_result_year(r) - int(search_year)) <= 2 + ] + limited_results = (filtered_results if filtered_results else results)[:8] + else: + limited_results = results[:8] + + if len(limited_results) == 1: + tmdb_id = int(limited_results[0]['id']) + return tmdb_id, category + elif len(limited_results) > 1: + filename_norm = await normalize_title(filename) + secondary_norm = await normalize_title(secondary_title) if secondary_title else None + search_year_int = int(search_year) if search_year else 0 + + # Find all exact 
matches (title and year) + exact_matches = [] + for r in limited_results: + if r.get('title'): + result_title = await normalize_title(r.get('title')) + else: + result_title = await normalize_title(r.get('name', '')) + if r.get('original_title'): + original_title = await normalize_title(r.get('original_title')) + else: + original_title = await normalize_title(r.get('original_name', '')) + result_year = int((r.get('release_date') or r.get('first_air_date') or '0')[:4] or 0) + # Only count as exact match if both years are present and non-zero + if secondary_norm and ( + secondary_norm == original_title + and search_year_int > 0 + and result_year > 0 + and (result_year == search_year_int or result_year == search_year_int + 1) + ): + exact_matches.append(r) + + if ( + filename_norm == result_title + and search_year_int > 0 + and result_year > 0 + and (result_year == search_year_int or result_year == search_year_int + 1) + ): + exact_matches.append(r) + + if secondary_norm and ( + secondary_norm == result_title + and search_year_int > 0 + and result_year > 0 + and (result_year == search_year_int or result_year == search_year_int + 1) + ): + exact_matches.append(r) + + summary_exact_matches = set((r['id'] for r in exact_matches)) + + if len(summary_exact_matches) == 1: + tmdb_id = int(summary_exact_matches.pop()) + return tmdb_id, category + + # If no exact matches, calculate similarity for all results and sort them + results_with_similarity = [] + for r in limited_results: + if r.get('title'): + result_title = await normalize_title(r.get('title')) + else: + result_title = await normalize_title(r.get('name', '')) - if secondary_results.get('results'): - tmdb_id = secondary_results['results'][0]['id'] - return tmdb_id, category + if r.get('original_title'): + original_title = await normalize_title(r.get('original_title')) + else: + original_title = await normalize_title(r.get('original_name', '')) + + # Calculate similarity for both main title and original title + main_similarity = SequenceMatcher(None, filename_norm, result_title).ratio() + original_similarity = SequenceMatcher(None, filename_norm, original_title).ratio() + + # Try getting TMDb translation for original title if it's different + translated_title = "" + translated_similarity = 0.0 + secondary_best = 0.0 + + if original_title and original_title != result_title: + translated_title = await get_tmdb_translations(r['id'], category, 'en', debug) + if translated_title: + translated_title_norm = await normalize_title(translated_title) + translated_similarity = SequenceMatcher(None, filename_norm, translated_title_norm).ratio() + + if debug: + console.print(f"[cyan] TMDb translation: '{translated_title}' (similarity: {translated_similarity:.3f})[/cyan]") + + # Also calculate secondary title similarity if available + if secondary_norm is not None: + secondary_main_sim = SequenceMatcher(None, secondary_norm, result_title).ratio() + secondary_orig_sim = SequenceMatcher(None, secondary_norm, original_title).ratio() + secondary_trans_sim = 0.0 + + if translated_title: + translated_title_norm = await normalize_title(translated_title) + secondary_trans_sim = SequenceMatcher(None, secondary_norm, translated_title_norm).ratio() + + secondary_best = max(secondary_main_sim, secondary_orig_sim, secondary_trans_sim) + + if translated_similarity == 0.0: + if secondary_best == 0.0: + similarity = (main_similarity * 0.5) + (original_similarity * 0.5) + else: + similarity = (main_similarity * 0.3) + (original_similarity * 0.3) + (secondary_best * 0.4) + 
else: + if secondary_best == 0.0: + similarity = (main_similarity * 0.5) + (translated_similarity * 0.5) + else: + similarity = (main_similarity * 0.5) + (secondary_best * 0.5) + + result_year = int((r.get('release_date') or r.get('first_air_date') or '0')[:4] or 0) + + if debug: + console.print(f"[cyan]ID {r['id']}: '{result_title}' vs '{filename_norm}'[/cyan]") + console.print(f"[cyan] Main similarity: {main_similarity:.3f}[/cyan]") + console.print(f"[cyan] Original similarity: {original_similarity:.3f}[/cyan]") + if translated_similarity > 0: + console.print(f"[cyan] Translated similarity: {translated_similarity:.3f}[/cyan]") + if secondary_best > 0: + console.print(f"[cyan] Secondary similarity: {secondary_best:.3f}[/cyan]") + console.print(f"[cyan] Final similarity: {similarity:.3f}[/cyan]") + + # Boost similarity if we have exact matches with year validation + if similarity >= 0.9 and search_year_int > 0 and result_year > 0: + if result_year == search_year_int: + similarity += 0.1 # Full boost for exact year match + elif result_year == search_year_int + 1: + similarity += 0.1 # Boost for +1 year (handles TMDB/IMDb differences) + + results_with_similarity.append((r, similarity)) + + # Give a slight boost to the first result for TV shows (often the main series) + if category == "TV" and results_with_similarity: + first_result = results_with_similarity[0] + # Boost the first result's similarity by 0.05 (5%) + boosted_similarity = first_result[1] + 0.05 + results_with_similarity[0] = (first_result[0], boosted_similarity) + + if debug: + console.print(f"[cyan]Boosted first TV result similarity from {first_result[1]:.3f} to {boosted_similarity:.3f}[/cyan]") + + # Sort by similarity (highest first) + results_with_similarity.sort(key=lambda x: x[1], reverse=True) + sorted_results = [r[0] for r in results_with_similarity] + + # Filter results: if we have high similarity matches (>= 0.90), hide low similarity ones (< 0.75) + best_similarity = results_with_similarity[0][1] + if best_similarity >= 0.90: + # Filter out results with similarity < 0.75 + filtered_results_with_similarity = [ + (result, sim) for result, sim in results_with_similarity + if sim >= 0.75 + ] + results_with_similarity = filtered_results_with_similarity + sorted_results = [r[0] for r in results_with_similarity] + + if debug: + console.print(f"[yellow]Filtered out low similarity results (< 0.70) since best match has {best_similarity:.2f} similarity[/yellow]") + else: + sorted_results = [r[0] for r in results_with_similarity] + + # Check if the best match is significantly better than others + best_similarity = results_with_similarity[0][1] + similarity_threshold = 0.70 + + if best_similarity >= similarity_threshold: + # Check that no other result is close to the best match + second_best = results_with_similarity[1][1] if len(results_with_similarity) > 1 else 0.0 + if best_similarity >= 0.75 and best_similarity - second_best >= 0.10: + if debug: + console.print(f"[green]Auto-selecting best match: {sorted_results[0].get('title') or sorted_results[0].get('name')} (similarity: {best_similarity:.2f}[/green]") + tmdb_id = int(sorted_results[0]['id']) + return tmdb_id, category + + # Check for "The" prefix handling + if len(results_with_similarity) > 1: + the_results = [] + non_the_results = [] + + for result_tuple in results_with_similarity: + result, similarity = result_tuple + if result.get('title'): + title = await normalize_title(result.get('title')) + else: + title = await normalize_title(result.get('name', '')) + if 
title.startswith('the '): + the_results.append(result_tuple) + else: + non_the_results.append(result_tuple) + + # If exactly one result starts with "The", check if similarity improves + if len(the_results) == 1 and len(non_the_results) > 0: + the_result, the_similarity = the_results[0] + if the_result.get('title'): + the_title = await normalize_title(the_result.get('title')) + else: + the_title = await normalize_title(the_result.get('name', '')) + the_title_without_the = the_title[4:] + new_similarity = SequenceMatcher(None, filename_norm, the_title_without_the).ratio() + + if debug: + console.print(f"[cyan]Checking 'The' prefix: '{the_title}' -> '{the_title_without_the}'[/cyan]") + console.print(f"[cyan]Original similarity: {the_similarity:.3f}, New similarity: {new_similarity:.3f}[/cyan]") + + # If similarity improves significantly, update and resort + if new_similarity > the_similarity + 0.05: + if debug: + console.print("[green]'The' prefix removal improved similarity, updating results[/green]") + + updated_results = [] + for result_tuple in results_with_similarity: + result, similarity = result_tuple + if result['id'] == the_result['id']: + updated_results.append((result, new_similarity)) + else: + updated_results.append(result_tuple) + + # Resort by similarity + updated_results.sort(key=lambda x: x[1], reverse=True) + results_with_similarity = updated_results + sorted_results = [r[0] for r in results_with_similarity] + best_similarity = results_with_similarity[0][1] + second_best = results_with_similarity[1][1] if len(results_with_similarity) > 1 else 0.0 + + if best_similarity >= 0.75 and best_similarity - second_best >= 0.10: + if debug: + console.print(f"[green]Auto-selecting 'The' prefixed match: {sorted_results[0].get('title') or sorted_results[0].get('name')} (similarity: {best_similarity:.2f})[/green]") + tmdb_id = int(sorted_results[0]['id']) + return tmdb_id, category + + # Put unattended handling here, since it will work based on the sorted results + if unattended and not debug: + tmdb_id = int(sorted_results[0]['id']) + return tmdb_id, category + + # Show sorted results to user + console.print() + console.print("[bold yellow]Multiple TMDb results found. Please select the correct entry:[/bold yellow]") + if category == "MOVIE": + tmdb_url = "https://www.themoviedb.org/movie/" + else: + tmdb_url = "https://www.themoviedb.org/tv/" + + for idx, result in enumerate(sorted_results): + title = result.get('title') or result.get('name', '') + year = result.get('release_date', result.get('first_air_date', ''))[:4] + overview = result.get('overview', '') + similarity_score = results_with_similarity[idx][1] + + console.print(f"[cyan]{idx+1}.[/cyan] [bold]{title}[/bold] ({year}) [yellow]ID:[/yellow] {tmdb_url}{result['id']} [dim](similarity: {similarity_score:.2f})[/dim]") + if overview: + console.print(f"[green]Overview:[/green] {overview[:200]}{'...' 
if len(overview) > 200 else ''}") + console.print() + + selection = None + while True: + console.print("Enter the number of the correct entry, or manual TMDb ID (tv/12345 or movie/12345):") + try: + selection = cli_ui.ask_string("Or push enter to try a different search: ") + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + try: + # Check if it's a manual TMDb ID entry + if '/' in selection and (selection.lower().startswith('tv/') or selection.lower().startswith('movie/')): + try: + parsed_category, parsed_tmdb_id = parser.parse_tmdb_id(selection, category) + if parsed_tmdb_id and parsed_tmdb_id != 0: + console.print(f"[green]Using manual TMDb ID: {parsed_tmdb_id} and category: {parsed_category}[/green]") + return int(parsed_tmdb_id), parsed_category + else: + console.print("[bold red]Invalid TMDb ID format. Please try again.[/bold red]") + continue + except Exception as e: + console.print(f"[bold red]Error parsing TMDb ID: {e}. Please try again.[/bold red]") + continue + except KeyboardInterrupt: + console.print("\n[bold red]Search cancelled by user.[/bold red]") + sys.exit(0) + + # Handle numeric selection + selection_int = int(selection) + if 1 <= selection_int <= len(sorted_results): + tmdb_id = int(sorted_results[selection_int - 1]['id']) + return tmdb_id, category + else: + console.print("[bold red]Selection out of range. Please try again.[/bold red]") + except ValueError: + console.print("[bold red]Invalid input. Please enter a number or TMDb ID (tv/12345 or movie/12345).[/bold red]") + except KeyboardInterrupt: + console.print("\n[bold red]Search cancelled by user.[/bold red]") + sys.exit(0) + except Exception: + search_results = {"results": []} # Reset search_results on exception + + # TMDb doesn't do roman + if not search_results.get('results'): + try: + words = filename.split() + roman_numerals = { + 'II': '2', 'III': '3', 'IV': '4', 'V': '5', + 'VI': '6', 'VII': '7', 'VIII': '8', 'IX': '9', 'X': '10' + } + + converted = False + for i, word in enumerate(words): + if word.upper() in roman_numerals: + words[i] = roman_numerals[word.upper()] + converted = True + + if converted: + converted_title = ' '.join(words) + if debug: + console.print(f"[bold yellow]Trying with roman numerals converted: {converted_title}[/bold yellow]") + result = await search_tmdb_id(converted_title, search_year, original_category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, unattended=unattended) + if result and result != (0, category): + return result except Exception as e: - console.print(f"[bold red]Secondary search error:[/bold red] {e}") + console.print(f"[bold red]Roman numeral conversion error:[/bold red] {e}") + search_results = {"results": []} + + # If we have a secondary title, try searching with that + if secondary_title: + if debug: + console.print(f"[yellow]Trying secondary title: {secondary_title}[/yellow]") + result = await search_tmdb_id( + secondary_title, + search_year, + category, + untouched_filename, + debug=debug, + secondary_title=secondary_title, + path=path, + unattended=unattended + ) + if result and result != (0, category): + return result + + # Try searching with the primary filename + if debug: + console.print(f"[yellow]Trying primary filename: {filename}[/yellow]") + if not search_results.get('results'): + result = await search_tmdb_id( + filename, + search_year, + category, + untouched_filename, + debug=debug, + secondary_title=secondary_title, + 
path=path, + unattended=unattended + ) + if result and result != (0, category): + return result - # If still no match, attempt alternative category switch - if attempted < 1: - new_category = "TV" if category == "MOVIE" else "MOVIE" + # Try searching with year + 1 if search_year is provided + if not search_results.get('results'): + try: + year_int = int(search_year) + except Exception: + year_int = 0 + + if year_int > 0: + imdb_year = year_int + 1 + if debug: + console.print("[yellow]Retrying with year +1...[/yellow]") + result = await search_tmdb_id(filename, imdb_year, category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, unattended=unattended) + if result and result != (0, category): + return result + + # Try switching category + if not search_results.get('results'): + new_category = "TV" if category == "MOVIE" else "MOVIE" + if debug: console.print(f"[bold yellow]Switching category to {new_category} and retrying...[/bold yellow]") - return await get_tmdb_id(filename, search_year, new_category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path) + result = await search_tmdb_id(filename, search_year, category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, new_category=new_category, unattended=unattended) + if result and result != (0, category): + return result - # Last attempt: Try parsing a better title - if attempted == 1: - try: - parsed_title = anitopy.parse( - guessit(untouched_filename, {"excludes": ["country", "language"]})['title'] - )['anime_title'] - original_category = "MOVIE" - console.print(f"[bold yellow]Trying parsed title: {parsed_title}[/bold yellow]") - return await get_tmdb_id(parsed_title, search_year, original_category, untouched_filename, attempted + 2, debug=debug, secondary_title=secondary_title, path=path) - except KeyError: - console.print("[bold red]Failed to parse title for TMDb search.[/bold red]") - - # lets try with a folder name if we have one - if attempted > 1 and path and not final_attempt: - try: - folder_name = os.path.basename(path).replace("_", "").replace("-", "") if path else "" - title = guessit(folder_name, {"excludes": ["country", "language"]})['title'] - original_category = "MOVIE" - console.print(f"[bold yellow]Trying folder name: {title}[/bold yellow]") - return await get_tmdb_id(title, search_year, original_category, untouched_filename, attempted + 3, debug=debug, secondary_title=secondary_title, path=path, final_attempt=True) - except Exception as e: - console.print(f"[bold red]Folder name search error:[/bold red] {e}") - search_results = {"results": []} + # try anime name parsing + if not search_results.get('results'): + try: + parsed_title = anitopy.parse( + guessit(untouched_filename, {"excludes": ["country", "language"]})['title'] + )['anime_title'] + if debug: + console.print(f"[bold yellow]Trying parsed anime title: {parsed_title}[/bold yellow]") + result = await search_tmdb_id(parsed_title, search_year, original_category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, unattended=unattended) + if result and result != (0, category): + return result + except KeyError: + console.print("[bold red]Failed to parse title for TMDb search.[/bold red]") + search_results = {"results": []} + + # Try with less words in the title + if not search_results.get('results'): + try: + words = filename.split() + extensions = ['mp4', 'mkv', 'avi', 'webm', 'mov', 'wmv'] + 
words_lower = [word.lower() for word in words] + + for ext in extensions: + if ext in words_lower: + ext_index = words_lower.index(ext) + words.pop(ext_index) + words_lower.pop(ext_index) + break + + if len(words) >= 2: + title = ' '.join(words[:-1]) + if debug: + console.print(f"[bold yellow]Trying reduced name: {title}[/bold yellow]") + result = await search_tmdb_id(title, search_year, original_category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, unattended=unattended) + if result and result != (0, category): + return result + except Exception as e: + console.print(f"[bold red]Reduced name search error:[/bold red] {e}") + search_results = {"results": []} - # No match found, prompt user if in CLI mode - console.print(f"[bold red]Unable to find TMDb match for {filename}[/bold red]") + # Try with even less words + if not search_results.get('results'): + try: + words = filename.split() + extensions = ['mp4', 'mkv', 'avi', 'webm', 'mov', 'wmv'] + words_lower = [word.lower() for word in words] + + for ext in extensions: + if ext in words_lower: + ext_index = words_lower.index(ext) + words.pop(ext_index) + words_lower.pop(ext_index) + break + + if len(words) >= 3: + title = ' '.join(words[:-2]) + if debug: + console.print(f"[bold yellow]Trying further reduced name: {title}[/bold yellow]") + result = await search_tmdb_id(title, search_year, original_category, untouched_filename, attempted + 1, debug=debug, secondary_title=secondary_title, path=path, unattended=unattended) + if result and result != (0, category): + return result + except Exception as e: + console.print(f"[bold red]Reduced name search error:[/bold red] {e}") + search_results = {"results": []} + # No match found, prompt user if in CLI mode + console.print("[bold red]Unable to find TMDb match using any search[/bold red]") + try: tmdb_id = cli_ui.ask_string("Please enter TMDb ID in this format: tv/12345 or movie/12345") - parser = Args(config=config) - category, tmdb_id = parser.parse_tmdb_id(id=tmdb_id, category=category) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + category, tmdb_id = parser.parse_tmdb_id(id=tmdb_id, category=category) - return tmdb_id, category + return tmdb_id, category async def tmdb_other_meta( @@ -371,7 +715,8 @@ async def tmdb_other_meta( debug=False, mode="discord", tvdb_id=0, - quickie_search=False + quickie_search=False, + filename=None ): """ Fetch metadata from TMDB for a movie or TV show. 
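Note: the retry cascade in search_tmdb_id above — roman-numeral conversion, secondary title, year + 1, category switch, parsed anime title, progressively shortened filenames — follows one shape throughout: build a variant of the query, re-run the search, and return on the first non-zero hit. A minimal sketch of that pattern; the helper names and the injected search callable are illustrative, not the real search_tmdb_id signature.

ROMAN = {'II': '2', 'III': '3', 'IV': '4', 'V': '5', 'VI': '6',
         'VII': '7', 'VIII': '8', 'IX': '9', 'X': '10'}

def convert_roman(title):
    # TMDb matches arabic digits better than roman numerals in titles.
    return ' '.join(ROMAN.get(word.upper(), word) for word in title.split())

async def search_with_fallbacks(search, title, year, category):
    """Try each variant in order; return the first hit, else (0, category)."""
    variants = [
        (convert_roman(title), year, category),                    # II -> 2, etc.
        (title, year + 1, category),                               # off-by-one release year
        (title, year, 'TV' if category == 'MOVIE' else 'MOVIE'),   # flipped category
        (' '.join(title.split()[:-1]), year, category),            # drop the last word
    ]
    for variant_title, variant_year, variant_category in variants:
        result = await search(variant_title, variant_year, variant_category)
        if result and result != (0, category):
            return result
    return 0, category
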
@@ -380,25 +725,34 @@ async def tmdb_other_meta( tmdb_metadata = {} # Initialize variables that might not be set in all code paths - retrieved_aka = "" - year = None - title = None - youtube = None - overview = "" - genres = "" + backdrop = "" + cast = [] + certification = "" + creators = [] + demographic = "" + directors = [] genre_ids = "" + genres = "" + imdb_mismatch = False keywords = "" - directors = [] - original_title = "" - runtime = 60 - certification = "" - backdrop = "" logo_path = "" + tmdb_logo = "" + mal_id = 0 + mismatched_imdb_id = 0 + origin_country = [] + original_title = "" + overview = "" poster_path = "" + retrieved_aka = "" + runtime = 60 + title = None tmdb_type = "" - mal_id = 0 - demographic = "" - imdb_mismatch = False + year = None + release_date = None + first_air_date = None + last_air_date = None + youtube = None + networks = [] if tmdb_id == 0: try: @@ -455,7 +809,7 @@ async def tmdb_other_meta( return {} if debug: - console.print(f"[cyan]TMDB Response: {json.dumps(media_data, indent=2)[:600]}...") + console.print(f"[cyan]TMDB Response: {json.dumps(media_data, indent=2)[:1200]}...") # Extract basic info from media_data if category == "MOVIE": @@ -463,21 +817,39 @@ async def tmdb_other_meta( original_title = media_data.get('original_title', title) year = datetime.strptime(media_data['release_date'], '%Y-%m-%d').year if media_data['release_date'] else search_year runtime = media_data.get('runtime', 60) + if media_data.get('release_date'): + release_date = media_data['release_date'] if quickie_search or not imdb_id: imdb_id_str = str(media_data.get('imdb_id', '')).replace('tt', '') - if imdb_id_str == "None": - imdb_id_str = "" - if imdb_id and imdb_id_str and (int(imdb_id_str) != imdb_id): - imdb_mismatch = True - imdb_id = int(imdb_id_str) if imdb_id_str.isdigit() else 0 + if imdb_id_str and imdb_id_str.isdigit(): + if imdb_id and int(imdb_id_str) != imdb_id: + imdb_mismatch = True + mismatched_imdb_id = int(imdb_id_str) + imdb_id = original_imdb_id + else: + imdb_id = original_imdb_id + tmdb_type = 'Movie' else: # TV show title = media_data['name'] original_title = media_data.get('original_name', title) year = datetime.strptime(media_data['first_air_date'], '%Y-%m-%d').year if media_data['first_air_date'] else search_year + if not year: + year_pattern = r'(18|19|20)\d{2}' + year_match = re.search(year_pattern, title) + if year_match: + year = int(year_match.group(0)) + if not year: + year = datetime.strptime(media_data['last_air_date'], '%Y-%m-%d').year if media_data['last_air_date'] else 0 + first_air_date = media_data.get('first_air_date', None) + last_air_date = media_data.get('last_air_date', None) runtime_list = media_data.get('episode_run_time', [60]) runtime = runtime_list[0] if runtime_list else 60 tmdb_type = media_data.get('type', 'Scripted') + networks = media_data.get('networks', []) + + production_companies = media_data.get('production_companies', []) + production_countries = media_data.get('production_countries', []) overview = media_data['overview'] original_language_from_tmdb = media_data['original_language'] @@ -509,11 +881,6 @@ async def tmdb_other_meta( params={"api_key": TMDB_API_KEY}) ) - # Add IMDB API call if we already have an IMDB ID - if imdb_id != 0: - # Get AKA and original language from IMDB immediately, don't wait - endpoints.append(get_imdb_aka_api(imdb_id, manual_language)) - # Make all requests concurrently results = await asyncio.gather(*endpoints, return_exceptions=True) @@ -521,23 +888,12 @@ async def tmdb_other_meta( 
external_data, videos_data, keywords_data, credits_data, *rest = results idx = 0 logo_data = None - imdb_data = None # Get logo data if it was requested if config['DEFAULT'].get('add_logo', False): logo_data = rest[idx] idx += 1 - # Get IMDB data if it was requested - if imdb_id != 0: - imdb_data = rest[idx] - # Process IMDB data - if isinstance(imdb_data, Exception): - console.print("[yellow]Failed to get AKA and original language from IMDB[/yellow]") - retrieved_aka, retrieved_original_language = "", None - else: - retrieved_aka, retrieved_original_language = imdb_data - # Process external IDs if isinstance(external_data, Exception): console.print("[bold red]Failed to fetch external IDs[/bold red]") @@ -547,22 +903,28 @@ async def tmdb_other_meta( # Process IMDB ID if quickie_search or imdb_id == 0: imdb_id_str = external.get('imdb_id', None) - if imdb_id_str and imdb_id_str not in ["", " ", "None", None]: + if isinstance(imdb_id_str, str) and imdb_id_str not in ["", " ", "None", "null"]: imdb_id_clean = imdb_id_str.lstrip('t') if imdb_id_clean.isdigit(): imdb_id_clean_int = int(imdb_id_clean) if imdb_id_clean_int != int(original_imdb_id) and quickie_search and original_imdb_id != 0: imdb_mismatch = True - imdb_id = original_imdb_id + mismatched_imdb_id = imdb_id_clean_int else: imdb_id = int(imdb_id_clean) + else: + imdb_id = original_imdb_id + else: + imdb_id = original_imdb_id else: - imdb_id = int(imdb_id) + imdb_id = original_imdb_id # Process TVDB ID if tvdb_id == 0: - tvdb_id = external.get('tvdb_id', None) - if tvdb_id in ["", " ", "None", None]: + tvdb_id_str = external.get('tvdb_id', None) + if isinstance(tvdb_id_str, str) and tvdb_id_str not in ["", " ", "None", "null"]: + tvdb_id = int(tvdb_id_str) if tvdb_id_str.isdigit() else 0 + else: tvdb_id = 0 except Exception: console.print("[bold red]Failed to process external IDs[/bold red]") @@ -595,20 +957,38 @@ async def tmdb_other_meta( console.print("[bold red]Failed to process keywords[/bold red]") keywords = "" + origin_country = list(media_data.get("origin_country", [])) + # Process credits + creators = [] + for each in media_data.get("created_by", []): + name = each.get('original_name') or each.get('name') + if name: + creators.append(name) + # Limit to the first 5 unique names + creators = list(dict.fromkeys(creators))[:5] + if isinstance(credits_data, Exception): console.print("[bold red]Failed to fetch credits[/bold red]") directors = [] + cast = [] else: try: credits = credits_data.json() directors = [] + cast = [] for each in credits.get('cast', []) + credits.get('crew', []): if each.get('known_for_department', '') == "Directing" or each.get('job', '') == "Director": directors.append(each.get('original_name', each.get('name'))) + elif each.get('known_for_department', '') == "Acting" or each.get('job', '') in {"Actor", "Actress"}: + cast.append(each.get('original_name', each.get('name'))) + # Limit to the first 5 unique names + directors = list(dict.fromkeys(directors))[:5] + cast = list(dict.fromkeys(cast))[:5] except Exception: console.print("[bold red]Failed to process credits[/bold red]") directors = [] + cast = [] # Process genres genres_data = await get_genres(media_data) @@ -620,28 +1000,27 @@ async def tmdb_other_meta( try: logo_json = logo_data.json() logo_path = await get_logo(tmdb_id, category, debug, TMDB_API_KEY=TMDB_API_KEY, TMDB_BASE_URL=TMDB_BASE_URL, logo_json=logo_json) + tmdb_logo = logo_path.split('/')[-1] except Exception: console.print("[yellow]Failed to process logo[/yellow]") logo_path = "" - - # 
Get AKA and original language from IMDB if needed - if imdb_id != 0 and imdb_data is None: - retrieved_aka, retrieved_original_language = await get_imdb_aka_api(imdb_id, manual_language) - elif imdb_data is None: - retrieved_aka, retrieved_original_language = "", None + tmdb_logo = "" # Use retrieved original language or fallback to TMDB's value - if retrieved_original_language is not None: - original_language = retrieved_original_language + if manual_language: + original_language = manual_language else: original_language = original_language_from_tmdb # Get anime information if applicable - if not anime: - mal_id, retrieved_aka, anime, demographic = await get_anime( - media_data, - {'title': title, 'aka': retrieved_aka, 'mal_id': 0} - ) + if category == "MOVIE": + filename = filename + else: + filename = path + mal_id, retrieved_aka, anime, demographic = await get_anime( + media_data, + {'title': title, 'aka': retrieved_aka, 'mal_id': 0, 'filename': filename} + ) if mal_manual is not None and mal_manual != 0: mal_id = mal_manual @@ -658,14 +1037,20 @@ async def tmdb_other_meta( tmdb_metadata = { 'title': title, 'year': year, + 'release_date': release_date, + 'first_air_date': first_air_date, + 'last_air_date': last_air_date, 'imdb_id': imdb_id, 'tvdb_id': tvdb_id, + 'origin_country': origin_country, 'original_language': original_language, 'original_title': original_title, 'keywords': keywords, 'genres': genres, 'genre_ids': genre_ids, + 'tmdb_creators': creators, 'tmdb_directors': directors, + 'tmdb_cast': cast, 'mal_id': mal_id, 'anime': anime, 'demographic': demographic, @@ -673,13 +1058,18 @@ async def tmdb_other_meta( 'poster': poster, 'tmdb_poster': poster_path, 'logo': logo_path, + 'tmdb_logo': tmdb_logo, 'backdrop': backdrop, 'overview': overview, 'tmdb_type': tmdb_type, 'runtime': runtime, 'youtube': youtube, 'certification': certification, - 'imdb_mismatch': imdb_mismatch + 'production_companies': production_companies, + 'production_countries': production_countries, + 'networks': networks, + 'imdb_mismatch': imdb_mismatch, + 'mismatched_imdb_id': mismatched_imdb_id } return tmdb_metadata @@ -772,8 +1162,8 @@ async def get_anime(response, meta): if each['id'] == 16: animation = True if response['original_language'] == 'ja' and animation is True: - romaji, mal_id, eng_title, season_year, episodes, demographic = await get_romaji(tmdb_name, meta.get('mal_id', None)) - alt_name = f" AKA {romaji}" + romaji, mal_id, eng_title, season_year, episodes, demographic = await get_romaji(tmdb_name, meta.get('mal_id', None), meta) + alt_name = f"AKA {romaji}" anime = True # mal = AnimeSearch(romaji) @@ -785,100 +1175,158 @@ async def get_anime(response, meta): return mal_id, alt_name, anime, demographic -async def get_romaji(tmdb_name, mal): - if mal is None or mal == 0: - tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") - tmdb_name = ' '.join(tmdb_name.split()) - query = ''' - query ($search: String) { - Page (page: 1) { - pageInfo { - total - } - media (search: $search, type: ANIME, sort: SEARCH_MATCH) { - id - idMal - title { - romaji - english - native - } - seasonYear - episodes - tags { - name +async def get_romaji(tmdb_name, mal, meta): + media = [] + demographic = 'Mina' # Default to Mina if no tags are found + + # Try AniList query with tmdb_name first, then fallback to meta['filename'] if no results + for search_term in [tmdb_name, meta.get('filename', '')]: + if not search_term: + continue + if mal is None or mal == 0: + cleaned_name = search_term.replace('-', 
"").replace("The Movie", "") + cleaned_name = ' '.join(cleaned_name.split()) + query = ''' + query ($search: String) { + Page (page: 1) { + pageInfo { + total + } + media (search: $search, type: ANIME, sort: SEARCH_MATCH) { + id + idMal + title { + romaji + english + native + } + seasonYear + episodes + tags { + name + } + externalLinks { + id + url + site + siteId + } } } } - } - ''' - # Define our query variables and values that will be used in the query request - variables = { - 'search': tmdb_name - } - else: - query = ''' - query ($search: Int) { - Page (page: 1) { - pageInfo { - total - } - media (idMal: $search, type: ANIME, sort: SEARCH_MATCH) { - id - idMal - title { - romaji - english - native - } - seasonYear - episodes - tags { - name + ''' + variables = {'search': cleaned_name} + else: + query = ''' + query ($search: Int) { + Page (page: 1) { + pageInfo { + total + } + media (idMal: $search, type: ANIME, sort: SEARCH_MATCH) { + id + idMal + title { + romaji + english + native + } + seasonYear + episodes + tags { + name + } } } } - } - ''' - # Define our query variables and values that will be used in the query request - variables = { - 'search': mal - } - - # Make the HTTP Api request - url = 'https://graphql.anilist.co/' - demographic = 'Mina' # Default to Mina if no tags are found - try: - response = requests.post(url, json={'query': query, 'variables': variables}) - json = response.json() + ''' + variables = {'search': mal} - # console.print('Checking for demographic tags...') + url = 'https://graphql.anilist.co/' + try: + response = requests.post(url, json={'query': query, 'variables': variables}) + json_data = response.json() + + demographics = ["Shounen", "Seinen", "Shoujo", "Josei", "Kodomo", "Mina"] + for tag in demographics: + if tag in response.text: + demographic = tag + break + + media = json_data['data']['Page']['media'] + if media not in (None, []): + break # Found results, stop retrying + except Exception: + console.print('[red]Failed to get anime specific info from anilist. Continuing without it...') + media = [] + if "subsplease" in meta.get('filename', '').lower(): + search_name = meta['filename'].lower() + else: + search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) - demographics = ["Shounen", "Seinen", "Shoujo", "Josei", "Kodomo", "Mina"] + # Extract expected season number from various sources + expected_season = None - for tag in demographics: - if tag in response.text: - demographic = tag - # print(f"Found {tag} tag") - break + # Try manual_season first + if meta.get('manual_season'): + season_match = re.search(r'S?(\d+)', str(meta['manual_season']), re.IGNORECASE) + if season_match: + expected_season = int(season_match.group(1)) + + # Try parsing the filename with anitopy + if expected_season is None and meta.get('filename'): + try: + parsed = anitopy.parse(meta['filename']) + if parsed.get('anime_season'): + expected_season = int(parsed['anime_season']) + except Exception: + pass + + # Fall back to meta['season'] if available + if expected_season is None and meta.get('season'): + season_match = re.search(r'S?(\d+)', str(meta['season']), re.IGNORECASE) + if season_match: + expected_season = int(season_match.group(1)) - media = json['data']['Page']['media'] - except Exception: - console.print('[red]Failed to get anime specific info from anilist. 
Continuing without it...') - media = [] if media not in (None, []): result = {'title': {}} difference = 0 + best_match_with_season = None + best_season_diff = float('inf') + for anime in media: - search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) + # Extract season number from AniList title if present + anime_season = None + for title_value in anime['title'].values(): + if title_value: + season_match = re.search(r'Season (\d+)', title_value, re.IGNORECASE) + if season_match: + anime_season = int(season_match.group(1)) + break + + # Calculate title similarity for title in anime['title'].values(): if title is not None: - title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) - diff = SequenceMatcher(None, title, search_name).ratio() - if diff >= difference: + title_clean = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) + diff = SequenceMatcher(None, title_clean, search_name).ratio() + + # Prioritize season match if expected_season is set + if expected_season is not None and anime_season is not None: + if anime_season == expected_season and diff > difference * 0.8: + # If season matches and title similarity is reasonable, prefer this + if best_match_with_season is None or diff > best_season_diff: + best_match_with_season = anime + best_season_diff = diff + + # Keep track of best overall match + if diff > difference: result = anime difference = diff + # Use season-matched result if found, otherwise use best title match + if best_match_with_season is not None: + result = best_match_with_season + romaji = result['title'].get('romaji', result['title'].get('english', "")) mal_id = result.get('idMal', 0) eng_title = result['title'].get('english', result['title'].get('romaji', "")) @@ -887,33 +1335,41 @@ async def get_romaji(tmdb_name, mal): else: romaji = eng_title = season_year = "" episodes = mal_id = 0 - if mal_id in [None, 0]: + if mal in [None, 0]: + mal_id = mal_id + else: mal_id = mal if not episodes: episodes = 0 return romaji, mal_id, eng_title, season_year, episodes, demographic -async def get_tmdb_imdb_from_mediainfo(mediainfo, category, is_disc, tmdbid, imdbid): +async def get_tmdb_imdb_from_mediainfo(mediainfo, category, is_disc, tmdbid, imdbid, tvdbid): if not is_disc: if mediainfo['media']['track'][0].get('extra'): extra = mediainfo['media']['track'][0]['extra'] for each in extra: try: - if each.lower().startswith('tmdb'): - parser = Args(config=config) + if each.lower().startswith('tmdb') and not tmdbid: category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) - if each.lower().startswith('imdb'): + if each.lower().startswith('imdb') and not imdbid: try: imdb_id = extract_imdb_id(extra[each]) if imdb_id: imdbid = imdb_id except Exception: pass + if each.lower().startswith('tvdb') and not tvdbid: + try: + tvdb_id = int(extra[each]) + if tvdb_id: + tvdbid = tvdb_id + except Exception: + pass except Exception: pass - return category, tmdbid, imdbid + return category, tmdbid, imdbid, tvdbid def extract_imdb_id(value): @@ -988,6 +1444,8 @@ async def daily_to_tmdb_season_episode(tmdbid, date): async def get_episode_details(tmdb_id, season_number, episode_number, debug=False): + if debug: + console.print(f"[cyan]Fetching episode details for TMDb ID: {tmdb_id}, Season: {season_number}, Episode: {episode_number}[/cyan]") async with 
httpx.AsyncClient() as client: try: # Get episode details @@ -1056,6 +1514,77 @@ async def get_episode_details(tmdb_id, season_number, episode_number, debug=Fals return {} +async def get_season_details(tmdb_id, season_number, debug=False): + if debug: + console.print(f"[cyan]Fetching season details for TMDb ID: {tmdb_id}, Season: {season_number}[/cyan]") + async with httpx.AsyncClient() as client: + try: + # Get season details + response = await client.get( + f"{TMDB_BASE_URL}/tv/{tmdb_id}/season/{season_number}", + params={"api_key": TMDB_API_KEY, "append_to_response": "images,credits"} + ) + try: + response.raise_for_status() + season_data = response.json() + + # Extract only relevant information + season_info = { + '_id': season_data.get('_id'), + 'air_date': season_data.get('air_date'), + 'name': season_data.get('name'), + 'overview': season_data.get('overview'), + 'id': season_data.get('id'), + 'poster_path': season_data.get('poster_path'), + 'season_number': season_data.get('season_number'), + 'vote_average': season_data.get('vote_average'), + 'vote_count': season_data.get('vote_count'), + 'episodes': [] + } + + # Extract minimal episode information + for episode in season_data.get('episodes', []): + season_info['episodes'].append({ + 'air_date': episode.get('air_date'), + 'episode_number': episode.get('episode_number'), + 'episode_type': episode.get('episode_type'), + 'id': episode.get('id'), + 'name': episode.get('name'), + 'overview': episode.get('overview'), + 'runtime': episode.get('runtime'), + 'season_number': episode.get('season_number'), + 'still_path': episode.get('still_path'), + 'vote_average': episode.get('vote_average'), + 'vote_count': episode.get('vote_count') + }) + + # Include poster images if available + if 'images' in season_data and 'posters' in season_data['images']: + season_info['images'] = { + 'posters': season_data['images']['posters'] + } + + # Include main cast/crew if available (top-level only, not per-episode) + if 'credits' in season_data: + if 'cast' in season_data['credits']: + season_info['credits'] = { + 'cast': season_data['credits']['cast'] + } + + if debug: + console.print(f"[cyan]Extracted season data: {json.dumps(season_info, indent=2)[:600]}...[/cyan]") + return season_info + + except Exception: + console.print(f"[bold red]Failed to fetch season data: {response.status_code}[/bold red]") + return {} + + except Exception: + console.print(f"[red]Error fetching season details for {tmdb_id}[/red]") + console.print(f"[red]Season: {season_number}[/red]") + return {} + + async def get_logo(tmdb_id, category, debug=False, logo_languages=None, TMDB_API_KEY=None, TMDB_BASE_URL=None, logo_json=None): logo_path = "" if logo_languages and isinstance(logo_languages, str) and ',' in logo_languages: @@ -1126,3 +1655,198 @@ async def get_logo(tmdb_id, category, debug=False, logo_languages=None, TMDB_API console.print(f"[red]Error fetching logo: {e}[/red]") return logo_path + + +async def get_tmdb_translations(tmdb_id, category, target_language='en', debug=False): + """Get translations from TMDb API""" + endpoint = "movie" if category == "MOVIE" else "tv" + url = f"{TMDB_BASE_URL}/{endpoint}/{tmdb_id}/translations" + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, params={"api_key": TMDB_API_KEY}) + response.raise_for_status() + data = response.json() + + # Look for target language translation + for translation in data.get('translations', []): + if translation.get('iso_639_1') == target_language: + translated_data = 
translation.get('data', {}) + translated_title = translated_data.get('title') or translated_data.get('name') + + if translated_title and debug: + console.print(f"[cyan]Found TMDb translation: '{translated_title}'[/cyan]") + + return translated_title or "" + + if debug: + console.print(f"[yellow]No {target_language} translation found in TMDb[/yellow]") + return "" + + except Exception as e: + if debug: + console.print(f"[yellow]TMDb translation fetch failed: {e}[/yellow]") + return "" + + +async def set_tmdb_metadata(meta, filename=None): + if not meta.get('edit', False): + # if we have these fields already, we probably got them from a multi id searching + # and don't need to fetch them again + essential_fields = ['title', 'year', 'genres', 'overview'] + tmdb_metadata_populated = all(meta.get(field) is not None for field in essential_fields) + else: + # if we're in that blasted edit mode, ignore any previous set data and get fresh + tmdb_metadata_populated = False + + if not tmdb_metadata_populated: + max_attempts = 2 + delay_seconds = 5 + for attempt in range(1, max_attempts + 1): + try: + tmdb_metadata = await tmdb_other_meta( + tmdb_id=meta['tmdb_id'], + path=meta.get('path'), + search_year=meta.get('search_year'), + category=meta.get('category'), + imdb_id=meta.get('imdb_id', 0), + manual_language=meta.get('manual_language'), + anime=meta.get('anime', False), + mal_manual=meta.get('mal_manual'), + aka=meta.get('aka', ''), + original_language=meta.get('original_language'), + poster=meta.get('poster'), + debug=meta.get('debug', False), + mode=meta.get('mode', 'cli'), + tvdb_id=meta.get('tvdb_id', 0), + quickie_search=meta.get('quickie_search', False), + filename=filename, + ) + + if tmdb_metadata and all(tmdb_metadata.get(field) for field in ['title', 'year']): + meta.update(tmdb_metadata) + if meta.get('retrieved_aka', None) is not None: + meta['aka'] = meta['retrieved_aka'] + break + else: + error_msg = f"Failed to retrieve essential metadata from TMDB ID: {meta['tmdb_id']}" + if meta['debug']: + console.print(f"[bold red]{error_msg}[/bold red]") + if attempt < max_attempts: + console.print(f"[yellow]Retrying TMDB metadata fetch in {delay_seconds} seconds... (Attempt {attempt + 1}/{max_attempts})[/yellow]") + await asyncio.sleep(delay_seconds) + else: + raise ValueError(error_msg) + except Exception as e: + error_msg = f"TMDB metadata retrieval failed for ID {meta['tmdb_id']}: {str(e)}" + if meta['debug']: + console.print(f"[bold red]{error_msg}[/bold red]") + if attempt < max_attempts: + console.print(f"[yellow]Retrying TMDB metadata fetch in {delay_seconds} seconds... 
(Attempt {attempt + 1}/{max_attempts})[/yellow]") + await asyncio.sleep(delay_seconds) + else: + console.print(f"[red]Catastrophic error getting TMDB data using ID {meta['tmdb_id']}[/red]") + console.print(f"[red]Check category is set correctly, UA was using {meta.get('category', None)}[/red]") + raise RuntimeError(error_msg) from e + + +async def get_tmdb_localized_data(meta, data_type, language, append_to_response): + endpoint = None + if data_type == 'main': + endpoint = f'/{meta["category"].lower()}/{meta["tmdb"]}' + elif data_type == 'season': + season = meta.get('season_int') + if season is None: + return None + endpoint = f'/tv/{meta["tmdb"]}/season/{season}' + elif data_type == 'episode': + season = meta.get('season_int') + episode = meta.get('episode_int') + if season is None or episode is None: + return None + endpoint = f'/tv/{meta["tmdb"]}/season/{season}/episode/{episode}' + + url = f'{TMDB_BASE_URL}{endpoint}' + params = { + 'api_key': TMDB_API_KEY, + 'language': language + } + if append_to_response: + params.update({'append_to_response': append_to_response}) + + if meta.get('debug', False): + console.print( + '[green]Requesting localized data from TMDB.\n' + f"Type: '{data_type}'.\n" + f"Language: '{language}'\n" + f"Append to response: '{append_to_response}'\n" + f"Endpoint: '{endpoint}'[/green]\n" + ) + + save_dir = f"{meta['base_dir']}/tmp/{meta['uuid']}/" + filename = f"{save_dir}tmdb_localized_data.json" + + # Create a cache key for this specific request + cache_key = filename + + # Get or create a lock for this cache key + if cache_key not in _cache_locks: + _cache_locks[cache_key] = asyncio.Lock() + + cache_lock = _cache_locks[cache_key] + + async with cache_lock: + # Re-read the cache file while holding the lock + localized_data = {} + if os.path.exists(filename): + try: + async with aiofiles.open(filename, 'r', encoding='utf-8') as f: + content = await f.read() + try: + localized_data = json.loads(content) + except json.JSONDecodeError as e: + console.print(f"[red]Warning: JSON decode error in {filename}: {e}. 
Creating new file.[/red]") + localized_data = {} + except Exception as e: + console.print(f"[red]Error reading localized data file {filename}: {e}[/red]") + localized_data = {} + + # Re-check if we have cached data for this specific language and data_type + cached_result = localized_data.get(language, {}).get(data_type) + if cached_result: + return cached_result + + # Fetch from API if not in cache + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(url, params=params) + if response.status_code == 200: + data = response.json() + + # Merge the fetched data into existing cache + localized_data.setdefault(language, {})[data_type] = data + + # Attempt to write to disk, but don't fail if write errors occur + try: + async with aiofiles.open(filename, 'w', encoding='utf-8') as f: + data_str = json.dumps(localized_data, ensure_ascii=False, indent=4) + await f.write(data_str) + except (OSError, IOError, Exception) as e: + console.print(f'[red]Warning: Failed to write cache to {filename}: {e}[/red]') + + return data + else: + console.print(f'[red]Request failed for {url}: Status code {response.status_code}[/red]') + return None + + except httpx.RequestError as e: + console.print(f'[red]Request failed for {url}: {e}[/red]') + return None + finally: + # Optional cleanup: remove the lock if it's no longer being used + # Only clean up if this is the only reference to avoid race conditions + if cache_key in _cache_locks and not cache_lock.locked(): + try: + del _cache_locks[cache_key] + except KeyError: + pass # Already deleted by another coroutine diff --git a/src/torrentcreate.py b/src/torrentcreate.py index cf9e6367a..c38a5c802 100644 --- a/src/torrentcreate.py +++ b/src/torrentcreate.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from datetime import datetime import torf from torf import Torrent @@ -58,6 +59,10 @@ def calculate_piece_size(total_size, min_size, max_size, files, meta): else: piece_size = 128 * 1024 * 1024 # 128 MiB + if any(tracker in meta.get('trackers', []) for tracker in ['HDB', 'PTP']): + if piece_size > 16 * 1024 * 1024: + piece_size = 16 * 1024 * 1024 + # Enforce minimum and maximum limits piece_size = max(min_size, min(piece_size, max_size)) @@ -167,56 +172,62 @@ def create_torrent(meta, path, output_filename, tracker_url=None): if not completeness['complete']: just_go = False - missing_list = [f"S{s:02d}E{e:02d}" for s, e in completeness['missing_episodes']] - console.print("[red]Warning: Season pack appears incomplete!") - console.print(f"[yellow]Missing episodes: {', '.join(missing_list)}") - - # Show first 15 files from filelist - filelist = meta['filelist'] - files_shown = 0 - batch_size = 15 - - console.print(f"[cyan]Filelist ({len(filelist)} files):") - for i, file in enumerate(filelist[:batch_size]): - console.print(f"[cyan] {i+1:2d}. {os.path.basename(file)}") - - files_shown = min(batch_size, len(filelist)) - - # Loop to handle showing more files in batches - while files_shown < len(filelist) and not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - remaining_files = len(filelist) - files_shown - console.print(f"[yellow]... and {remaining_files} more files") - - if remaining_files > batch_size: - response = input(f"Show (n)ext {batch_size} files, (a)ll remaining files, (c)ontinue with incomplete pack, or (q)uit? (n/a/c/Q): ") - else: - response = input(f"Show (a)ll remaining {remaining_files} files, (c)ontinue with incomplete pack, or (q)uit? 
(a/c/Q): ") - - if response.lower() == 'n' and remaining_files > batch_size: - # Show next batch of files - next_batch = filelist[files_shown:files_shown + batch_size] - for i, file in enumerate(next_batch): - console.print(f"[cyan] {files_shown + i + 1:2d}. {os.path.basename(file)}") - files_shown += len(next_batch) - elif response.lower() == 'a': - # Show all remaining files - remaining_batch = filelist[files_shown:] - for i, file in enumerate(remaining_batch): - console.print(f"[cyan] {files_shown + i + 1:2d}. {os.path.basename(file)}") - files_shown = len(filelist) - elif response.lower() == 'c': - just_go = True - break # Continue with incomplete pack - else: # 'q' or any other input - console.print("[red]Aborting torrent creation due to incomplete season pack") - sys.exit(1) - - # Final confirmation if not in unattended mode - if not meta['unattended'] and not just_go or (meta['unattended'] and meta.get('unattended-confirm', False) and not just_go): - response = input("Continue with incomplete season pack? (y/N): ") - if response.lower() != 'y': - console.print("[red]Aborting torrent creation due to incomplete season pack") - sys.exit(1) + try: + missing_list = [f"S{s:02d}E{e:02d}" for s, e in completeness['missing_episodes']] + except ValueError: + console.print("[red]Error determining missing episodes, you should double check the pack manually.") + time.sleep(5) + missing_list = ["Unknown"] + if 'Unknown' not in missing_list: + console.print("[red]Warning: Season pack appears incomplete!") + console.print(f"[yellow]Missing episodes: {', '.join(missing_list)}") + + # Show first 15 files from filelist + filelist = meta['filelist'] + files_shown = 0 + batch_size = 15 + + console.print(f"[cyan]Filelist ({len(filelist)} files):") + for i, file in enumerate(filelist[:batch_size]): + console.print(f"[cyan] {i+1:2d}. {os.path.basename(file)}") + + files_shown = min(batch_size, len(filelist)) + + # Loop to handle showing more files in batches + while files_shown < len(filelist) and not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + remaining_files = len(filelist) - files_shown + console.print(f"[yellow]... and {remaining_files} more files") + + if remaining_files > batch_size: + response = input(f"Show (n)ext {batch_size} files, (a)ll remaining files, (c)ontinue with incomplete pack, or (q)uit? (n/a/c/Q): ") + else: + response = input(f"Show (a)ll remaining {remaining_files} files, (c)ontinue with incomplete pack, or (q)uit? (a/c/Q): ") + + if response.lower() == 'n' and remaining_files > batch_size: + # Show next batch of files + next_batch = filelist[files_shown:files_shown + batch_size] + for i, file in enumerate(next_batch): + console.print(f"[cyan] {files_shown + i + 1:2d}. {os.path.basename(file)}") + files_shown += len(next_batch) + elif response.lower() == 'a': + # Show all remaining files + remaining_batch = filelist[files_shown:] + for i, file in enumerate(remaining_batch): + console.print(f"[cyan] {files_shown + i + 1:2d}. {os.path.basename(file)}") + files_shown = len(filelist) + elif response.lower() == 'c': + just_go = True + break # Continue with incomplete pack + else: # 'q' or any other input + console.print("[red]Aborting torrent creation due to incomplete season pack") + sys.exit(1) + + # Final confirmation if not in unattended mode + if not meta['unattended'] and not just_go or (meta['unattended'] and meta.get('unattended_confirm', False) and not just_go): + response = input("Continue with incomplete season pack? 
(y/N): ") + if response.lower() != 'y': + console.print("[red]Aborting torrent creation due to incomplete season pack") + sys.exit(1) else: if meta['debug']: console.print("[green]Season pack completeness verified") @@ -253,6 +264,12 @@ def create_torrent(meta, path, output_filename, tracker_url=None): except (ValueError, TypeError): console.print("[yellow]Warning: Invalid max_piece_size value, using default piece length") + if not meta.get('max_piece_size') and tracker_url is None and not any(tracker in meta.get('trackers', []) for tracker in ['HDB', 'PTP', 'MTV']): + cmd.extend(['-m', '27']) + + if meta.get('mkbrr_threads') != '0': + cmd.extend(["--workers", meta['mkbrr_threads']]) + if not meta.get('is_disc', False): exclude_str = build_mkbrr_exclude_string(str(path), meta['filelist']) cmd.extend(["--exclude", exclude_str]) @@ -334,13 +351,13 @@ def create_torrent(meta, path, output_filename, tracker_url=None): meta=meta, path=path, trackers=["https://fake.tracker/"], - source="Audionut UA", + source="UA", private=True, exclude_globs=exclude or [], include_globs=include or [], creation_date=datetime.now(), - comment="Created by Audionut's Upload Assistant", - created_by="Audionut's Upload Assistant", + comment="Created by Upload Assistant", + created_by="Upload Assistant", piece_size=piece_size ) @@ -410,8 +427,8 @@ async def create_base_from_existing_torrent(torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) base_torrent.trackers = ['https://fake.tracker/'] - base_torrent.comment = "Created by Audionut's Upload Assistant" - base_torrent.created_by = "Created by Audionut's Upload Assistant" + base_torrent.comment = "Created by Upload Assistant" + base_torrent.created_by = "Created by Upload Assistant" info_dict = base_torrent.metainfo['info'] valid_keys = ['name', 'piece length', 'pieces', 'private', 'source'] diff --git a/src/trackerhandle.py b/src/trackerhandle.py index cd74b5190..c2affe2fe 100644 --- a/src/trackerhandle.py +++ b/src/trackerhandle.py @@ -1,12 +1,16 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import asyncio -import traceback import cli_ui -from src.trackers.THR import THR +import sys +import traceback + +from cogs.redaction import redact_private_info +from src.cleanup import cleanup, reset_terminal +from src.get_desc import DescriptionBuilder +from src.manualpackage import package from src.trackers.PTP import PTP +from src.trackers.THR import THR from src.trackersetup import TRACKER_SETUP -from src.trackers.COMMON import COMMON -from src.manualpackage import package -from cogs.redaction import redact_private_info async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): @@ -14,7 +18,8 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): 'AITHER': {'mod_q': True, 'draft': False}, 'BHD': {'draft_live': True}, 'BLU': {'mod_q': True, 'draft': False}, - 'LST': {'mod_q': True, 'draft': True} + 'LST': {'mod_q': True, 'draft': True}, + 'LT': {'mod_q': True, 'draft': False} } modq, draft = None, None @@ -35,12 +40,12 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): async def process_trackers(meta, config, client, console, api_trackers, tracker_class_map, http_trackers, other_api_trackers): - common = COMMON(config=config) tracker_setup = TRACKER_SETUP(config=config) enabled_trackers = tracker_setup.trackers_enabled(meta) async def process_single_tracker(tracker): - tracker_class = 
tracker_class_map[tracker](config=config) + if not tracker == "MANUAL": + tracker_class = tracker_class_map[tracker](config=config) if meta['name'].endswith('DUPE?'): meta['name'] = meta['name'].replace(' DUPE?', '') @@ -79,8 +84,6 @@ async def process_single_tracker(tracker): upload_status = tracker_status.get(tracker, {}).get('upload', False) if upload_status: try: - if tracker == "RTF": - await tracker_class.api_test(meta) try: await tracker_class.upload(meta, disctype) except Exception as e: @@ -101,14 +104,13 @@ async def process_single_tracker(tracker): upload_status = tracker_status.get(tracker, {}).get('upload', False) if upload_status: try: - if tracker == "AR": - await tracker_class.validate_credentials(meta) is True try: await tracker_class.upload(meta, disctype) except Exception as e: console.print(f"[red]Upload failed: {e}") console.print(traceback.format_exc()) return + except Exception: console.print(traceback.format_exc()) return @@ -120,14 +122,20 @@ async def process_single_tracker(tracker): if meta['unattended']: do_manual = True else: - do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + try: + do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) if do_manual: for manual_tracker in enabled_trackers: if manual_tracker != 'MANUAL': manual_tracker = manual_tracker.replace(" ", "").upper().strip() tracker_class = tracker_class_map[manual_tracker](config=config) if manual_tracker in api_trackers: - await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) + await DescriptionBuilder(config).unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) else: await tracker_class.edit_desc(meta) url = await package(meta) @@ -170,9 +178,21 @@ async def process_single_tracker(tracker): console.print(traceback.format_exc()) return - # Process each tracker sequentially - for tracker in enabled_trackers: - await process_single_tracker(tracker) + multi_screens = int(config['DEFAULT'].get('multiScreens', 2)) + discs = meta.get('discs', []) + one_disc = True + if discs and len(discs) == 1: + one_disc = True + elif discs and len(discs) > 1: + one_disc = False + + if (not meta.get('tv_pack') and one_disc) or multi_screens == 0: + # Run all tracker tasks concurrently + await asyncio.gather(*(process_single_tracker(tracker) for tracker in enabled_trackers)) + else: + # Process each tracker sequentially + for tracker in enabled_trackers: + await process_single_tracker(tracker) try: if meta.get('print_tracker_messages', False): diff --git a/src/trackermeta.py b/src/trackermeta.py index 64b579678..5d417d920 100644 --- a/src/trackermeta.py +++ b/src/trackermeta.py @@ -1,15 +1,19 @@ -from src.console import console -from src.trackers.COMMON import COMMON -from data.config import config +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import aiohttp import asyncio -import sys -from PIL import Image +import click import io from io import BytesIO import os -import click +import sys + +from PIL import Image + +from data.config import config +from src.bbcode import BBCODE from src.btnid import get_bhd_torrents +from src.console import console +from src.trackers.COMMON import COMMON # Define expected amount of screenshots from the config expected_images = int(config['DEFAULT']['screens']) @@ -73,7 +77,7 @@ async def check_images_concurrently(imagelist, meta): # 
Function to check each image's URL, host, and log resolution save_directory = f"{meta['base_dir']}/tmp/{meta['uuid']}" - timeout = aiohttp.ClientTimeout(total=30, connect=10, sock_connect=10, sock_read=10) + timeout = aiohttp.ClientTimeout(total=15, connect=5, sock_connect=5, sock_read=5) async def check_and_collect(image_dict): img_url = image_dict.get('raw_url') @@ -176,6 +180,8 @@ async def check_image_link(url, timeout=None): # Handle when pixhost url points to web_url and convert to raw_url if url.startswith("https://pixhost.to/show/"): url = url.replace("https://pixhost.to/show/", "https://img1.pixhost.to/images/", 1) + if timeout is None: + timeout = aiohttp.ClientTimeout(total=20, connect=10, sock_connect=10) connector = aiohttp.TCPConnector(ssl=False) # Disable SSL verification for testing @@ -215,7 +221,6 @@ async def check_image_link(url, timeout=None): async def update_meta_with_unit3d_data(meta, tracker_data, tracker_name, only_id=False): # Unpack the expected 9 elements, ignoring any additional ones tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data - if tmdb: meta['tmdb_id'] = tmdb if meta['debug']: @@ -234,10 +239,11 @@ async def update_meta_with_unit3d_data(meta, tracker_data, tracker_name, only_id console.print("set MAL ID:", meta['mal_id']) if desc and not only_id: meta['description'] = desc + meta['saved_description'] = True with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: if len(desc) > 0: description.write((desc or "") + "\n") - if category and not meta.get('category'): + if category and not meta.get('manual_category', None): cat_upper = category.upper() if "MOVIE" in cat_upper: meta['category'] = "MOVIE" @@ -246,20 +252,20 @@ async def update_meta_with_unit3d_data(meta, tracker_data, tracker_name, only_id if meta['debug']: console.print("set Category:", meta['category']) - if not meta.get('image_list'): # Only handle images if image_list is not already populated - if imagelist: # Ensure imagelist is not empty before setting - valid_images = await check_images_concurrently(imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - if meta.get('image_list'): # Double-check if image_list is set before handling it - if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('huno') or meta.get('ulcx')) or meta['unattended']: - await handle_image_list(meta, tracker_name, valid_images) + if imagelist: # Ensure imagelist is not empty before setting + valid_images = await check_images_concurrently(imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + if meta.get('image_list'): # Double-check if image_list is set before handling it + if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('huno') or meta.get('ulcx')) or meta['unattended']: + await handle_image_list(meta, tracker_name, valid_images) if filename: meta[f'{tracker_name.lower()}_filename'] = filename if meta['debug']: console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") + return True async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, search_term, search_file_folder, only_id=False): @@ -307,7 +313,8 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea if valid_images: meta['image_list'] = valid_images else: - console.print("[yellow]Skipping PTP as no match found[/yellow]") + if 
meta['debug']: + console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False else: @@ -359,21 +366,24 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea meta.get('isdir') is True) if meta.get('bhd'): - await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, torrent_id=meta['bhd']) + imdb, tmdb = await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, torrent_id=meta['bhd']) elif use_foldername: # Use folder name from path if available, fall back to UUID folder_path = meta.get('path', '') foldername = os.path.basename(folder_path) if folder_path else meta.get('uuid', '') - await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, foldername=foldername) + imdb, tmdb = await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, foldername=foldername) else: # Only use filename if none of the folder conditions are met filename = os.path.basename(meta['filelist'][0]) if meta.get('filelist') else None - await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, filename=filename) + imdb, tmdb = await get_bhd_torrents(bhd_api, bhd_rss_key, meta, only_id, filename=filename) - if meta.get('imdb_id') or meta.get('tmdb_id'): + if imdb or tmdb: if not meta['unattended']: - console.print(f"[green]{tracker_name} data found: IMDb ID: {meta.get('imdb_id')}, TMDb ID: {meta.get('tmdb_id')}[/green]") + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TMDb ID: {tmdb}[/green]") if await prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): + found_match = True + meta['imdb_id'] = int(imdb) if imdb else meta.get('imdb_id', 0) + meta['tmdb_id'] = int(tmdb) if tmdb else meta.get('tmdb_id', 0) if meta.get('description') and meta.get('description') != "": description = meta.get('description') console.print("[bold green]Successfully grabbed description from BHD") @@ -446,15 +456,14 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea if valid_images: meta['image_list'] = valid_images await handle_image_list(meta, tracker_name, valid_images) - found_match = True - console.print(f"[green]{tracker_name} data retained.[/green]") else: meta['image_list'] = [] + else: console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") meta[tracker_key] = None - meta['imdb_id'] = 0 - meta['tmdb_id'] = 0 + meta['imdb_id'] = meta.get('imdb_id') if meta.get('imdb_id') else 0 + meta['tmdb_id'] = meta.get('tmdb_id') if meta.get('tmdb_id') else 0 meta["framestor"] = False meta["flux"] = False meta["description"] = "" @@ -471,17 +480,17 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea found_match = False else: console.print(f"[green]{tracker_name} data found: IMDb ID: {meta.get('imdb_id')}, TMDb ID: {meta.get('tmdb_id')}[/green]") + found_match = True if meta.get('image_list'): valid_images = await check_images_concurrently(meta.get('image_list'), meta) if valid_images: meta['image_list'] = valid_images - found_match = True else: meta['image_list'] = [] else: found_match = False - elif tracker_name in ["HUNO", "BLU", "AITHER", "LST", "OE", "ULCX"]: + elif tracker_name in ["HUNO", "BLU", "AITHER", "LST", "OE", "ULCX", "RF", "OTW", "YUS", "DP", "SP"]: if meta.get(tracker_key) is not None: if meta['debug']: console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") @@ -507,16 +516,18 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea if any(item not in [None, 0] for item in 
tracker_data[:3]): # Check for valid tmdb, imdb, or tvdb if meta['debug']: - console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") - await update_meta_with_unit3d_data(meta, tracker_data, tracker_name, only_id) - found_match = True + console.print(f"[green]Valid data found on {tracker_name}[/green]") + selected = await update_meta_with_unit3d_data(meta, tracker_data, tracker_name, only_id) + if selected: + found_match = True + else: + found_match = False else: if meta['debug']: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") found_match = False elif tracker_name == "HDB": - from src.bbcode import BBCODE bbcode = BBCODE() if meta.get('hdb') is not None: meta[manual_key] = meta[tracker_key] @@ -525,23 +536,20 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea # Use get_info_from_torrent_id function if ID is found in meta imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], meta['hdb_description'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) - if imdb or tvdb_id: + if imdb or tvdb_id or meta['hdb_description']: meta['imdb_id'] = imdb if imdb else meta.get('imdb_id', 0) meta['tvdb_id'] = tvdb_id if tvdb_id else meta.get('tvdb_id', 0) meta['hdb_name'] = hdb_name found_match = True - result = bbcode.clean_hdb_description(meta['hdb_description']) - if meta['hdb_description'] and len(meta['hdb_description']) > 0 and not only_id: - if result is None: - console.print("[yellow]Failed to clean HDB description, it might be empty or malformed[/yellow]") - meta['description'] = "" - meta['image_list'] = [] - else: - meta['description'], meta['image_list'] = result - meta['saved_description'] = True - - if meta.get('image_list') and meta.get('keep_images'): - valid_images = await check_images_concurrently(meta.get('image_list'), meta) + description, image_list = bbcode.clean_hdb_description(meta['hdb_description']) + if description and len(description) > 0 and not only_id: + console.print(f"Description content:\n{description[:500]}...", markup=False) + meta['description'] = description + meta['saved_description'] = True + else: + console.print("[yellow]HDB description empty[/yellow]") + if image_list and meta.get('keep_images'): + valid_images = await check_images_concurrently(image_list, meta) if valid_images: meta['image_list'] = valid_images await handle_image_list(meta, tracker_name, valid_images) @@ -553,7 +561,8 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea console.print(f"[yellow]{tracker_name} data not found for ID: {meta[tracker_key]}[/yellow]") found_match = False else: - console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + if meta['debug']: + console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") # Use search_filename function if ID is not found in meta imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], meta['hdb_description'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta) @@ -561,7 +570,7 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea if tracker_id: meta[tracker_key] = tracker_id - if imdb or tvdb_id: + if imdb or tvdb_id or meta['hdb_description']: if not meta['unattended']: console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") if await prompt_user_for_confirmation(f"Do you want to use the ID's found on 
{tracker_name}?"): @@ -569,50 +578,61 @@ async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, sea meta['imdb_id'] = imdb if imdb else meta.get('imdb_id') meta['tvdb_id'] = tvdb_id if tvdb_id else meta.get('tvdb_id') found_match = True - if meta['hdb_description'] and len(meta['hdb_description']) > 0 and not only_id: - result = bbcode.clean_hdb_description(meta['hdb_description']) - if result is None: - console.print("[yellow]Failed to clean HDB description, it might be empty or malformed[/yellow]") - meta['description'] = "" - meta['image_list'] = [] - else: - desc, meta['image_list'] = result - console.print("[bold green]Successfully grabbed description from HDB") - console.print(f"Description content:\n{desc[:1000]}...", markup=False) - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") - - if edit_choice.lower() == 'e': - edited_description = click.edit(desc) - if edited_description: - desc = edited_description.strip() - meta['description'] = desc - meta['saved_description'] = True - console.print(f"[green]Final description after editing:[/green] {desc}", markup=False) - elif edit_choice.lower() == 'd': - meta['description'] = "" - meta['hdb_description'] = "" - console.print("[yellow]Description discarded.[/yellow]") - else: - console.print("[green]Keeping the original description.[/green]") - meta['description'] = desc + description, image_list = bbcode.clean_hdb_description(meta['hdb_description']) + if description and len(description) > 0 and not only_id: + console.print("[bold green]Successfully grabbed description from HDB") + console.print(f"HDB Description content:\n{description[:1000]}.....", markup=False) + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + meta['description'] = description meta['saved_description'] = True - if meta.get('image_list') and meta.get('keep_images'): - valid_images = await check_images_concurrently(meta.get('image_list'), meta) - if valid_images: - meta['image_list'] = valid_images - await handle_image_list(meta, tracker_name, valid_images) + console.print(f"[green]Final description after editing:[/green] {description}", markup=False) + elif edit_choice.lower() == 'd': + meta['hdb_description'] = "" + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + meta['description'] = description + meta['saved_description'] = True + else: + console.print("[yellow]HDB description empty[/yellow]") + if image_list and meta.get('keep_images'): + valid_images = await check_images_concurrently(image_list, meta) + if valid_images: + meta['image_list'] = valid_images + await handle_image_list(meta, tracker_name, valid_images) else: console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") meta[tracker_key] = None meta['tvdb_id'] = meta.get('tvdb_id') if meta.get('tvdb_id') else 0 meta['imdb_id'] = meta.get('imdb_id') if meta.get('imdb_id') else 0 meta['hdb_name'] = None + meta['hdb_description'] = "" found_match = False else: + meta['imdb_id'] = imdb if imdb else meta.get('imdb_id') + meta['tvdb_id'] = tvdb_id if tvdb_id else meta.get('tvdb_id') + 
description, image_list = bbcode.clean_hdb_description(meta['hdb_description']) + if description and len(description) > 0 and not only_id: + console.print(f"HDB Description content:\n{description[:500]}.....", markup=False) + meta['description'] = description + meta['saved_description'] = True + if image_list and meta.get('keep_images'): + valid_images = await check_images_concurrently(image_list, meta) + if valid_images: + meta['image_list'] = valid_images + await handle_image_list(meta, tracker_name, valid_images) console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {hdb_name}[/green]") found_match = True else: + meta['hdb_name'] = None + meta['hdb_description'] = "" + meta[tracker_key] = None found_match = False return meta, found_match diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 457502571..66102e066 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -1,45 +1,39 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord +import aiofiles import asyncio -import requests +import httpx import os import platform -from src.trackers.COMMON import COMMON -from src.console import console -import bencodepy -import httpx +import re +from src.bbcode import BBCODE +from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class ACM(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class ACM(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='ACM') self.config = config + self.common = COMMON(config) self.tracker = 'ACM' self.source_flag = 'AsianCinema' - self.upload_url = 'https://eiga.moi/api/torrents/upload' - self.search_url = 'https://eiga.moi/api/torrents/filter' - self.signature = None - self.banned_groups = [""] + self.base_url = 'https://eiga.moi/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type(self, meta): + async def get_type_id(self, meta): if meta['is_disc'] == "BDMV": bdinfo = meta['bdinfo'] bd_sizes = [25, 50, 66, 100] + bd_size = 100 # Default to largest size for each in bd_sizes: if bdinfo['size'] < each: bd_size = each @@ -55,12 +49,14 @@ async def get_type(self, meta): type_string = "DVD 5" elif "DVD9" in meta['dvd_size']: type_string = "DVD 9" + else: + type_string = "Other" else: if meta['type'] == "REMUX": - if meta['source'] == "BluRay": - type_string = "REMUX" if meta['uhd'] == "UHD": type_string = "UHD REMUX" + else: + type_string = "REMUX" else: type_string = meta['type'] # else: @@ -69,10 +65,8 @@ async def get_type(self, meta): # type_id = meta['resolution'] # else: # type_id = "Other" - return type_string - async def get_type_id(self, type): - type_id = { + type_id_map = { 'UHD 100': '1', 'UHD 66': '2', 'UHD 50': '3', @@ -84,11 +78,20 @@ async def get_type_id(self, type): 'WEBDL': '9', 'SDTV': '13', 'DVD 9': '16', - 'HDTV': '17' - }.get(type, '0') + 'HDTV': '17', + } + type_id = type_id_map.get(type_string, '0') + return type_id - async def get_res_id(self, resolution): + async def 
get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_resolution_id(self, meta): resolution_id = { '2160p': '1', '1080p': '2', @@ -98,17 +101,21 @@ async def get_res_id(self, resolution): '576i': '4', '480p': '5', '480i': '5' - }.get(resolution, '10') + }.get(meta['resolution'], '10') return resolution_id # ACM rejects uploads with more that 10 keywords - async def get_keywords(self, keywords): + async def get_keywords(self, meta): + keywords = meta.get('keywords', '') if keywords != '': keywords_list = keywords.split(',') - keywords_list = [keyword for keyword in keywords_list if " " not in keyword][:10] + keywords_list = [keyword.strip() for keyword in keywords_list if " " not in keyword.strip()][:10] keywords = ', '.join(keywords_list) return keywords + async def get_additional_files(self, meta): + return {} + def get_subtitles(self, meta): sub_lang_map = { ("Arabic", "ara", "ar"): 'Ara', @@ -189,15 +196,14 @@ def get_subs_tag(self, subs): return f" [{subs[0]} subs only]" async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) + await self.common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(await self.get_type(meta)) - resolution_id = await self.get_res_id(meta['resolution']) - await self.edit_desc(meta) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - acm_name = await self.edit_name(meta) + type_id = await self.get_type_id(meta) + resolution_id = await self.get_resolution_id(meta) + desc = await self.get_description(meta) + region_id = await self.common.unit3d_region_ids(meta.get('region')) + distributor_id = await self.common.unit3d_distributor_ids(meta.get('distributor')) + acm_name = await self.get_name(meta) if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): anon = 0 else: @@ -210,11 +216,13 @@ async def upload(self, meta, disctype): for each in meta['discs']: bd_dump = bd_dump + each['summary'].strip() + "\n\n" else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() + files = {'torrent': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent')} data = { 'name': acm_name, 'description': desc, @@ -231,7 +239,7 @@ async def upload(self, meta, disctype): 'anonymous': anon, 'stream': meta['stream'], 'sd': meta['sd'], - 'keywords': await self.get_keywords(meta['keywords']), + 'keywords': await self.get_keywords(meta), 'personal_release': int(meta.get('personalrelease', False)), 'internal': 0, 'featured': 0, @@ -249,33 +257,48 @@ async def upload(self, meta, disctype): if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') 
data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } + headers = {'User-Agent': f'{meta["ua_name"]} {meta.get("current_version", "")} ({platform.system()} {platform.release()})'} params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - except Exception: - console.print("It may have uploaded, go check") - return + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + meta['tracker_status'][self.tracker]['status_message'] = response.json() + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split('.')[1].split('/')[3] + meta['tracker_status'][self.tracker]['torrent_id'] = t_id + await self.common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.announce_url, + self.torrent_url + t_id, + headers=headers, + params=params, + downurl=response.json()['data'] + ) + except httpx.TimeoutException: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: {self.tracker} request timed out after 10 seconds' + except httpx.RequestError as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: Unable to upload to {self.tracker}: {e}' + except Exception: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: It may have uploaded, go check: {self.tracker}' + return else: console.print("[cyan]Request Data:") console.print(data) meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() async def search_existing(self, meta, disctype): dupes = [] params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdb': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(await self.get_type(meta)), + 'categories[]': (await self.get_category_id(meta)), + 'types[]': (await self.get_type_id(meta)), # A majority of the ACM library doesn't contain resolution information # 'resolutions[]' : await self.get_res_id(meta['resolution']), # 'name' : "" @@ -301,18 +324,10 @@ async def search_existing(self, meta, disctype): return dupes - # async def fix_rtl(self, meta): - # original_title = meta.get('original_title') - # right_to_left_languages: ["Arabic", "Aramaic", "Azeri", "Divehi", "Fula", "Hebrew", "Kurdish", "N'ko", "Persian", "Rohingya", "Syriac", "Urdu"] - # if meta.get('original_language') in right_to_left_languages: - # return f' / {original_title} {chr(int("202A", 16))}' - # return original_title - - async def edit_name(self, meta): + async def get_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') source = meta.get('source') is_disc = meta.get('is_disc') @@ -332,6 +347,7 @@ async def edit_name(self, meta): name = name.replace("UHD BluRay REMUX", "Remux") name = name.replace("BluRay REMUX", "Remux") name = name.replace("H.265", "HEVC") + name = name.replace(" Atmos", "") if is_disc == 'DVD': name = name.replace(f'{source} DVD5', f'{resolution} DVD {source}') name = name.replace(f'{source} DVD9', f'{resolution} DVD {source}') @@ -341,83 +357,63 @@ async def edit_name(self, meta): name = name + self.get_subs_tag(subs) return name - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: - from src.bbcode import BBCODE - # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '': - descfile.write(f"[center][b][color=#ff00ff][size=18]This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]") + async def get_description(self, meta): + async with aiofiles.open(f'{meta["base_dir"]}/tmp/{meta["uuid"]}/DESCRIPTION.txt', 'r', encoding='utf-8') as f: + base = await f.read() + + output_path = f'{meta["base_dir"]}/tmp/{meta["uuid"]}/[{self.tracker}]DESCRIPTION.txt' + + async with aiofiles.open(output_path, 'w', encoding='utf-8') as descfile: + if meta.get('type') == 'WEBDL' and meta.get('service_longname', ''): + await descfile.write( + f'[center][b][color=#ff00ff][size=18]This release is sourced from {meta["service_longname"]} and is not transcoded,' + f'just remuxed from the direct {meta["service_longname"]} stream[/size][/color][/b][/center]\n' + ) + bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") - descfile.write("\n") + + discs = meta.get('discs', []) + if discs: + if discs[0].get('type') == 'DVD': + await descfile.write(f'[spoiler=VOB MediaInfo][code]{discs[0]["vob_mi"]}[/code][/spoiler]\n\n') + if len(discs) >= 2: for each in discs[1:]: - if each['type'] == "BDMV": 
+ if each.get('type') == 'BDMV': # descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") # descfile.write("\n") pass - if each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") - descfile.write("\n") - desc = base + if each.get('type') == 'DVD': + await descfile.write(f'{each.get("name")}:\n') + vob_mi = each.get("vob_mi", '') + ifo_mi = each.get("ifo_mi", '') + await descfile.write( + f'[spoiler={os.path.basename(each["vob"])}][code]{vob_mi}[/code][/spoiler] ' + f'[spoiler={os.path.basename(each["ifo"])}][code]{ifo_mi}[/code][/spoiler]\n\n' + ) + + desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', base, flags=re.DOTALL) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) desc = bbcode.convert_comparison_to_collapse(desc, 1000) desc = desc.replace('[img]', '[img=300]') - descfile.write(desc) - images = meta['image_list'] - if len(images) > 0: - descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] - descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") - descfile.write("[/center]") - if self.signature is not None: - descfile.write(self.signature) - descfile.close() - return - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } + await descfile.write(desc) - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() + images = meta.get('image_list', []) - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) + if images: + await descfile.write('[center]\n') + for i in range(min(len(images), int(meta.get('screens', 0)))): + image = images[i] + web_url = image.get('web_url', '') + img_url = image.get('img_url', '') + await descfile.write(f'[url={web_url}][img=350]{img_url}[/img][/url]') + await descfile.write('\n[/center]') - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) + await descfile.write(f"\n[right][url=https://github.com/Audionut/Upload-Assistant][size=4]{meta['ua_signature']}[/size][/url][/right]") - return details_link - else: - return None - else: - return None + async with aiofiles.open(output_path, 'r', encoding='utf-8') as f: + final_desc = await f.read() - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None + return final_desc diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index f1d2082b1..edf493d6d 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -1,142 +1,46 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 
-*-
 # import discord
-import asyncio
-import requests
-import platform
-import os
-import glob
-import httpx
-from src.trackers.COMMON import COMMON
 from src.console import console
 from src.languages import process_desc_language, has_english_language
+from src.trackers.COMMON import COMMON
+from src.trackers.UNIT3D import UNIT3D
-class AITHER():
-    """
-    Edit for Tracker:
-        Edit BASE.torrent with announce and source
-        Check for duplicates
-        Set type/category IDs
-        Upload
-    """
+class AITHER(UNIT3D):
     def __init__(self, config):
+        super().__init__(config, tracker_name='AITHER')
         self.config = config
+        self.common = COMMON(config)
         self.tracker = 'AITHER'
         self.source_flag = 'Aither'
-        self.search_url = 'https://aither.cc/api/torrents/filter'
-        self.upload_url = 'https://aither.cc/api/torrents/upload'
-        self.torrent_url = 'https://aither.cc/torrents/'
-        self.id_url = 'https://aither.cc/api/torrents/'
-        self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]"
+        self.base_url = 'https://aither.cc/'
+        self.banned_url = f'{self.base_url}/api/blacklists/releasegroups'
+        self.claims_url = f'{self.base_url}/api/internals/claim'
+        self.id_url = f'{self.base_url}/api/torrents/'
+        self.upload_url = f'{self.base_url}/api/torrents/upload'
+        self.search_url = f'{self.base_url}/api/torrents/filter'
+        self.torrent_url = f'{self.base_url}/torrents/'
+        self.requests_url = f'{self.base_url}/api/requests/filter'
         self.banned_groups = []
         pass
-    async def upload(self, meta, disctype):
-        common = COMMON(config=self.config)
-        await common.edit_torrent(meta, self.tracker, self.source_flag)
-        await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True)
-        cat_id = await self.get_cat_id(meta['category'])
-        type_id = await self.get_type_id(meta['type'])
-        resolution_id = await self.get_res_id(meta['resolution'])
-        modq = await self.get_flag(meta, 'modq')
-        name = await self.edit_name(meta)
-        region_id = await common.unit3d_region_ids(meta.get('region'))
-        distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
-        if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False):
-            anon = 0
-        else:
-            anon = 1
-        if meta['bdinfo'] is not None:
-            mi_dump = None
-            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
-        else:
-            mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()
-            bd_dump = None
-        desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read()
-        torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent"
-        open_torrent = open(torrent_file_path, 'rb')
-        files = {'torrent': open_torrent}
-        base_dir = meta['base_dir']
-        uuid = meta['uuid']
-        specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo")
-        bhd_dir_path = os.path.join(base_dir, "tmp", uuid, "bhd.nfo")
-        bhd_files = glob.glob(bhd_dir_path)
-        nfo_files = glob.glob(specified_dir_path)
-        nfo_file = None
-        if nfo_files and not bhd_files:
-            nfo_file = open(nfo_files[0], 'rb')
-        if nfo_file:
-            files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain")
-        data = {
-            'name': name,
-            'description': desc,
-            'mediainfo': mi_dump,
-            'bdinfo': bd_dump,
-            'category_id': cat_id,
-            'type_id': type_id,
-            'resolution_id': resolution_id,
-            'tmdb': meta['tmdb'],
-            'imdb': meta['imdb'],
-            'tvdb': meta['tvdb_id'],
-            'mal': meta['mal_id'],
-            'igdb': 0,
-            'anonymous': anon,
-            'stream': meta['stream'],
-            'sd': meta['sd'],
-            'keywords': meta['keywords'],
-            'personal_release': int(meta.get('personalrelease', False)),
-            'internal': 0,
-            'featured': 0,
-            'free': 0,
-            'doubleup': 0,
-            'sticky': 0,
-            'mod_queue_opt_in': modq,
-        }
-        headers = {
-            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
-        }
-        params = {
-            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
-        }
+    async def get_additional_checks(self, meta):
+        should_continue = True
+        if meta['valid_mi'] is False:
+            console.print("[bold red]No unique ID in mediainfo, skipping AITHER upload.")
+            return False
-        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
-            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
-                data['internal'] = 1
-        if meta.get('freeleech', 0) != 0:
-            data['free'] = meta.get('freeleech', 0)
-        if region_id != 0:
-            data['region_id'] = region_id
-        if distributor_id != 0:
-            data['distributor_id'] = distributor_id
-        if meta.get('category') == "TV":
-            data['season_number'] = meta.get('season_int', '0')
-            data['episode_number'] = meta.get('episode_int', '0')
-        if meta['debug'] is False:
-            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
-            try:
-                meta['tracker_status'][self.tracker]['status_message'] = response.json()
-                # adding torrent link to comment of torrent file
-                t_id = response.json()['data'].split(".")[1].split("/")[3]
-                meta['tracker_status'][self.tracker]['torrent_id'] = t_id
-                await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://aither.cc/torrents/" + t_id)
-            except Exception:
-                console.print("It may have uploaded, go check")
-                return
-        else:
-            console.print("[cyan]Request Data:")
-            console.print(data)
-            meta['tracker_status'][self.tracker]['status_message'] = "Debug
- open_torrent.close() + return should_continue - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 + async def get_additional_data(self, meta): + data = { + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), + } - return 1 if meta.get(flag_name, False) else 0 + return data - async def edit_name(self, meta): + async def get_name(self, meta): aither_name = meta['name'] resolution = meta.get('resolution') video_codec = meta.get('video_codec') @@ -144,15 +48,15 @@ async def edit_name(self, meta): name_type = meta.get('type', "") source = meta.get('source', "") - if not meta.get('audio_languages'): + if not meta.get('language_checked', False): await process_desc_language(meta, desc=None, tracker=self.tracker) - elif meta.get('audio_languages'): - audio_languages = meta['audio_languages'][0].upper() - if audio_languages and not await has_english_language(audio_languages): - if (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): - aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {audio_languages}", 1) - elif not meta.get('is_disc') == "BDMV": - aither_name = aither_name.replace(meta['resolution'], f"{audio_languages} {meta['resolution']}", 1) + audio_languages = meta['audio_languages'] + if audio_languages and not await has_english_language(audio_languages): + foreign_lang = meta['audio_languages'][0].upper() + if (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): + aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {foreign_lang}", 1) + elif not meta.get('is_disc') == "BDMV": + aither_name = aither_name.replace(meta['resolution'], f"{foreign_lang} {meta['resolution']}", 1) if name_type == "DVDRIP": source = "DVDRip" @@ -165,100 +69,4 @@ async def edit_name(self, meta): aither_name = aither_name.replace((meta['source']), f"{resolution} {meta['source']}", 1) aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) - return aither_name - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type=None, reverse=False): - type_mapping = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3', - 'DVDRIP': '3', - } - - if reverse: - # Return a reverse mapping of type IDs to type names - return {v: k for k, v in type_mapping.items()} - elif type is not None: - # Return the specific type ID - return type_mapping.get(type, '0') - else: - # Return the full mapping - return type_mapping - - async def get_res_id(self, resolution=None, reverse=False): - resolution_mapping = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9', - } - - if reverse: - # Return reverse mapping of IDs to resolutions - return {v: k for k, v in resolution_mapping.items()} - elif resolution is not None: - # Return the ID for the given resolution - return resolution_mapping.get(resolution, '10') # Default to '10' for unknown resolutions - else: - # Return the full mapping - return resolution_mapping - - async def search_existing(self, meta, disctype): - if meta['valid_mi'] is False: - console.print("[bold red]No unique ID in mediainfo, skipping AITHER upload.") - meta['skipping'] = "AITHER" - return - dupes = [] - params = { - 
'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'name': "" - } - if not meta.get('sd'): - params['resolutions[]'] = await self.get_res_id(meta['resolution']) - params['types[]'] = await self.get_type_id(meta['type']) - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = { - 'name': each['attributes']['name'], - 'size': each['attributes']['size'] - } - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'name': aither_name} diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 07ba59067..d39a79a0c 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -1,47 +1,50 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio +import re import requests -import platform -import httpx -import json - -from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class AL(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class AL(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='AL') self.config = config + self.common = COMMON(config) self.tracker = 'AL' self.source_flag = 'al' - self.upload_url = 'https://animelovers.club/api/torrents/upload' - self.search_url = 'https://animelovers.club/api/torrents/filter' - self.torrent_url = 'https://animelovers.club/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant][color=#9400FF]AnimeLovers[/color][/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://animelovers.club/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def get_cat_id(self, category_name, meta): + async def get_additional_checks(self, meta): + should_continue = True + + if not meta["mal"]: + console.print("[bold red]MAL ID is missing, cannot upload to AL.[/bold red]") + meta["skipping"] = f'{self.tracker}' + return False + + return should_continue + + async def get_category_id(self, meta): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(meta['category'], '0') if 'HENTAI' in meta.get('mal_rating', "") or 'HENTAI' in str(meta.get('keywords', '')).upper(): - category_id = 7 + category_id = '7' - return category_id + return {'category_id': category_id} - async def get_type_id(self, type): + async def get_type_id(self, meta): 
type_id = { 'BDMV': '1', 'DISC': '1', @@ -56,10 +59,12 @@ async def get_type_id(self, type): 'BDRIP': '10', 'COLOR': '11', 'MONO': '12' - }.get(type, '1') - return type_id + }.get(meta['type'], '1') + return {'type_id': type_id} - async def get_res_id(self, resolution, bit_depth): + async def get_resolution_id(self, meta): + resolution = meta['resolution'] + bit_depth = meta.get('bit_depth', '') resolution_to_compare = resolution if bit_depth == "10": resolution_to_compare = f"{resolution} 10bit" @@ -80,9 +85,10 @@ async def get_res_id(self, resolution, bit_depth): '480p': '8', '480i': '9' }.get(resolution_to_compare, '10') - return resolution_id + return {'resolution_id': resolution_id} - async def edit_name(self, meta, mal_title=None): + async def get_name(self, meta): + mal_title = await self.get_mal_data(meta) category = meta['category'] title = '' try: @@ -154,21 +160,27 @@ async def edit_name(self, meta, mal_title=None): if len(video_encode.strip()) > 0: name += f" {video_encode.strip()}" - if tag == '': + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + tag = re.sub(f"-{invalid_tag}", "", tag, flags=re.IGNORECASE) tag = '-NoGroup' + if 'AVC' in video_codec and '264' in video_encode: name += f"{tag.strip()}" else: name += f" {video_codec}{tag.strip()}" console.print(f"[yellow]Corrected title : [green]{name}") - return name + return {'name': name} - async def get_mal_data(self, anime_id, meta): + async def get_mal_data(self, meta): + anime_id = meta['mal'] response = requests.get(f"https://api.jikan.moe/v4/anime/%7Banime_id%7D") content = response.json() title = content['data']['title'] if content['data']['title'] else None - meta['mal_rating'] = content['data']['rating'].upper() if content['data']['rating'] else None + meta['mal_rating'] = content['data']['rating'].upper() if content['data']['rating'] else "" return title async def format_audios(self, tracks): @@ -288,123 +300,3 @@ async def get_correct_audio_codec_str(self, audio_codec_str): return 'DD' else: return audio_codec_str - - async def upload(self, meta, disctype): - title = await self.get_mal_data(meta['mal'], meta) - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category'], meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution'], meta.get('bit_depth', '')) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - name = await self.edit_name(meta, mal_title=title) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': name, - 
'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - json_formatted_str = json.dumps(data, indent=4) - console.print(json_formatted_str) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution'], meta.get('bit_depth', '')), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 414c95156..6068c9b48 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -1,29 +1,26 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import os +import aiofiles import asyncio -import requests -import platform +import cli_ui import httpx import json -from pymediainfo import MediaInfo +import os +import platform +import re from pathlib import Path -from src.trackers.COMMON import COMMON +from src.bbcode import BBCODE from src.console import console +from src.get_desc import DescriptionBuilder from src.torrentcreate import create_torrent +from src.trackers.COMMON import COMMON -class ANT(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class ANT: def __init__(self, config): self.config = config + self.common = COMMON(config) self.tracker = 'ANT' self.source_flag = 'ANT' self.search_url = 'https://anthelion.me/api.php' @@ -37,7 +34,6 @@ def __init__(self, config): 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT' ] - self.signature = None pass async def get_flags(self, meta): @@ -48,7 +44,7 @@ async def get_flags(self, meta): for each in ['Dual-Audio', 'Atmos']: if each in meta['audio']: flags.append(each.replace('-', '')) - if meta.get('has_commentary', False): + if meta.get('has_commentary', False) or meta.get('manual_commentary', False): flags.append('Commentary') if meta['3D'] == "3D": flags.append('3D') @@ -62,86 +58,247 @@ async def get_flags(self, meta): flags.append('Remux') return flags + async def get_type(self, meta): + antType = None + imdb_info = meta.get('imdb_info', {}) + if imdb_info['type'] is not None: + imdbType = imdb_info.get('type', 'movie').lower() + if imdbType in ("movie", "tv movie", 'tvmovie'): + if int(imdb_info.get('runtime', '60')) >= 45 or int(imdb_info.get('runtime', '60')) == 0: + antType = 0 + else: + antType = 1 + if imdbType == "short": + antType = 1 + elif imdbType == "tv mini series": + antType = 2 + elif imdbType == "comedy": + antType = 3 + else: + keywords = meta.get("keywords", "").lower() + tmdb_type = meta.get("tmdb_type", "movie").lower() + if tmdb_type == "movie": + if int(meta.get('runtime', 60)) >= 45 or int(meta.get('runtime', 60)) == 0: + antType = 0 + else: + antType = 1 + if tmdb_type == "miniseries" or "miniseries" in keywords: + antType = 2 + if "short" in keywords or "short film" in keywords: + antType = 1 + elif "stand-up comedy" in keywords: + antType = 3 + + if antType is None: + if not meta['unattended']: + antTypeList = ["Feature Film", "Short Film", "Miniseries", "Other"] + choice = cli_ui.ask_choice("Select the proper type for ANT", choices=antTypeList) + # Map the choice back to the integer + type_map = { + "Feature Film": 0, + "Short Film": 1, + "Miniseries": 2, + "Other": 3 + } + antType = type_map.get(choice) + else: + if meta['debug']: + console.print(f"[bold red]{self.tracker} type could not be determined automatically in unattended 
mode.") + antType = 0 # Default to Feature Film in unattended mode + + return antType + async def upload(self, meta, disctype): - common = COMMON(config=self.config) torrent_filename = "BASE" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" torrent_file_size_kib = os.path.getsize(torrent_path) / 1024 + if meta.get('mkbrr', False): + tracker_url = self.config['TRACKERS']['ANT'].get('announce_url', "https://fake.tracker/").strip() + else: + tracker_url = '' # Trigger regeneration automatically if size constraints aren't met if torrent_file_size_kib > 250: # 250 KiB console.print("[yellow]Existing .torrent exceeds 250 KiB and will be regenerated to fit constraints.") - meta['max_piece_size'] = '256' # 256 MiB - create_torrent(meta, Path(meta['path']), "ANT") + meta['max_piece_size'] = '128' # 128 MiB + create_torrent(meta, Path(meta['path']), "ANT", tracker_url=tracker_url) torrent_filename = "ANT" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + await self.common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) flags = await self.get_flags(meta) - if meta['bdinfo'] is not None: - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' - path = os.path.join(meta['bdinfo']['path'], 'STREAM') - longest_file = max( - meta['bdinfo']['files'], - key=lambda x: x.get('length', 0) - ) - file_name = longest_file['file'].lower() - m2ts = os.path.join(path, file_name) - media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) - mi_dump = media_info_output.replace('\r\n', '\n') - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'file_input': open_torrent} + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() + files = {'file_input': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent')} data = { + 'type': await self.get_type(meta), + 'audioformat': await self.get_audio(meta), 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'action': 'upload', 'tmdbid': meta['tmdb'], - 'mediainfo': mi_dump, + 'mediainfo': await self.mediainfo(meta), 'flags[]': flags, - 'screenshots': '\n'.join([x['raw_url'] for x in meta['image_list']][:4]), + 'release_desc': await self.edit_desc(meta), } if meta['bdinfo'] is not None: data.update({ 'media': 'Blu-ray', - 'releasegroup': str(meta['tag'])[1:], - 'release_desc': bd_dump, - 'flagchangereason': "BDMV Uploaded with Upload Assistant"}) + 'releasegroup': str(meta['tag'])[1:] + }) if meta['scene']: # ID of "Scene?" 
checkbox on upload form is actually "censored" data['censored'] = 1 + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print('[bold red]Adult content detected[/bold red]') + if cli_ui.ask_yes_no("Are the screenshots safe?", default=False): + data.update({'screenshots': '\n'.join([x['raw_url'] for x in meta['image_list']][:4])}) + if meta.get('is_disc') == 'BDMV': + data.update({'flagchangereason': "(Adult with screens) BDMV Uploaded with Upload Assistant"}) + else: + data.update({'flagchangereason': "Adult with screens uploaded with Upload Assistant"}) + else: + data.update({'screenshots': ''}) # No screenshots for adult content + else: + data.update({'screenshots': ''}) + else: + data.update({'screenshots': '\n'.join([x['raw_url'] for x in meta['image_list']][:4])}) + + if meta.get('is_disc') == 'BDMV' and data.get('flagchangereason') is None: + data.update({'flagchangereason': "BDMV Uploaded with Upload Assistant"}) + headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.4 ({platform.system()} {platform.release()})' } try: if not meta['debug']: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) - if response.status_code in [200, 201]: - response_data = response.json() - meta['tracker_status'][self.tracker]['status_message'] = response_data - elif response.status_code == 502: - response_data = { - "error": "Bad Gateway", - "site seems down": "https://ant.trackerstatus.info/" - } - meta['tracker_status'][self.tracker]['status_message'] = f"data error - {response_data}" - else: - response_data = { - "error": f"Unexpected status code: {response.status_code}", - "response_content": response.text - } - meta['tracker_status'][self.tracker]['status_message'] = f"data error - {response_data}" + async with httpx.AsyncClient(timeout=10) as client: + response = await client.post(url=self.upload_url, files=files, data=data, headers=headers) + if response.status_code in [200, 201]: + try: + response_data = response.json() + except json.JSONDecodeError: + meta['tracker_status'][self.tracker]['status_message'] = "data error: ANT json decode error, the API is probably down" + return + if "Success" not in response_data: + meta['tracker_status'][self.tracker]['status_message'] = f"data error - {response_data}" + if meta.get('tag', '') and 'HONE' in meta.get('tag', ''): + meta['tracker_status'][self.tracker]['status_message'] = f"{response_data} - HONE release, fix tag at ANT" + else: + meta['tracker_status'][self.tracker]['status_message'] = response_data + elif response.status_code == 502: + response_data = { + "error": "Bad Gateway", + "site seems down": "https://ant.trackerstatus.info/" + } + meta['tracker_status'][self.tracker]['status_message'] = f"data error - {response_data}" + else: + response_data = { + "error": f"Unexpected status code: {response.status_code}", + "response_content": response.text + } + meta['tracker_status'][self.tracker]['status_message'] = f"data error - {response_data}" else: - console.print("[cyan]Request Data:") + console.print("[cyan]ANT Request Data:") console.print(data) meta['tracker_status'][self.tracker]['status_message'] = "Debug 
mode enabled, not uploading." - finally: - open_torrent.close() + except Exception as e: + meta['tracker_status'][self.tracker]['status_message'] = f"data error: ANT upload failed: {e}" + + async def get_audio(self, meta): + ''' + Possible values: + MP2, MP3, AAC, AC3, DTS, FLAC, PCM, True-HD, Opus + ''' + audio = meta.get('audio', '').upper() + audio_map = { + 'MP2': 'MP2', + 'MP3': 'MP3', + 'AAC': 'AAC', + 'DD': 'AC3', + 'DTS': 'DTS', + 'FLAC': 'FLAC', + 'PCM': 'PCM', + 'TRUEHD': 'True-HD', + 'OPUS': 'Opus' + } + for key, value in audio_map.items(): + if key in audio: + return value + console.print(f'{self.tracker}: Unexpected audio format: {audio}. The format must be one of the following: MP2, MP3, AAC, AC3, DTS, FLAC, PCM, True-HD, Opus') + return None + + async def mediainfo(self, meta): + if meta.get('is_disc') == 'BDMV': + mediainfo = await self.common.get_bdmv_mediainfo(meta, remove=['File size', 'Overall bit rate']) + else: + mi_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + async with aiofiles.open(mi_path, 'r', encoding='utf-8') as f: + mediainfo = await f.read() + + return mediainfo async def edit_desc(self, meta): - return + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Avoid unnecessary descriptions, adding only the logo if there is a user description + user_desc = await builder.get_user_description(meta) + if user_desc: + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[align=center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/align]") + + # BDinfo + bdinfo = await builder.get_bdinfo_section(meta) + if bdinfo: + desc_parts.append(f"[spoiler=BDInfo][pre]{bdinfo}[/pre][/spoiler]") + + if user_desc: + # User description + desc_parts.append(user_desc) + + # Disc menus screenshots + menu_images = meta.get("menu_images", []) + if menu_images: + desc_parts.append(await builder.menu_screenshot_header(meta, self.tracker)) + + # Disc menus screenshots + menu_screenshots_block = "" + for image in menu_images: + menu_raw_url = image.get("raw_url") + if menu_raw_url: + menu_screenshots_block += f"[img]{menu_raw_url}[/img] " + if menu_screenshots_block: + desc_parts.append(f"[align=center]{menu_screenshots_block}[/align]") + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.convert_to_align(description) + description = bbcode.remove_img_resize(description) + description = bbcode.remove_sup(description) + description = bbcode.remove_sub(description) + description = description.replace('•', '-').replace('’', "'").replace('–', '-') + description = bbcode.remove_extra_lines(description) + description = description.strip() + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description async def search_existing(self, meta, disctype): if meta.get('category') == "TV": @@ -149,6 +306,13 @@ async def search_existing(self, meta, disctype): console.print('[bold red]ANT only ALLOWS Movies.') meta['skipping'] = "ANT" return [] + + if meta.get('bloated', False): + if not meta['unattended']: + console.print('[bold red]ANT does not allow bloated releases.') + meta['skipping'] = "ANT" + return [] 
+ dupes = [] params = { 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), @@ -178,14 +342,20 @@ async def search_existing(self, meta, disctype): if 'files' in each and len(each['files']) > 0: largest = each['files'][0] for file in each['files']: - if int(file.get('size', 0)) > int(largest.get('size', 0)): + current_size = int(file.get('size', 0)) + largest_size = int(largest.get('size', 0)) + if current_size > largest_size: largest = file largest_file = largest.get('name', '') result = { 'name': largest_file or each.get('fileName', ''), + 'files': [file.get('name', '') for file in each.get('files', [])], 'size': int(each.get('size', 0)), - 'flags': each.get('flags', []) + 'link': each.get('guid', ''), + 'flags': each.get('flags', []), + 'file_count': each.get('fileCount', 0), + 'download': each.get('link', '').replace('&', '&'), } dupes.append(result) @@ -193,20 +363,67 @@ async def search_existing(self, meta, disctype): console.print(f"[green]Found potential dupe: {result['name']} ({result['size']} bytes)") except json.JSONDecodeError: - console.print("[bold yellow]ANT Response content is not valid JSON. Skipping this API call.") + console.print("[bold yellow]ANT response content is not valid JSON. Skipping this API call.") meta['skipping'] = "ANT" else: - console.print(f"[bold red]ANT Failed to search torrents. HTTP Status: {response.status_code}") + console.print(f"[bold red]ANT failed to search torrents. HTTP Status: {response.status_code}") meta['skipping'] = "ANT" except httpx.TimeoutException: console.print("[bold red]ANT Request timed out after 5 seconds") meta['skipping'] = "ANT" except httpx.RequestError as e: - console.print(f"[bold red]ANT Unable to search for existing torrents: {e}") + console.print(f"[bold red]ANT unable to search for existing torrents: {e}") meta['skipping'] = "ANT" except Exception as e: - console.print(f"[bold red]ANT Unexpected error: {e}") + console.print(f"[bold red]ANT unexpected error: {e}") meta['skipping'] = "ANT" await asyncio.sleep(5) return dupes + + async def get_data_from_files(self, meta): + if meta.get('is_disc', False): + return [] + filelist = meta.get('filelist', []) + filename = [os.path.basename(f) for f in filelist][0] + params = { + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 't': 'search', + 'filename': filename, + 'o': 'json' + } + + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url='https://anthelion.me/api', params=params) + if response.status_code == 200: + try: + data = response.json() + imdb_tmdb_list = [] + items = data.get('item', []) + if len(items) == 1: + each = items[0] + imdb_id = each.get('imdb') + tmdb_id = each.get('tmdb') + if imdb_id and imdb_id.startswith('tt'): + imdb_num = int(imdb_id[2:]) + imdb_tmdb_list.append({'imdb_id': imdb_num}) + if tmdb_id and str(tmdb_id).isdigit() and int(tmdb_id) != 0: + imdb_tmdb_list.append({'tmdb_id': int(tmdb_id)}) + except json.JSONDecodeError: + console.print("[bold yellow]Error parsing JSON response from ANT") + imdb_tmdb_list = [] + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + imdb_tmdb_list = [] + except httpx.TimeoutException: + console.print("[bold red]ANT Request timed out after 5 seconds") + imdb_tmdb_list = [] + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + imdb_tmdb_list = [] + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + imdb_tmdb_list = [] + + return imdb_tmdb_list diff --git a/src/trackers/AR.py b/src/trackers/AR.py index 531353c30..a3d671052 100644 --- a/src/trackers/AR.py +++ b/src/trackers/AR.py @@ -1,17 +1,18 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- import aiofiles +import httpx import json import os -import pickle import platform import re import asyncio -import signal from rich.prompt import Prompt import urllib.parse from src.exceptions import * # noqa F403 from bs4 import BeautifulSoup from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader from src.trackers.COMMON import COMMON from pymediainfo import MediaInfo @@ -19,7 +20,8 @@ class AR(): def __init__(self, config): self.config = config - self.session = None + self.cookie_validator = CookieValidator(config) + self.cookie_uploader = CookieAuthUploader(config) self.tracker = 'AR' self.source_flag = 'AlphaRatio' self.username = config['TRACKERS']['AR'].get('username', '').strip() @@ -28,12 +30,14 @@ def __init__(self, config): self.login_url = f'{self.base_url}/login.php' self.upload_url = f'{self.base_url}/upload.php' self.search_url = f'{self.base_url}/torrents.php' - self.user_agent = f'UA ({platform.system()} {platform.release()})' - self.signature = None + self.test_url = f'{self.base_url}/torrents.php' + self.torrent_url = f'{self.base_url}/torrents.php?id=' + self.user_agent = f'Upload Assistant/2.3 ({platform.system()} {platform.release()})' self.banned_groups = [] async def get_type(self, meta): - + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] if (meta['type'] == 'DISC' or meta['type'] == 'REMUX') and meta['source'] == 'Blu-ray': return "14" @@ -81,6 +85,8 @@ async def get_type(self, meta): if meta['category'] == "MOVIE": if meta['sd']: return '7' + elif any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + return '13' else: return { '8640p': '9', @@ -92,167 +98,31 @@ async def get_type(self, meta): '720p': '8', }.get(meta['resolution'], '7') - async def start_session(self): - import aiohttp - if self.session is not None: - console.print("[dim red]Warning: Previous session was not closed properly. Closing it now.") - await self.close_session() - self.session = aiohttp.ClientSession() - - self.attach_signal_handlers() - return aiohttp - - async def close_session(self): - if self.session is not None: - await self.session.close() - self.session = None - - def attach_signal_handlers(self): - loop = asyncio.get_running_loop() - - for sig in (signal.SIGINT, signal.SIGTERM): - try: - loop.add_signal_handler(sig, lambda: asyncio.create_task(self.handle_shutdown(sig))) - except NotImplementedError: - pass - - async def handle_shutdown(self, sig): - console.print(f"[red]Received shutdown signal {sig}. Closing session...[/red]") - await self.close_session() - async def validate_credentials(self, meta): - if self.session: - console.print("[red dim]Warning: Previous session was not closed properly. 
Using existing session.") - else: - try: - await self.start_session() - except asyncio.CancelledError: - console.print("[red]Session startup interrupted! Cleaning up...[/red]") - await self.close_session() - raise - - if await self.load_session(meta): - response = await self.get_initial_response() - if await self.validate_login(response): - return True - else: - console.print("[yellow]No session file found. Attempting to log in...") - if await self.login(): - console.print("[green]Login successful, saving session file.") - valid = await self.save_session(meta) - if valid: - if meta['debug']: - console.print("[blue]Session file saved successfully.") - return True - else: - return False - else: - console.print('[red]Failed to validate credentials. Please confirm that the site is up and your passkey is valid. Exiting') - - await self.close_session() - return False - - async def get_initial_response(self): - async with self.session.get(self.login_url) as response: - return await response.text() - - async def validate_login(self, response_text): - if 'login.php?act=recover' in response_text: - console.print("[red]Login failed. Check your credentials.") - return False - return True - - async def login(self): - data = { - "username": self.username, - "password": self.password, - "keeplogged": "1", - "login": "Login", - } - async with self.session.post(self.login_url, data=data) as response: - if await self.validate_login(await response.text()): - return True - return False - - async def save_session(self, meta): - try: - session_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.pkl") - os.makedirs(os.path.dirname(session_file), exist_ok=True) - cookies = self.session.cookie_jar - cookie_dict = {} - for cookie in cookies: - cookie_dict[cookie.key] = cookie.value - - with open(session_file, 'wb') as f: - pickle.dump(cookie_dict, f) - except Exception as e: - console.print(f"[red]Error saving session: {e}[/red]") - return False - - async def load_session(self, meta): - import aiohttp - session_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.pkl") - retry_count = 0 - max_retries = 2 - - while retry_count < max_retries: - try: - if not os.path.exists(session_file): - console.print(f"[red]Session file not found: {session_file}[/red]") - return False # No session file to load - - with open(session_file, 'rb') as f: - try: - cookie_dict = pickle.load(f) - except (EOFError, pickle.UnpicklingError) as e: - console.print(f"[red]Error loading session cookies: {e}[/red]") - return False # Corrupted session file - - if self.session is None: - await self.start_session() - - for name, value in cookie_dict.items(): - self.session.cookie_jar.update_cookies({name: value}) - - try: - async with self.session.get(f'{self.base_url}/torrents.php', timeout=10) as response: - if response.status == 200: - console.print("[green]Session validated successfully.[/green]") - return True # Session is valid - else: - console.print(f"[yellow]Session validation failed with status {response.status}, retrying...[/yellow]") - - except (aiohttp.ClientError, asyncio.TimeoutError) as e: - console.print(f"[yellow]Session might be invalid: {e}. Retrying...[/yellow]") - - except (FileNotFoundError, EOFError, pickle.UnpicklingError) as e: - console.print(f"[red]Session loading error: {e}. 
Closing session and retrying.[/red]") - - await self.close_session() - await self.start_session() - await self.validate_credentials(meta) - retry_count += 1 - - console.print("[red]Failed to reuse session after retries. Either try again or delete the cookie.[/red]") - return False + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=self.test_url, + error_text="login.php?act=recover", + ) def get_links(self, movie, subheading, heading_end): description = "" description += "\n" + subheading + "Links" + heading_end + "\n" if 'IMAGES' in self.config: if movie['imdb_id'] != 0: - description += f"[URL=https://www.imdb.com/title/tt{movie['imdb']}][img]{self.config['IMAGES']['imdb_75']}[/img][/URL]" + description += f"[url={movie.get('imdb_info', {}).get('imdb_url', '')}][img]{self.config['IMAGES']['imdb_75']}[/img][/url]" if movie['tmdb'] != 0: - description += f" [URL=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.config['IMAGES']['tmdb_75']}[/img][/URL]" + description += f" [url=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.config['IMAGES']['tmdb_75']}[/img][/url]" if movie['tvdb_id'] != 0: - description += f" [URL=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.config['IMAGES']['tvdb_75']}[/img][/URL]" + description += f" [url=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.config['IMAGES']['tvdb_75']}[/img][/url]" if movie['tvmaze_id'] != 0: - description += f" [URL=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.config['IMAGES']['tvmaze_75']}[/img][/URL]" + description += f" [url=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.config['IMAGES']['tvmaze_75']}[/img][/url]" if movie['mal_id'] != 0: - description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.config['IMAGES']['mal_75']}[/img][/URL]" + description += f" [url=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.config['IMAGES']['mal_75']}[/img][/url]" else: if movie['imdb_id'] != 0: - description += f"https://www.imdb.com/title/tt%7Bmovie['imdb']}" + description += f"{movie.get('imdb_info', {}).get('imdb_url', '')}" if movie['tmdb'] != 0: description += f"\nhttps://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}" if movie['tvdb_id'] != 0: @@ -264,10 +134,11 @@ def get_links(self, movie, subheading, heading_end): return description async def edit_desc(self, meta): - heading = "[COLOR=GREEN][size=6]" - subheading = "[COLOR=RED][size=4]" - heading_end = "[/size][/COLOR]" - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() + heading = "[color=green][size=6]" + subheading = "[color=red][size=4]" + heading_end = "[/size][/color]" + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8') as f: + base = await f.read() base = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', base, flags=re.DOTALL) base = re.sub(r'\[center\]\[spoiler=FraMeSToR NFO:\].*?\[/center\]', '', base, flags=re.DOTALL) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: @@ -297,18 +168,19 @@ async def edit_desc(self, meta): # can not use full media info as sometimes its more than max chars per post. 
mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/summary-mediainfo.csv") if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) + media_info = await self.parse_mediainfo_async(video, mi_template) description += (f"""[code]\n{media_info}\n[/code]\n""") # adding full mediainfo as spoiler - full_mediainfo = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: + full_mediainfo = await MI.read() description += f"[hide=FULL MEDIAINFO][code]{full_mediainfo}[/code][/hide]\n" else: console.print("[bold red]Couldn't find the MediaInfo template") console.print("[green]Using normal MediaInfo for the description.") - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', - encoding='utf-8') as MI: - description += (f"""[code]\n{MI.read()}\n[/code]\n\n""") + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: + cleaned_mediainfo = await MI.read() + description += (f"""[code]\n{cleaned_mediainfo}\n[/code]\n\n""") description += "\n\n" + subheading + "PLOT" + heading_end + "\n" + str(meta['overview']) if meta['genres']: @@ -328,18 +200,19 @@ async def edit_desc(self, meta): if len(base) > 2: description += "\n\n" + subheading + "Notes" + heading_end + "\n" + str(base) - descfile.write(description) - descfile.close() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: + await descfile.write(description) return - def get_language_tag(self, meta): + async def get_language_tag(self, meta): lang_tag = "" has_eng_audio = False audio_lang = "" if meta['is_disc'] != "BDMV": try: - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + async with aiofiles.open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi_content = await f.read() + mi = json.loads(mi_content) for track in mi['media']['track']: if track['@type'] == "Audio": if track.get('Language', 'None').startswith('en'): @@ -358,197 +231,221 @@ def get_language_tag(self, meta): lang_tag = audio_lang return lang_tag - def get_basename(self, meta): + async def get_basename(self, meta): path = next(iter(meta['filelist']), meta['path']) return os.path.basename(path) async def search_existing(self, meta, DISCTYPE): - dupes = {} + dupes = [] + cookie_jar = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if not cookie_jar: + console.print(f"{self.tracker}: Cannot search without valid cookies.") + return dupes # Combine title and year title = str(meta.get('title', '')).strip() year = str(meta.get('year', '')).strip() if not title: - await self.close_session() console.print("[red]Title is missing.") return dupes - search_query = f"{title} {year}".strip() # Concatenate title and year + search_query = f"{title} {year}".strip() search_query_encoded = urllib.parse.quote(search_query) - search_url = f'{self.base_url}/ajax.php?action=browse&searchstr={search_query_encoded}' if meta.get('debug', False): console.print(f"[blue]{search_url}") + headers = { + "User-Agent": f"Upload Assistant {meta.get('current_version', 'github.com/Audionut/Upload-Assistant')}" + } + try: - async 
with self.session.get(search_url) as response: - if response.status != 200: - await self.close_session() + async with httpx.AsyncClient(headers=headers, timeout=30.0, cookies=cookie_jar) as client: + response = await client.get(search_url) + + if response.status_code != 200: console.print("[bold red]Request failed. Site May be down") return dupes - json_response = await response.json() + json_response = response.json() if json_response.get('status') != 'success': - await self.close_session() console.print("[red]Invalid response status.") return dupes results = json_response.get('response', {}).get('results', []) if not results: - await self.close_session() return dupes - group_names = [result['groupName'] for result in results if 'groupName' in result] + for res in results: + if 'groupName' in res: + dupe = { + 'name': res['groupName'], + 'size': res['size'], + 'files': res['groupName'], + 'file_count': res['fileCount'], + 'link': f'{self.search_url}?id={res["groupId"]}&torrentid={res["torrentId"]}', + 'download': f'{self.base_url}/torrents.php?action=download&id={res["torrentId"]}', + } + dupes.append(dupe) - if group_names: - dupes = group_names - else: - console.print("[yellow]No valid group names found.") + return dupes except Exception as e: console.print(f"[red]Error occurred: {e}") + return dupes + + async def get_auth_key(self, meta): + """Retrieve the saved auth key from cookie_auth.py.""" + auth_key = await self.cookie_validator.get_ar_auth_key(meta, self.tracker) + if auth_key: + return auth_key - if meta['debug']: - console.print(f"[blue]{dupes}") - await self.close_session() - return dupes + console.print(f"{self.tracker}: [yellow]Auth key not found. This may happen if you're using manually exported cookies.[/yellow]") + console.print(f"{self.tracker}: [yellow]Attempting to extract auth key from torrents page...[/yellow]") - def _has_existing_torrents(self, response_text): - """Check the response text for existing torrents.""" - return 'Your search did not match anything.' 
not in response_text + # Fallback: extract from torrents page if not saved + cookie_jar = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if not cookie_jar: + return None - def extract_auth_key(self, response_text): - soup = BeautifulSoup(response_text, 'html.parser') - logout_link = soup.find('a', href=True, text='Logout') + headers = { + "User-Agent": f"Upload Assistant {meta.get('current_version', 'github.com/Audionut/Upload-Assistant')}" + } + + try: + async with httpx.AsyncClient(headers=headers, timeout=30.0, cookies=cookie_jar) as client: + response = await client.get(self.test_url) + soup = BeautifulSoup(response.text, 'html.parser') + logout_link = soup.find('a', href=True, text='Logout') + + if logout_link: + href = logout_link['href'] + match = re.search(r'auth=([^&]+)', href) + if match: + auth_key = match.group(1) + # Save it for next time + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.txt") + auth_file = cookie_file.replace('.txt', '_auth.txt') + try: + async with aiofiles.open(auth_file, 'w', encoding='utf-8') as f: + await f.write(auth_key) + console.print(f"{self.tracker}: [green]Auth key saved for future use[/green]") + except Exception: + pass + return auth_key + except Exception as e: + console.print(f"[red]Error extracting auth key: {e}") - if logout_link: - href = logout_link['href'] - match = re.search(r'auth=([^&]+)', href) - if match: - return match.group(1) return None async def upload(self, meta, disctype): + """Upload torrent to AR using centralized cookie_upload.""" + # Prepare the data for the upload + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await self.edit_desc(meta) + type_id = await self.get_type(meta) + + # Read the description + desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" try: - # Prepare the data for the upload - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await self.edit_desc(meta) - type = await self.get_type(meta) - # Read the description - desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - try: - async with aiofiles.open(desc_path, 'r', encoding='utf-8') as desc_file: - desc = await desc_file.read() - except FileNotFoundError: - raise Exception(f"Description file not found at {desc_path} ") - - # Handle cover image input - cover = meta.get('poster', None) or meta["imdb_info"].get("cover", None) - while cover is None and not meta.get("unattended", False): - cover = Prompt.ask("No Poster was found. Please input a link to a poster:", default="") - if not re.match(r'https?://.*\.(jpg|png|gif)$', cover): - console.print("[red]Invalid image link. Please enter a link that ends with .jpg, .png, or .gif.") - cover = None - # Tag Compilation - genres = meta.get('genres') - if genres: - genres = ', '.join(tag.strip('.') for tag in (item.replace(' ', '.') for item in genres.split(','))) - genres = re.sub(r'\.{2,}', '.', genres) - # adding tags - tags = "" - if meta['imdb_id'] != 0: - tags += f"tt{meta.get('imdb', '')}, " - # no special chars can be used in tags. keep to minimum working tags only. 
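For context on the tag rule noted above: the replacement logic later in this hunk splits the genre string on commas and ampersands before rejoining, so a TMDB genre value such as "Action & Adventure, Sci-Fi" ends up as three comma-separated tags. A minimal sketch of that transformation; the helper name is illustrative:

    def split_genres(genres: str) -> str:
        # "Action & Adventure, Sci-Fi" -> "Action, Adventure, Sci-Fi"
        tags = []
        for item in genres.split(','):
            for subitem in item.split('&'):
                tags.append(subitem.strip())
        return ', '.join(tags)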
- tags += f"{genres}, " - # Get initial response and extract auth key - initial_response = await self.get_initial_response() - auth_key = self.extract_auth_key(initial_response) - # Access the session cookie - cookies = self.session.cookie_jar.filter_cookies(self.upload_url) - session_cookie = cookies.get('session') - if not session_cookie: - raise Exception("Session cookie not found.") - - # must use scene name if scene release - KNOWN_EXTENSIONS = {".mkv", ".mp4", ".avi", ".ts"} - if meta['scene']: - ar_name = meta.get('scene_name') - else: - ar_name = meta['uuid'] - base, ext = os.path.splitext(ar_name) - if ext.lower() in KNOWN_EXTENSIONS: - ar_name = base - ar_name = ar_name.replace(' ', ".").replace("'", '').replace(':', '').replace("(", '.').replace(")", '.').replace("[", '.').replace("]", '.').replace("{", '.').replace("}", '.') - - if meta['tag'] == "": - # replacing spaces with . as per rules - ar_name += "-NoGRP" - - data = { - "submit": "true", - "auth": auth_key, - "type": type, - "title": ar_name, - "tags": tags, - "image": cover, - "desc": desc, - } - - headers = { - "User-Agent": self.user_agent, - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8", - "Origin": f'{self.base_url}', - "Referer": f'{self.base_url}/upload.php', - "Cookie": f"session={session_cookie.value}", - } - - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - - if meta['debug'] is False: - import aiohttp - try: - async with aiofiles.open(torrent_path, 'rb') as torrent_file: - # Use a single session for all requests - async with aiohttp.ClientSession() as session: - form = aiohttp.FormData() - for key, value in data.items(): - form.add_field(key, value) - form.add_field('file_input', torrent_file, filename=f"{self.tracker}.torrent") - - # Perform the upload - try: - async with session.post(self.upload_url, data=form, headers=headers) as response: - if response.status == 200: - # URL format in case of successful upload: https://alpharatio.cc/torrents.php?id=2989202 - meta['tracker_status'][self.tracker]['status_message'] = str(response.url) - match = re.match(r".*?alpharatio\.cc/torrents\.php\?id=(\d+)", str(response.url)) - if match is None: - await self.close_session() - meta['tracker_status'][self.tracker]['status_message'] = f"data error - failed: result URL {response.url} ({response.status}) is not the expected one." - - # having UA add the torrent link as a comment. - if match: - await self.close_session() - common = COMMON(config=self.config) - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), str(response.url)) - - else: - meta['tracker_status'][self.tracker]['status_message'] = "data error - Response was not 200." - except Exception: - await self.close_session() - meta['tracker_status'][self.tracker]['status_message'] = "data error - It may have uploaded, go check" - return - except FileNotFoundError: - meta['tracker_status'][self.tracker]['status_message'] = f"data error - File not found: {torrent_path}" - return aiohttp - else: - await self.close_session() - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
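The removed block above performed the multipart POST inline with aiohttp; the replacement further below delegates to CookieAuthUploader.handle_upload, whose implementation is not part of this diff. A rough sketch of what such a cookie-authenticated upload helper might do, using httpx as the rest of this PR does; the function name, signature, and parameters here are illustrative assumptions, not the actual helper:

    import os
    import httpx

    async def post_torrent(upload_url, data, cookies, torrent_path, field_name='file_input'):
        # Read the .torrent and send it with the form fields as multipart/form-data,
        # authenticated via the saved session cookies.
        with open(torrent_path, 'rb') as f:
            torrent_bytes = f.read()
        files = {field_name: (os.path.basename(torrent_path), torrent_bytes, 'application/x-bittorrent')}
        async with httpx.AsyncClient(cookies=cookies, timeout=60.0, follow_redirects=True) as client:
            return await client.post(upload_url, data=data, files=files)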
- except Exception as e: - await self.close_session() - meta['tracker_status'][self.tracker]['status_message'] = f"data error - Upload failed: {e}" + async with aiofiles.open(desc_path, 'r', encoding='utf-8') as desc_file: + desc = await desc_file.read() + except FileNotFoundError: + meta['tracker_status'][self.tracker]['status_message'] = f"data error: Description file not found at {desc_path}" + return + + # Handle cover image input + cover = meta.get('poster', None) or meta["imdb_info"].get("cover", None) + while cover is None and not meta.get("unattended", False): + cover = Prompt.ask("No Poster was found. Please input a link to a poster:", default="") + if not re.match(r'https?://.*\.(jpg|png|gif)$', cover): + console.print("[red]Invalid image link. Please enter a link that ends with .jpg, .png, or .gif.") + cover = None + + # Tag Compilation + genres = meta.get('genres') + if genres: + tags = [] + for item in genres.split(','): + for subitem in item.split('&'): + tags.append(subitem.strip()) + genres = ', '.join(tags) + + genres = re.sub(r'\.{2,}', '.', genres) + + # adding tags + tags = "" + if meta['imdb_id'] != 0: + tags += f"tt{meta.get('imdb', '')}, " + tags += f"{genres}, " + + # Get auth key + auth_key = await self.get_auth_key(meta) + if not auth_key: + meta['tracker_status'][self.tracker]['status_message'] = "data error: Failed to extract auth key" return + + # must use scene name if scene release + KNOWN_EXTENSIONS = {".mkv", ".mp4", ".avi", ".ts"} + if meta['scene']: + ar_name = meta.get('scene_name') + else: + ar_name = meta['uuid'] + base, ext = os.path.splitext(ar_name) + if ext.lower() in KNOWN_EXTENSIONS: + ar_name = base + ar_name = ar_name.replace(' ', ".").replace("'", '').replace(':', '').replace("(", '.').replace(")", '.').replace("[", '.').replace("]", '.').replace("{", '.').replace("}", '.') + ar_name = re.sub(r'\.{2,}', '.', ar_name) + + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + ar_name = re.sub(f"-{invalid_tag}", "", ar_name, flags=re.IGNORECASE) + ar_name = f"{ar_name}-NoGRP" + + # Prepare upload data + data = { + "submit": "true", + "auth": auth_key, + "type": type_id, + "title": ar_name, + "tags": tags, + "image": cover, + "desc": desc, + } + + # Load cookies for upload + upload_cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if not upload_cookies: + meta['tracker_status'][self.tracker]['status_message'] = "data error:Failed to load cookies for upload" + return + + # Use centralized handle_upload from CookieAuthUploader + await self.cookie_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + data=data, + upload_cookies=upload_cookies, + upload_url=self.upload_url, + torrent_field_name="file_input", + source_flag=self.source_flag, + torrent_url=self.torrent_url, + id_pattern=r'torrents\.php\?id=(\d+)', + success_status_code="200", + ) + + async def parse_mediainfo_async(self, video_path, template_path): + """Parse MediaInfo asynchronously using thread executor""" + loop = asyncio.get_running_loop() + return await loop.run_in_executor( + None, + lambda: MediaInfo.parse( + video_path, + output="STRING", + full=False, + mediainfo_options={"inform": f"file://{template_path}"} + ) + ) diff --git a/src/trackers/ASC.py b/src/trackers/ASC.py index 074790f1e..229a5b83a 100644 --- a/src/trackers/ASC.py +++ b/src/trackers/ASC.py @@ -1,187 +1,278 @@ +# 
Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- +import aiofiles import asyncio +import json import httpx import os +import platform import re -import requests -from .COMMON import COMMON from bs4 import BeautifulSoup from datetime import datetime from pymediainfo import MediaInfo from src.console import console -from src.exceptions import UploadException +from src.cookie_auth import CookieValidator, CookieAuthUploader from src.languages import process_desc_language +from src.tmdb import get_tmdb_localized_data +from src.trackers.COMMON import COMMON -class ASC(COMMON): +class ASC: def __init__(self, config): - super().__init__(config) + self.config = config + self.common = COMMON(config) + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) self.tracker = 'ASC' self.source_flag = 'ASC' - self.banned_groups = [""] - self.base_url = "https://cliente.amigos-share.club/" + self.banned_groups = [] + self.base_url = 'https://cliente.amigos-share.club/' + self.torrent_url = 'https://cliente.amigos-share.club/torrents-details.php?id=' + self.requests_url = f'{self.base_url}/pedidos.php' self.layout = self.config['TRACKERS'][self.tracker].get('custom_layout', '2') - self.session = requests.Session() - self.session.headers.update({ - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' - }) - self.signature = "[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - - def assign_media_properties(self, meta): - self.imdb_id = meta['imdb_info']['imdbID'] - self.tmdb_id = meta['tmdb'] - self.category = meta['category'] - self.season = meta.get('season', '') - self.episode = meta.get('episode', '') - - async def get_title(self, meta): - self.assign_media_properties(meta) - tmdb_ptbr_data = await self.main_tmdb_data(meta) - name = meta['title'] - base_name = name - - if self.category == 'TV': - tv_title_ptbr = tmdb_ptbr_data['name'] - if tv_title_ptbr and tv_title_ptbr.lower() != name.lower(): - base_name = f"{tv_title_ptbr} ({name})" - - return f"{base_name} - {self.season}{self.episode}" - - else: - movie_title_ptbr = tmdb_ptbr_data['title'] - if movie_title_ptbr and movie_title_ptbr.lower() != name.lower(): - base_name = f"{movie_title_ptbr} ({name})" - - return f"{base_name}" + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f'Upload Assistant ({platform.system()} {platform.release()})' + }, timeout=60.0) + + self.language_map = { + 'bg': '15', 'da': '12', + 'de': '3', 'en': '1', + 'es': '6', 'fi': '14', + 'fr': '2', 'hi': '23', + 'it': '4', 'ja': '5', + 'ko': '20', 'nl': '17', + 'no': '16', 'pl': '19', + 'pt': '8', 'ru': '7', + 'sv': '13', 'th': '21', + 'tr': '25', 'zh': '10', + } + self.anime_language_map = { + 'de': '3', 'en': '4', + 'es': '1', 'ja': '8', + 'ko': '11', 'pt': '5', + 'ru': '2', 'zh': '9', + } - async def _determine_language_properties(self, meta): - subtitled = '1' - dual_audio = '2' - dubbed = '3' - original_dub = '4' + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/gerador.php', + error_text='Esqueceu sua senha', + ) - no_subs = '0' - embedded_subs = '1' + async def load_localized_data(self, meta): + 
localized_data_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/tmdb_localized_data.json" + tmdb_data = {} + self.main_tmdb_data = {} + self.season_tmdb_data = {} + self.episode_tmdb_data = {} - if not meta.get('audio_languages') or not meta.get('subtitle_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) + try: + async with aiofiles.open(localized_data_file, 'r', encoding='utf-8') as f: + content = await f.read() + try: + tmdb_data = json.loads(content) + except json.JSONDecodeError as e: + console.print(f"[red]Warning: JSON decode error in {localized_data_file}: {e}. Continuing with empty data.[/red]") + tmdb_data = {} + except (FileNotFoundError, json.JSONDecodeError): + pass + + local_results = { + 'main': tmdb_data.get('pt-BR', {}).get('main'), + 'season': tmdb_data.get('pt-BR', {}).get('season'), + 'episode': tmdb_data.get('pt-BR', {}).get('episode') + } - portuguese_languages = ['Portuguese', 'Português'] - audio_languages = set(meta.get('audio_languages', [])) - has_pt_subtitle = any(lang in meta.get('subtitle_languages', []) for lang in portuguese_languages) - has_pt_audio = any(lang in meta.get('audio_languages', []) for lang in portuguese_languages) - is_pt_original_language = meta.get('original_language', '') == 'pt' + tasks_to_run = [] - if not has_pt_audio and not has_pt_subtitle: - if not meta.get('unattended'): - console.print('[bold red]ASC requer pelo menos uma faixa de áudio ou legenda em português.[/bold red]') - meta['skipping'] = "ASC" - return None, None + if local_results['main']: + self.main_tmdb_data = local_results['main'] + else: + tasks_to_run.append( + ('main', get_tmdb_localized_data(meta, data_type='main', language='pt-BR', append_to_response='credits,videos,content_ratings')) + ) - subtitle = embedded_subs if has_pt_subtitle else no_subs + if meta.get('category') == 'TV': + if local_results['season']: + self.season_tmdb_data = local_results['season'] + else: + tasks_to_run.append( + ('season', get_tmdb_localized_data(meta, data_type='season', language='pt-BR', append_to_response='')) + ) - audio = None - if has_pt_audio: - if is_pt_original_language: - audio = original_dub - elif len(audio_languages) > 1: - audio = dual_audio + if meta.get('category') == 'TV' and not meta.get('tv_pack', False): + if local_results['episode']: + self.episode_tmdb_data = local_results['episode'] else: - audio = dubbed - elif has_pt_subtitle: - audio = subtitled + tasks_to_run.append( + ('episode', get_tmdb_localized_data(meta, data_type='episode', language='pt-BR', append_to_response='')) + ) - return subtitle, audio + if tasks_to_run: + data_types, coroutines = zip(*tasks_to_run) - def get_res_id(self, meta): + try: + api_results = await asyncio.gather(*coroutines) + + for data_type, result_data in zip(data_types, api_results, strict=True): + if result_data: # Only assign if result_data is not None + if data_type == 'main': + self.main_tmdb_data = result_data + elif data_type == 'season': + self.season_tmdb_data = result_data + elif data_type == 'episode': + self.episode_tmdb_data = result_data + except Exception as e: + console.print(f"[red]Error loading TMDB data: {e}[/red]") + # Ensure we have at least empty dicts to prevent KeyErrors + if not self.main_tmdb_data: + self.main_tmdb_data = {} + if not self.season_tmdb_data: + self.season_tmdb_data = {} + if not self.episode_tmdb_data: + self.episode_tmdb_data = {} + + async def get_container(self, meta): if meta['is_disc'] == 'BDMV': - res_map = {'2160p': ('3840', '2160'), '1080p': ('1920', 
'1080'), '1080i': ('1920', '1080'), '720p': ('1280', '720')} - return res_map[meta['resolution']] + return '5' + elif meta['is_disc'] == 'DVD': + return '15' - video_track = next((t for t in meta['mediainfo']['media']['track'] if t['@type'] == 'Video'), None) - if video_track: - return video_track['Width'], video_track['Height'] - return None, None + try: + general_track = next(t for t in meta['mediainfo']['media']['track'] if t['@type'] == 'General') + file_extension = general_track.get('FileExtension', '').lower() + if file_extension == 'mkv': + return '6' + elif file_extension == 'mp4': + return '8' + except (StopIteration, AttributeError, TypeError): + return None + return None - def get_type_id(self, meta): - qualidade_map_disc = {"BD25": "40", "BD50": "41", "BD66": "42", "BD100": "43"} - qualidade_map_files = {"ENCODE": "9", "REMUX": "39", "WEBDL": "23", "WEBRIP": "38", "BDRIP": "8", "DVDRIP": "3"} - qualidade_map_dvd = {"DVD5": "45", "DVD9": "46"} + async def get_type(self, meta): + bd_disc_map = {'BD25': '40', 'BD50': '41', 'BD66': '42', 'BD100': '43'} + standard_map = {'ENCODE': '9', 'REMUX': '39', 'WEBDL': '23', 'WEBRIP': '38', 'BDRIP': '8', 'DVDRIP': '3'} + dvd_map = {'DVD5': '45', 'DVD9': '46'} if meta['type'] == 'DISC': + if meta['is_disc'] == 'HDDVD': + return 15 + if meta['is_disc'] == 'DVD': dvd_size = meta['dvd_size'] - type_id = qualidade_map_dvd[dvd_size] + type_id = dvd_map[dvd_size] if type_id: return type_id disctype = meta['disctype'] - if disctype in qualidade_map_disc: - return qualidade_map_disc[disctype] - - bdinfo_size_gib = meta.get('bdinfo', {}).get('size') - if bdinfo_size_gib: - size_bytes = bdinfo_size_gib * 1_073_741_824 - if size_bytes > 66_000_000_000: - return "43" # BD100 - elif size_bytes > 50_000_000_000: - return "42" # BD66 - elif size_bytes > 25_000_000_000: - return "41" # BD50 - else: - return "40" # BD25 + if disctype in bd_disc_map: + return bd_disc_map[disctype] + + try: + size_in_gb = meta['bdinfo']['size'] + except (KeyError, IndexError, TypeError): + size_in_gb = 0 + + if size_in_gb > 66: + return '43' # BD100 + elif size_in_gb > 50: + return '42' # BD66 + elif size_in_gb > 25: + return '41' # BD50 + else: + return '40' # BD25 else: - return qualidade_map_files.get(meta['type'], "0") + return standard_map.get(meta['type'], '0') - def get_container(self, meta): - if meta['is_disc'] == "BDMV": - return "5" - elif meta['is_disc'] == "DVD": - return "15" + async def get_languages(self, meta): + if meta.get('anime'): + if meta['category'] == 'MOVIE': + type_ = '116' + if meta['category'] == 'TV': + type_ = '118' - try: - general_track = next(t for t in meta['mediainfo']['media']['track'] if t['@type'] == 'General') - file_extension = general_track.get('FileExtension', '').lower() - if file_extension == 'mkv': - return '6' - elif file_extension == 'mp4': - return '8' - except (StopIteration, AttributeError, TypeError): - return None - return None + anime_language = self.anime_language_map.get(meta.get('original_language', '').lower(), '6') - def get_audio_codec(self, meta): - audio_type = (meta['audio'] or '').upper() + if await self.get_audio(meta) in ('2', '3', '4'): + lang = '8' + else: + lang = self.language_map.get(meta.get('original_language', '').lower(), '11') - codec_map = { - "ATMOS": "43", - "DTS:X": "25", - "DTS-HD MA": "24", - "DTS-HD": "23", - "TRUEHD": "29", - "DD+": "26", - "DD": "11", - "DTS": "12", - "FLAC": "13", - "LPCM": "21", - "PCM": "28", - "AAC": "10", - "OPUS": "27", - "MPEG": "17" - } + return { + 'type': type_, + 
'idioma': anime_language, + 'lang': lang + } - for key, code in codec_map.items(): - if key in audio_type: - return code + async def get_audio(self, meta): + subtitles = '1' + dual_audio = '2' + dubbed = '3' + national = '4' + original = '7' + + portuguese_languages = {'portuguese', 'português', 'pt'} + + has_pt_subs = (await self.get_subtitle(meta)) == 'Embutida' + + audio_languages = {lang.lower() for lang in meta.get('audio_languages', [])} + has_pt_audio = any(lang in portuguese_languages for lang in audio_languages) + + original_lang = meta.get('original_language', '').lower() + is_original_pt = original_lang in portuguese_languages + + if has_pt_audio: + if is_original_pt: + return national + elif len(audio_languages - portuguese_languages) > 0: + return dual_audio + else: + return dubbed + elif has_pt_subs: + return subtitles + else: + return original + + async def get_subtitle(self, meta): + portuguese_languages = {'portuguese', 'português', 'pt'} + + found_languages = {lang.lower() for lang in meta.get('subtitle_languages', [])} + + if any(lang in portuguese_languages for lang in found_languages): + return 'Embutida' + return 'S_legenda' + + async def get_resolution(self, meta): + if meta.get('is_disc') == 'BDMV': + resolution_str = meta.get('resolution', '') + try: + height_num = int(resolution_str.lower().replace('p', '').replace('i', '')) + height = str(height_num) + + width_num = round((16 / 9) * height_num) + width = str(width_num) + except (ValueError, TypeError): + pass + + else: + video_mi = meta['mediainfo']['media']['track'][1] + width = video_mi['Width'] + height = video_mi['Height'] - return "20" + return { + 'width': width, + 'height': height + } - def get_video_codec(self, meta): + async def get_video_codec(self, meta): codec_video_map = { - "MPEG-4": "31", "AV1": "29", "AVC": "30", "DivX": "9", - "H264": "17", "H265": "18", "HEVC": "27", "M4V": "20", - "MPEG-1": "10", "MPEG-2": "11", "RMVB": "12", "VC-1": "21", - "VP6": "22", "VP9": "23", "WMV": "13", "XviD": "15" + 'MPEG-4': '31', 'AV1': '29', 'AVC': '30', 'DivX': '9', + 'H264': '17', 'H265': '18', 'HEVC': '27', 'M4V': '20', + 'MPEG-1': '10', 'MPEG-2': '11', 'RMVB': '12', 'VC-1': '21', + 'VP6': '22', 'VP9': '23', 'WMV': '13', 'XviD': '15' } codec_video = None @@ -197,488 +288,309 @@ def get_video_codec(self, meta): if not codec_video: codec_video = meta.get('video_codec') - codec_id = codec_video_map.get(codec_video, "16") + codec_id = codec_video_map.get(codec_video, '16') is_hdr = bool(meta.get('hdr')) if is_hdr: - if codec_video in ("HEVC", "H265"): - return "28" - if codec_video in ("AVC", "H264"): - return "32" + if codec_video in ('HEVC', 'H265'): + return '28' + if codec_video in ('AVC', 'H264'): + return '32' return codec_id - async def fetch_tmdb_data(self, endpoint): - tmdb_api = self.config['DEFAULT']['tmdb_api'] - - url = f"https://api.themoviedb.org/3/%7Bendpoint%7D?api_key={tmdb_api}&language=pt-BR&append_to_response=credits,videos" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url) - if response.status_code == 200: - return response.json() - else: - return None - except httpx.RequestError: - return None - - async def main_tmdb_data(self, meta): - self.assign_media_properties(meta) - if not self.category or not self.tmdb_id: - return None - - endpoint = f"{self.category.lower()}/{self.tmdb_id}" - return await self.fetch_tmdb_data(endpoint) + async def get_audio_codec(self, meta): + audio_type = (meta['audio'] or '').upper() - async def 
season_tmdb_data(self, meta): - season = meta.get('season_int') - if not self.tmdb_id or season is None: - return None + codec_map = { + 'ATMOS': '43', + 'DTS:X': '25', + 'DTS-HD MA': '24', + 'DTS-HD': '23', + 'TRUEHD': '29', + 'DD+': '26', + 'DD': '11', + 'DTS': '12', + 'FLAC': '13', + 'LPCM': '21', + 'PCM': '28', + 'AAC': '10', + 'OPUS': '27', + 'MPEG': '17' + } - endpoint = f"tv/{self.tmdb_id}/season/{season}" - return await self.fetch_tmdb_data(endpoint) + for key, code in codec_map.items(): + if key in audio_type: + return code - async def episode_tmdb_data(self, meta): - season = meta.get('season_int') - episode = meta.get('episode_int') - if not self.tmdb_id or season is None or episode is None: - return None + return '20' - endpoint = f"tv/{self.tmdb_id}/season/{season}/episode/{episode}" - return await self.fetch_tmdb_data(endpoint) + async def get_title(self, meta): + name = meta['title'] + base_name = name - def format_image(self, url): - return f"[img]{url}[/img]" if url else "" + if meta['category'] == 'TV': + tv_title_ptbr = self.main_tmdb_data.get('name') + if tv_title_ptbr and tv_title_ptbr.lower() != name.lower(): + base_name = f"{tv_title_ptbr} ({name})" - def format_date(self, date_str): - if not date_str or date_str == 'N/A': - return 'N/A' - for fmt in ('%Y-%m-%d', '%d %b %Y'): - try: - return datetime.strptime(str(date_str), fmt).strftime('%d/%m/%Y') - except (ValueError, TypeError): - continue - return str(date_str) + return f"{base_name} - {meta.get('season', '')}{meta.get('episode', '')}" - def get_file_info(self, meta): - if meta.get('is_disc') != 'BDMV': - video_file = meta['filelist'][0] - template_path = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") - if os.path.exists(template_path): - mi_output = MediaInfo.parse( - video_file, - output="STRING", - full=False, - mediainfo_options={"inform": f"file://{template_path}"} - ) - return str(mi_output).replace('\r', '') else: - summary_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" - if os.path.exists(summary_path): - with open(summary_path, 'r', encoding='utf-8') as f: - return f.read() - return None - - async def fetch_layout_data(self, meta): - url = f"{self.base_url}/search.php" - - async def _fetch(payload): - try: - await self.load_cookies(meta) - response = self.session.post(url, data=payload, timeout=20) - response.raise_for_status() - return response.json().get('ASC') - except Exception: - return None - - primary_payload = {'imdb': self.imdb_id, 'layout': self.layout} - if layout_data := await _fetch(primary_payload): - return layout_data - - # Fallback to a known movie if primary fetch fails - fallback_payload = {'imdb': 'tt0013442', 'layout': self.layout} - return await _fetch(fallback_payload) - - def build_ratings_bbcode(self, ratings_list): - if not ratings_list: - return "" - - ratings_map = { - "Internet Movie Database": "[img]https://i.postimg.cc/Pr8Gv4RQ/IMDB.png[/img]", - "Rotten Tomatoes": "[img]https://i.postimg.cc/rppL76qC/rotten.png[/img]", - "Metacritic": "[img]https://i.postimg.cc/SKkH5pNg/Metacritic45x45.png[/img]", - "TMDb": "[img]https://i.postimg.cc/T13yyzyY/tmdb.png[/img]" - } - parts = [] - for rating in ratings_list: - source = rating.get('Source') - value = rating.get('Value', '').strip() - img_tag = ratings_map.get(source) - if not img_tag: - continue + movie_title_ptbr = self.main_tmdb_data.get('title') + if movie_title_ptbr and movie_title_ptbr.lower() != name.lower(): + base_name = f"{movie_title_ptbr} ({name})" - if source == "Internet 
Movie Database": - parts.append(f"\n[url=https://www.imdb.com/title/{self.imdb_id}]{img_tag}[/url]\n[b]{value}[/b]\n") - elif source == "TMDb": - parts.append(f"[url=https://www.themoviedb.org/{self.category.lower()}/{self.tmdb_id}]{img_tag}[/url]\n[b]{value}[/b]\n") - else: - parts.append(f"{img_tag}\n[b]{value}[/b]\n") - return "\n".join(parts) + return f"{base_name}" - def build_cast_bbcode(self, cast_list): - if not cast_list: - return "" - - parts = [] - for person in cast_list[:10]: - profile_path = person.get('profile_path') - profile_url = f"https://image.tmdb.org/t/p/w45%7Bprofile_path%7D" if profile_path else "https://i.imgur.com/eCCCtFA.png" - tmdb_url = f"https://www.themoviedb.org/person/%7Bperson.get('id')}?language=pt-BR" - img_tag = self.format_image(profile_url) - character_info = f"({person.get('name', '')}) como {person.get('character', '')}" - parts.append(f"[url={tmdb_url}]{img_tag}[/url]\n[size=2][b]{character_info}[/b][/size]\n") - return "".join(parts) - - async def build_description(self, json_data, meta): - main_tmdb, season_tmdb, episode_tmdb, user_layout = await asyncio.gather( - self.main_tmdb_data(meta), - self.season_tmdb_data(meta), - self.episode_tmdb_data(meta), - self.fetch_layout_data(meta) - ) - fileinfo_dump = await asyncio.to_thread(self.get_file_info, meta) + async def build_description(self, meta): + user_layout = await self.fetch_layout_data(meta) + fileinfo_dump = await self.media_info(meta) if not user_layout: - return "[center]Erro: Não foi possível carregar o layout da descrição.[/center]" + return '[center]Erro: Não foi possível carregar o layout da descrição.[/center]' layout_image = {k: v for k, v in user_layout.items() if k.startswith('BARRINHA_')} - description_parts = ["[center]"] + description_parts = ['[center]'] - def append_section(key: str, content: str): + async def append_section(key: str, content: str): if content and (img := layout_image.get(key)): - description_parts.append(f"\n{self.format_image(img)}") - description_parts.append(f"\n{content}\n") + description_parts.append(f'\n{await self.format_image(img)}') + description_parts.append(f'\n{content}\n') # Title for i in range(1, 4): - description_parts.append(self.format_image(layout_image.get(f'BARRINHA_CUSTOM_T_{i}'))) - description_parts.append(f"\n{self.format_image(layout_image.get('BARRINHA_APRESENTA'))}\n") + description_parts.append(await self.format_image(layout_image.get(f'BARRINHA_CUSTOM_T_{i}'))) + description_parts.append(f"\n{await self.format_image(layout_image.get('BARRINHA_APRESENTA'))}\n") description_parts.append(f"\n[size=3]{await self.get_title(meta)}[/size]\n") # Poster - poster_path = (season_tmdb or {}).get('poster_path') or (main_tmdb or {}).get('poster_path') or meta.get('tmdb_poster') - self.poster = f"https://image.tmdb.org/t/p/w500%7Bposter_path%7D" if poster_path else "" - append_section('BARRINHA_CAPA', self.format_image(self.poster)) + poster_path = (self.season_tmdb_data or {}).get('poster_path') or (self.main_tmdb_data or {}).get('poster_path') or meta.get('tmdb_poster') + poster = f'https://image.tmdb.org/t/p/w500%7Bposter_path%7D' if poster_path else '' + await append_section('BARRINHA_CAPA', await self.format_image(poster)) # Overview - overview = (season_tmdb or {}).get('overview') or (main_tmdb or {}).get('overview') - append_section('BARRINHA_SINOPSE', overview) + overview = (self.season_tmdb_data or {}).get('overview') or (self.main_tmdb_data or {}).get('overview') + await 
append_section('BARRINHA_SINOPSE', overview) # Episode - if self.category == 'TV' and episode_tmdb: - episode_name = episode_tmdb.get('name') - episode_overview = episode_tmdb.get('overview') - still_path = episode_tmdb.get('still_path') + if meta['category'] == 'TV' and self.episode_tmdb_data: + episode_name = self.episode_tmdb_data.get('name') + episode_overview = self.episode_tmdb_data.get('overview') + still_path = self.episode_tmdb_data.get('still_path') if episode_name and episode_overview and still_path: - still_url = f"https://image.tmdb.org/t/p/w300%7Bstill_path%7D" - description_parts.append(f"\n[size=4][b]Episódio:[/b] {episode_name}[/size]\n") - description_parts.append(f"\n{self.format_image(still_url)}\n\n{episode_overview}\n") + still_url = f'https://image.tmdb.org/t/p/w300%7Bstill_path%7D' + description_parts.append(f'\n[size=4][b]Episódio:[/b] {episode_name}[/size]\n') + description_parts.append(f'\n{await self.format_image(still_url)}\n\n{episode_overview}\n') # Technical Sheet - if main_tmdb: - runtime = (episode_tmdb or {}).get('runtime') or main_tmdb.get('runtime') or meta.get('runtime') + if self.main_tmdb_data: + runtime = (self.episode_tmdb_data or {}).get('runtime') or self.main_tmdb_data.get('runtime') or meta.get('runtime') formatted_runtime = None if runtime: h, m = divmod(runtime, 60) formatted_runtime = f"{h} hora{'s' if h > 1 else ''} e {m:02d} minutos" if h > 0 else f"{m:02d} minutos" - release_date = (episode_tmdb or {}).get('air_date') or (season_tmdb or {}).get('air_date') if self.category != 'MOVIE' else main_tmdb.get('release_date') + release_date = (self.episode_tmdb_data or {}).get('air_date') or (self.season_tmdb_data or {}).get('air_date') if meta['category'] != 'MOVIE' else self.main_tmdb_data.get('release_date') sheet_items = [ - f"Duração: {formatted_runtime}" if formatted_runtime else None, - f"País de Origem: {', '.join(c['name'] for c in main_tmdb.get('production_countries', []))}" if main_tmdb.get('production_countries') else None, - f"Gêneros: {', '.join(g['name'] for g in main_tmdb.get('genres', []))}" if main_tmdb.get('genres') else None, - f"Data de Lançamento: {self.format_date(release_date)}" if release_date else None, - f"Site: [url={main_tmdb.get('homepage')}]Clique aqui[/url]" if main_tmdb.get('homepage') else None + f'Duração: {formatted_runtime}' if formatted_runtime else None, + f"País de Origem: {', '.join(c['name'] for c in self.main_tmdb_data.get('production_countries', []))}" if self.main_tmdb_data.get('production_countries') else None, + f"Gêneros: {', '.join(g['name'] for g in self.main_tmdb_data.get('genres', []))}" if self.main_tmdb_data.get('genres') else None, + f'Data de Lançamento: {await self.format_date(release_date)}' if release_date else None, + f"Site: [url={self.main_tmdb_data.get('homepage')}]Clique aqui[/url]" if self.main_tmdb_data.get('homepage') else None ] - append_section('BARRINHA_FICHA_TECNICA', "\n".join(filter(None, sheet_items))) + await append_section('BARRINHA_FICHA_TECNICA', '\n'.join(filter(None, sheet_items))) # Production Companies - if main_tmdb and main_tmdb.get('production_companies'): - prod_parts = ["[size=4][b]Produtoras[/b][/size]"] - for p in main_tmdb.get('production_companies', []): + if self.main_tmdb_data and self.main_tmdb_data.get('production_companies'): + prod_parts = ['[size=4][b]Produtoras[/b][/size]'] + for p in self.main_tmdb_data.get('production_companies', []): logo_path = p.get('logo_path') - logo = 
self.format_image(f"https://image.tmdb.org/t/p/w45%7Blogo_path%7D") if logo_path else '' + logo = await self.format_image(f'https://image.tmdb.org/t/p/w45%7Blogo_path%7D') if logo_path else '' prod_parts.append(f"{logo}[size=2] - [b]{p.get('name', '')}[/b][/size]" if logo else f"[size=2][b]{p.get('name', '')}[/b][/size]") - description_parts.append("\n" + "\n".join(prod_parts) + "\n") + description_parts.append('\n' + '\n'.join(prod_parts) + '\n') # Cast - if self.category == 'MOVIE': - cast_data = ((main_tmdb or {}).get('credits') or {}).get('cast', []) + if meta['category'] == 'MOVIE': + cast_data = ((self.main_tmdb_data or {}).get('credits') or {}).get('cast', []) elif meta.get('tv_pack'): - cast_data = ((season_tmdb or {}).get('credits') or {}).get('cast', []) + cast_data = ((self.season_tmdb_data or {}).get('credits') or {}).get('cast', []) else: - cast_data = ((episode_tmdb or {}).get('credits') or {}).get('cast', []) - append_section('BARRINHA_ELENCO', self.build_cast_bbcode(cast_data)) + cast_data = ((self.episode_tmdb_data or {}).get('credits') or {}).get('cast', []) + await append_section('BARRINHA_ELENCO', await self.build_cast_bbcode(cast_data)) # Seasons - if self.category == 'TV' and main_tmdb and main_tmdb.get('seasons'): + if meta['category'] == 'TV' and self.main_tmdb_data and self.main_tmdb_data.get('seasons'): seasons_content = [] - for seasons in main_tmdb.get('seasons', []): + for seasons in self.main_tmdb_data.get('seasons', []): season_name = seasons.get('name', f"Temporada {seasons.get('season_number')}").strip() - poster_temp = self.format_image(f"https://image.tmdb.org/t/p/w185%7Bseasons.get('poster_path')}") if seasons.get('poster_path') else '' + poster_temp = await self.format_image(f"https://image.tmdb.org/t/p/w185%7Bseasons.get('poster_path')}") if seasons.get('poster_path') else '' overview_temp = f"\n\nSinopse:\n{seasons.get('overview')}" if seasons.get('overview') else '' inner_content_parts = [] air_date = seasons.get('air_date') if air_date: - inner_content_parts.append(f"Data: {self.format_date(air_date)}") + inner_content_parts.append(f'Data: {await self.format_date(air_date)}') episode_count = seasons.get('episode_count') if episode_count is not None: - inner_content_parts.append(f"Episódios: {episode_count}") + inner_content_parts.append(f'Episódios: {episode_count}') inner_content_parts.append(poster_temp) inner_content_parts.append(overview_temp) - inner_content = "\n".join(inner_content_parts) - seasons_content.append(f"\n[spoiler={season_name}]{inner_content}[/spoiler]\n") - append_section('BARRINHA_EPISODIOS', "".join(seasons_content)) + inner_content = '\n'.join(inner_content_parts) + seasons_content.append(f'\n[spoiler={season_name}]{inner_content}[/spoiler]\n') + await append_section('BARRINHA_EPISODIOS', ''.join(seasons_content)) # Ratings ratings_list = user_layout.get('Ratings', []) if not ratings_list: if imdb_rating := meta.get('imdb_info', {}).get('rating'): ratings_list.append({'Source': 'Internet Movie Database', 'Value': f'{imdb_rating}/10'}) - if main_tmdb and (tmdb_rating := main_tmdb.get('vote_average')): + if self.main_tmdb_data and (tmdb_rating := self.main_tmdb_data.get('vote_average')): if not any(r.get('Source') == 'TMDb' for r in ratings_list): ratings_list.append({'Source': 'TMDb', 'Value': f'{tmdb_rating:.1f}/10'}) - criticas_key = 'BARRINHA_INFORMACOES' if self.category == 'MOVIE' and 'BARRINHA_INFORMACOES' in layout_image else 'BARRINHA_CRITICAS' - append_section(criticas_key, 
self.build_ratings_bbcode(ratings_list)) + criticas_key = 'BARRINHA_INFORMACOES' if meta['category'] == 'MOVIE' and 'BARRINHA_INFORMACOES' in layout_image else 'BARRINHA_CRITICAS' + await append_section(criticas_key, await self.build_ratings_bbcode(meta, ratings_list)) # MediaInfo/BDinfo if fileinfo_dump: - description_parts.append(f"\n[spoiler=Informações do Arquivo]\n[left][font=Courier New]{fileinfo_dump}[/font][/left][/spoiler]\n") + description_parts.append(f'\n[spoiler=Informações do Arquivo]\n[left][font=Courier New]{fileinfo_dump}[/font][/left][/spoiler]\n') # Custom Bar for i in range(1, 4): - description_parts.append(self.format_image(layout_image.get(f'BARRINHA_CUSTOM_B_{i}'))) - description_parts.append("[/center]") - - return "".join(filter(None, description_parts)) - - async def prepare_form_data(self, meta): - self.assign_media_properties(meta) - main_tmdb = await self.main_tmdb_data(meta) - - try: - data = {'takeupload': 'yes', 'layout': self.layout} - - subtitle_value, audio_value = await self._determine_language_properties(meta) - if subtitle_value is None and audio_value is None: - return None - - # Description - json_data = {} - tracker_description = await self.build_description(json_data, meta) - - base_desc = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" - asc_desc = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - external_desc_content = "" - if os.path.exists(base_desc): - with open(base_desc, 'r', encoding='utf-8') as f: - external_desc_content = f.read().strip() - desc = external_desc_content - desc = desc.replace("[user]", "").replace("[/user]", "") - desc = desc.replace("[align=left]", "").replace("[/align]", "") - desc = desc.replace("[align=right]", "").replace("[/align]", "") - desc = desc.replace("[alert]", "").replace("[/alert]", "") - desc = desc.replace("[note]", "").replace("[/note]", "") - desc = desc.replace("[h1]", "[u][b]").replace("[/h1]", "[/b][/u]") - desc = desc.replace("[h2]", "[u][b]").replace("[/h2]", "[/b][/u]") - desc = desc.replace("[h3]", "[u][b]").replace("[/h3]", "[/b][/u]") - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) - - final_desc_parts = [ - tracker_description, - desc, - self.signature - ] - - data['descr'] = "\n\n".join(filter(None, final_desc_parts)).strip() - - with open(asc_desc, 'w', encoding='utf-8') as f: - f.write(data['descr']) - - # Poster - data['capa'] = self.poster - - # Title - data['name'] = await self.get_title(meta) - - # Year - data['ano'] = str(meta['year']) - - # Genre - data['genre'] = (', '.join(g['name'] for g in main_tmdb.get('genres', []))) or meta.get('genre', 'Gênero desconhecido') - - # File information - data['legenda'] = subtitle_value - data['audio'] = audio_value - data['qualidade'] = self.get_type_id(meta) - data['extencao'] = self.get_container(meta) - data['codecaudio'] = self.get_audio_codec(meta) - data['codecvideo'] = self.get_video_codec(meta) + description_parts.append(await self.format_image(layout_image.get(f'BARRINHA_CUSTOM_B_{i}'))) + description_parts.append('[/center]') + + # External description + desc = '' + base_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" + if os.path.exists(base_desc_path): + with open(base_desc_path, 'r', encoding='utf-8') as f: + desc = f.read().strip() + desc = desc.replace('[user]', '').replace('[/user]', '') + desc = desc.replace('[align=left]', '').replace('[/align]', '') + desc = desc.replace('[align=right]', '').replace('[/align]', '') + desc = desc.replace('[alert]', 
'').replace('[/alert]', '') + desc = desc.replace('[note]', '').replace('[/note]', '') + desc = desc.replace('[h1]', '[u][b]').replace('[/h1]', '[/b][/u]') + desc = desc.replace('[h2]', '[u][b]').replace('[/h2]', '[/b][/u]') + desc = desc.replace('[h3]', '[u][b]').replace('[/h3]', '[/b][/u]') + desc = re.sub(r'(\[img=\d+)]', '[img]', desc, flags=re.IGNORECASE) + description_parts.append(desc) + + custom_description_header = self.config['DEFAULT'].get('custom_description_header', '') + if custom_description_header: + description_parts.append(custom_description_header + '\n') + + description_parts.append(f"[center][url=https://github.com/Audionut/Upload-Assistant]Upload realizado via {meta['ua_name']} {meta['current_version']}[/url][/center]") + + final_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + with open(final_desc_path, 'w', encoding='utf-8') as descfile: + final_description = '\n'.join(filter(None, description_parts)) + descfile.write(final_description) + + return final_description + + async def get_trailer(self, meta): + video_results = self.main_tmdb_data.get('videos', {}).get('results', []) + youtube_code = video_results[-1].get('key', '') if video_results else '' + if youtube_code: + youtube = f'http://www.youtube.com/watch?v={youtube_code}' + else: + youtube = meta.get('youtube') or '' - # IMDb - data['imdb'] = self.imdb_id + return youtube - # Trailer - video_results = main_tmdb.get('videos', {}).get('results', []) - youtube_code = video_results[-1].get('key', '') if video_results else '' - if youtube_code: - data['tube'] = f"http://www.youtube.com/watch?v={youtube_code}" - else: - data['tube'] = meta.get('youtube') or '' - - # Resolution - width, hight = self.get_res_id(meta) - data['largura'] = width - data['altura'] = hight - - # Languages - lang_map = { - "en": "1", "fr": "2", "de": "3", "it": "4", "ja": "5", - "es": "6", "ru": "7", "pt": "8", "zh": "10", "da": "12", - "sv": "13", "fi": "14", "bg": "15", "no": "16", "nl": "17", - "pl": "19", "ko": "20", "th": "21", "hi": "23", "tr": "25" - } + async def get_tags(self, meta): + tags = ', '.join( + g.get('name', '') + for g in self.main_tmdb_data.get('genres', []) + if isinstance(g.get('name'), str) and g.get('name').strip() + ) - # 3D - data['tresd'] = '1' if meta.get('3d') else '2' + if not tags: + tags = meta.get('genre') or await self.common.async_input(prompt=f'Digite os gêneros (no formato do {self.tracker}): ') - if meta.get('anime'): - if '3' in data['audio'] or '2' in data['audio']: - data['lang'] = "8" - else: - data['lang'] = lang_map.get(meta.get('original_language', '').lower(), "11") + return tags - idioma_map = {"de": "3", "zh": "9", "ko": "11", "es": "1", "en": "4", "ja": "8", "pt": "5", "ru": "2"} - data['idioma'] = idioma_map.get(meta.get('original_language', '').lower(), "6") + async def _fetch_file_info(self, torrent_id, torrent_link, size): + ''' + Helper function to fetch file info for a single release in parallel. 
+ ''' + file_page_url = f'{self.base_url}/torrents-arquivos.php?id={torrent_id}' + filename = 'N/A' - if self.category == 'MOVIE': - data['type'] = '116' - elif self.category == 'TV': - data['type'] = '118' - - else: - data['lang'] = lang_map.get(meta.get('original_language', '').lower(), "11") + try: + file_page_response = await self.session.get(file_page_url, timeout=15) + file_page_response.raise_for_status() + file_page_soup = BeautifulSoup(file_page_response.text, 'html.parser') + file_li_tag = file_page_soup.find('li', class_='list-group-item') - # Screenshots - for i, img in enumerate(meta.get('image_list', [])[:4]): - data[f'screens{i+1}'] = img.get('raw_url') + if file_li_tag and file_li_tag.contents: + filename = file_li_tag.contents[0].strip() - return data except Exception as e: - console.print(f"[bold red]A preparação dos dados para o upload falhou: {e}[/bold red]") - raise - - async def upload(self, meta, disctype): - await self.edit_torrent(meta, self.tracker, self.source_flag) - await self.load_cookies(meta) - - data = await self.prepare_form_data(meta) - - if meta.get('debug', False): - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - return - - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - - upload_url = self.get_upload_url(https://github.com/meta) + console.print(f'[bold red]Falha ao obter nome do arquivo para ID {torrent_id}: {e}[/bold red]') - with open(torrent_path, 'rb') as torrent_file: - files = {'torrent': (f"{self.tracker}.{meta.get('infohash', '')}.placeholder.torrent", torrent_file, "application/x-bittorrent")} - response = self.session.post(upload_url, data=data, files=files, timeout=60) + return { + 'name': filename, + 'size': size, + 'link': torrent_link + } - if "foi enviado com sucesso" in response.text: - await self.successful_upload(response.text, meta) - else: - self.failed_upload(response, meta) + async def search_existing(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) - def get_upload_url(https://github.com/self,%20meta): + found_items = [] if meta.get('anime'): - return f"{self.base_url}/enviar-anime.php" - elif self.category == 'MOVIE': - return f"{self.base_url}/enviar-filme.php" - else: - return f"{self.base_url}/enviar-series.php" - - async def successful_upload(self, response_text, meta): - try: - soup = BeautifulSoup(response_text, 'html.parser') - details_link_tag = soup.find('a', href=lambda href: href and "torrents-details.php?id=" in href) - - relative_url = details_link_tag['href'] - torrent_url = f"{self.base_url}/{relative_url}" - announce_url = self.config['TRACKERS'][self.tracker]['announce_url'] - meta['tracker_status'][self.tracker]['status_message'] = torrent_url - - await self.add_tracker_torrent(meta, self.tracker, self.source_flag, announce_url, torrent_url) - - should_approve = await self.get_approval(meta) - if should_approve: - await self.auto_approval(relative_url) - - except Exception as e: - console.print(f"[bold red]Ocorreu um erro no pós-processamento do upload: {e}[/bold red]") + await self.load_localized_data(meta) + search_name = await self.get_title(meta) + search_query = search_name.replace(' ', '+') + search_url = f'{self.base_url}/torrents-search.php?search={search_query}' - async def auto_approval(self, relative_url): - try: - torrent_id = relative_url.split('id=')[-1] - approval_url = 
f"{self.base_url}/uploader_app.php?id={torrent_id}" - approval_response = self.session.get(approval_url, timeout=30) - approval_response.raise_for_status() - except Exception as e: - console.print(f"[bold red]Erro durante a tentativa de aprovação automática: {e}[/bold red]") + elif meta['category'] == 'MOVIE': + search_url = f"{self.base_url}/busca-filmes.php?search=&imdb={meta['imdb_info']['imdbID']}" - def failed_upload(self, response, meta): - response_save_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload.html" - with open(response_save_path, "w", encoding="utf-8") as f: - f.write(response.text) - console.print("[bold red]Falha no upload para o ASC. A resposta do servidor não indicou sucesso.[/bold red]") - console.print(f"[yellow]A resposta foi salva em: {response_save_path}[/yellow]") - raise UploadException("Falha no upload para o ASC: resposta inesperada do servidor.", 'red') + elif meta['category'] == 'TV': + search_url = f"{self.base_url}/busca-series.php?search={meta.get('season', '')}{meta.get('episode', '')}&imdb={meta['imdb_info']['imdbID']}" - async def get_dupes(self, search_url, meta): - dupes = [] + else: + return found_items try: - await self.load_cookies(meta) - response = self.session.get(search_url, timeout=30) + response = await self.session.get(search_url, timeout=30) response.raise_for_status() soup = BeautifulSoup(response.text, 'html.parser') releases = soup.find_all('li', class_='list-group-item dark-gray') except Exception as e: - console.print(f"[bold red]Falha ao acessar a página de busca do ASC: {e}[/bold red]") - return dupes + console.print(f'[bold red]Falha ao acessar a página de busca do ASC: {e}[/bold red]') + return found_items if not releases: - return dupes + return found_items + + name_search_tasks = [] for release in releases: + details_link_tag = release.find('a', href=lambda href: href and 'torrents-details.php?id=' in href) + torrent_link = details_link_tag.get('href', '') if details_link_tag else '' + size_tag = release.find('span', text=lambda t: t and ('GB' in t.upper() or 'MB' in t.upper()), class_='badge-info') + size = size_tag.get_text(strip=True).strip() if size_tag else '' + try: badges = release.find_all('span', class_='badge') disc_types = ['BD25', 'BD50', 'BD66', 'BD100', 'DVD5', 'DVD9'] is_disc = any(badge.text.strip().upper() in disc_types for badge in badges) if is_disc: - name, year, resolution, disk_type, video_codec, audio_codec = meta['title'], "N/A", "N/A", "N/A", "N/A", "N/A" + name, year, resolution, disk_type, video_codec, audio_codec = meta['title'], 'N/A', 'N/A', 'N/A', 'N/A', 'N/A' video_codec_terms = ['MPEG-4', 'AV1', 'AVC', 'H264', 'H265', 'HEVC', 'MPEG-1', 'MPEG-2', 'VC-1', 'VP6', 'VP9'] audio_codec_terms = ['DTS', 'AC3', 'DDP', 'E-AC-3', 'TRUEHD', 'ATMOS', 'LPCM', 'AAC', 'FLAC'] @@ -689,7 +601,7 @@ async def get_dupes(self, search_url, meta): if badge_text.isdigit() and len(badge_text) == 4: year = badge_text elif badge_text_upper in ['4K', '2160P', '1080P', '720P', '480P']: - resolution = "2160p" if badge_text_upper == '4K' else badge_text + resolution = '2160p' if badge_text_upper == '4K' else badge_text elif any(term in badge_text_upper for term in video_codec_terms): video_codec = badge_text elif any(term in badge_text_upper for term in audio_codec_terms): @@ -697,76 +609,315 @@ async def get_dupes(self, search_url, meta): elif any(term in badge_text_upper for term in disc_types): disk_type = badge_text - dupe_string = f"{name} {year} {resolution} {disk_type} {video_codec} 
{audio_codec}" - dupes.append(dupe_string) + name = f'{name} {year} {resolution} {disk_type} {video_codec} {audio_codec}' + dupe_entry = { + 'name': name, + 'size': size, + 'link': torrent_link + } + + found_items.append(dupe_entry) + else: - details_link_tag = release.find('a', href=lambda href: href and "torrents-details.php?id=" in href) if not details_link_tag: continue torrent_id = details_link_tag['href'].split('id=')[-1] - file_page_url = f"{self.base_url}/torrents-arquivos.php?id={torrent_id}" - file_page_response = self.session.get(file_page_url, timeout=15) - file_page_response.raise_for_status() - file_page_soup = BeautifulSoup(file_page_response.text, 'html.parser') - - file_li_tag = file_page_soup.find('li', class_='list-group-item') - if file_li_tag and file_li_tag.contents: - filename = file_li_tag.contents[0].strip() - dupes.append(filename) + name_search_tasks.append(self._fetch_file_info(torrent_id, torrent_link, size)) except Exception as e: - console.print(f"[bold red]Falha ao processar um release da lista: {e}[/bold red]") + console.print(f'[bold red]Falha ao processar um release da lista: {e}[/bold red]') continue - return dupes - async def search_existing(self, meta, disctype): - self.assign_media_properties(meta) + if name_search_tasks: + parallel_results = await asyncio.gather(*name_search_tasks) + found_items.extend(parallel_results) + + return found_items + + async def get_upload_url(https://github.com/self,%20meta): if meta.get('anime'): - search_name = await self.get_title(meta) - search_query = search_name.replace(' ', '+') - search_url = f"{self.base_url}/torrents-search.php?search={search_query}" + return f'{self.base_url}/enviar-anime.php' + elif meta['category'] == 'MOVIE': + return f'{self.base_url}/enviar-filme.php' + else: + return f'{self.base_url}/enviar-series.php' - if self.category == 'MOVIE': - search_url = f"{self.base_url}/busca-filmes.php?search=&imdb={self.imdb_id}" + async def format_image(self, url): + return f'[img]{url}[/img]' if url else '' - if self.category == 'TV': - search_url = f"{self.base_url}/busca-series.php?search={self.season}{self.episode}&imdb={self.imdb_id}" + async def format_date(self, date_str): + if not date_str or date_str == 'N/A': + return 'N/A' + for fmt in ('%Y-%m-%d', '%d %b %Y'): + try: + return datetime.strptime(str(date_str), fmt).strftime('%d/%m/%Y') + except (ValueError, TypeError): + continue + return str(date_str) - return await self.get_dupes(search_url, meta) + async def media_info(self, meta): + if meta.get('is_disc') == 'BDMV': + summary_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + if os.path.exists(summary_path): + with open(summary_path, 'r', encoding='utf-8') as f: + return f.read() + if not meta.get('is_disc'): + video_file = meta['filelist'][0] + template_path = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") + if os.path.exists(template_path): + mi_output = MediaInfo.parse( + video_file, + output='STRING', + full=False, + mediainfo_options={'inform': f'file://{template_path}'} + ) + return str(mi_output).replace('\r', '') - async def validate_credentials(self, meta): - await self.load_cookies(meta) + return None - try: - test_url = f"{self.base_url}/gerador.php" + async def fetch_layout_data(self, meta): + url = f'{self.base_url}/search.php' - response = self.session.get(test_url, timeout=10, allow_redirects=False) + async def _fetch(payload): + try: + response = await self.session.post(url, data=payload, timeout=20) + response.raise_for_status() 
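+                # search.php answers with JSON; the layout payload is read from its 'ASC' key.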
+ return response.json().get('ASC') + except Exception: + return None + + primary_payload = {'imdb': meta['imdb_info']['imdbID'], 'layout': self.layout} + layout_data = await _fetch(primary_payload) + if layout_data: + return layout_data + + # Fallback + fallback_payload = {'imdb': 'tt0013442', 'layout': self.layout} + return await _fetch(fallback_payload) - if response.status_code == 200 and 'gerador.php' in response.url: - return True + async def build_ratings_bbcode(self, meta, ratings_list): + if not ratings_list: + return '' + + ratings_map = { + 'Internet Movie Database': '[img]https://i.postimg.cc/Pr8Gv4RQ/IMDB.png[/img]', + 'Rotten Tomatoes': '[img]https://i.postimg.cc/rppL76qC/rotten.png[/img]', + 'Metacritic': '[img]https://i.postimg.cc/SKkH5pNg/Metacritic45x45.png[/img]', + 'TMDb': '[img]https://i.postimg.cc/T13yyzyY/tmdb.png[/img]' + } + parts = [] + for rating in ratings_list: + source = rating.get('Source') + value = rating.get('Value', '').strip() + img_tag = ratings_map.get(source) + if not img_tag: + continue + + if source == 'Internet Movie Database': + parts.append(f"\n[url={meta.get('imdb_info', {}).get('imdb_url', '')}]{img_tag}[/url]\n[b]{value}[/b]\n") + elif source == 'TMDb': + parts.append(f"[url=https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}]{img_tag}[/url]\n[b]{value}[/b]\n") else: - console.print(f"[bold red]Falha na validação das credenciais do {self.tracker}. O cookie pode estar expirado.[/bold red]") - return False - except Exception as e: - console.print(f"[bold red]Erro ao validar credenciais do {self.tracker}: {e}[/bold red]") + parts.append(f"{img_tag}\n[b]{value}[/b]\n") + return "\n".join(parts) + + async def build_cast_bbcode(self, cast_list): + if not cast_list: + return '' + + parts = [] + for person in cast_list[:10]: + profile_path = person.get('profile_path') + profile_url = f'https://image.tmdb.org/t/p/w45%7Bprofile_path%7D' if profile_path else 'https://i.imgur.com/eCCCtFA.png' + tmdb_url = f"https://www.themoviedb.org/person/%7Bperson.get('id')}?language=pt-BR" + img_tag = await self.format_image(profile_url) + character_info = f"({person.get('name', '')}) como {person.get('character', '')}" + parts.append(f'[url={tmdb_url}]{img_tag}[/url]\n[size=2][b]{character_info}[/b][/size]\n') + return ''.join(parts) + + async def get_requests(self, meta): + if not self.config['DEFAULT'].get('search_requests', False) and not meta.get('search_requests', False): return False + else: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + try: + category = meta['category'] + if meta.get('anime'): + if category == 'TV': + category = 118 + if category == 'MOVIE': + category = 116 + else: + if category == 'TV': + category = 120 + if category == 'MOVIE': + category = 119 + + query = meta['title'] + search_url = f'{self.requests_url}?search={query}&category={category}' + + response = await self.session.get(search_url) + response.raise_for_status() + response_results_text = response.text + + soup = BeautifulSoup(response_results_text, 'html.parser') + + request_rows = soup.select('.table-responsive table tr') + + results = [] + for row in request_rows: + all_tds = row.find_all('td') + if not all_tds or len(all_tds) < 6: + continue + + info_cell = all_tds[1] + link_element = info_cell.select_one('a[href*="pedidos.php?action=ver"]') + if not link_element: + continue + + name = link_element.text.strip() + link = link_element.get('href') + + reward_td = all_tds[4] + reward = 
reward_td.text.strip() + + results.append({ + 'Name': name, + 'Reward': reward, + 'Link': link, + }) + + if results: + message = f'\n{self.tracker}: [bold yellow]Seu upload pode atender o(s) seguinte(s) pedido(s), confira:[/bold yellow]\n\n' + for r in results: + message += f"[bold green]Nome:[/bold green] {r['Name']}\n" + message += f"[bold green]Recompensa:[/bold green] {r['Reward']}\n" + message += f"[bold green]Link:[/bold green] {self.base_url}/{r['Link']}\n\n" + console.print(message) + + return results + + except Exception as e: + console.print(f'[bold red]Ocorreu um erro ao buscar pedido(s) no {self.tracker}: {e}[/bold red]') + import traceback + console.print(traceback.format_exc()) + return [] + + async def get_data(self, meta): + await self.load_localized_data(meta) + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + resolution = await self.get_resolution(meta) + + data = { + 'ano': str(meta['year']), + 'audio': await self.get_audio(meta), + 'capa': f"https://image.tmdb.org/t/p/w500%7Bself.main_tmdb_data.get('poster_path') or meta.get('tmdb_poster')}", + 'codecaudio': await self.get_audio_codec(meta), + 'codecvideo': await self.get_video_codec(meta), + 'descr': await self.build_description(meta), + 'extencao': await self.get_container(meta), + 'genre': await self.get_tags(meta), + 'imdb': meta['imdb_info']['imdbID'], + 'altura': resolution['height'], + 'largura': resolution['width'], + 'lang': self.language_map.get(meta.get('original_language', '').lower(), '11'), + 'layout': self.layout, + 'legenda': await self.get_subtitle(meta), + 'name': await self.get_title(meta), + 'qualidade': await self.get_type(meta), + 'takeupload': 'yes', + 'tresd': '1' if meta.get('3d') else '2', + 'tube': await self.get_trailer(meta), + } + + if meta.get('anime'): + anime_info = await self.get_languages(meta) + data.update({ + 'idioma': anime_info['idioma'], + 'lang': anime_info['lang'], + 'type': anime_info['type'], + }) + + # Screenshots + for i, img in enumerate(meta.get('image_list', [])[:4]): + data[f'screens{i+1}'] = img.get('raw_url') + + return data + + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + upload_url = await self.get_upload_url(https://github.com/meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='torrent', + upload_cookies=self.session.cookies, + upload_url=upload_url, + id_pattern=r'torrents-details\.php\?id=(\d+)', + success_text="torrents-details.php?id=", + ) + + # Approval + should_approve = await self.get_approval(meta) + if should_approve: + await self.auto_approval(meta) + + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != '' and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + await self.set_internal_flag(meta) + + return + + async def auto_approval(self, meta): + if meta.get('debug', False): + console.print( + f'{self.tracker}: Debug mode, skipping automatic approval.' 
+ ) + else: + torrent_id = meta['tracker_status'][self.tracker]['torrent_id'] + try: + approval_url = f'{self.base_url}/uploader_app.php?id={torrent_id}' + approval_response = await self.session.get(approval_url, timeout=30) + approval_response.raise_for_status() + except Exception as e: + console.print(f'{self.tracker}: [bold red]Error during automatic approval attempt: {e}[/bold red]') async def get_approval(self, meta): - uploader = self.config['TRACKERS'][self.tracker].get('uploader_status', False) - if not uploader: + if not self.config['TRACKERS'][self.tracker].get('uploader_status', False): return False - modq = meta.get('modq', False) or meta.get('mq', False) - if modq: - return False, "Enviando para a fila de moderação." + if meta.get('modq', False): + console.print(f'{self.tracker}: Sending to the moderation queue.') + return False return True - async def load_cookies(self, meta): - cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/ASC.txt") - if os.path.exists(cookie_file): - self.session.cookies.update(await self.parseCookieFile(cookie_file)) + async def set_internal_flag(self, meta): + if meta.get('debug', False): + console.print( + f'{self.tracker}: [bold yellow]Debug mode, skipping setting internal flag.[/bold yellow]' + ) else: - console.print(f"[bold red]Arquivo de cookie para o {self.tracker} não encontrado: {cookie_file}[/bold red]") - return False + data = { + 'id': meta['tracker_status'][self.tracker]['torrent_id'], + 'internal': 'yes' + } + + try: + response = await self.session.post( + f"{self.base_url}/torrents-edit.php?action=doedit", + data=data + ) + response.raise_for_status() + + except Exception as e: + console.print(f'{self.tracker}: [bold red]Error setting internal flag: {e}[/bold red]') + return diff --git a/src/trackers/AVISTAZ_NETWORK.py b/src/trackers/AVISTAZ_NETWORK.py new file mode 100644 index 000000000..2459c5ea6 --- /dev/null +++ b/src/trackers/AVISTAZ_NETWORK.py @@ -0,0 +1,1190 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import asyncio +import bbcode +import httpx +import importlib +import json +import os +import platform +import re +import uuid +from bs4 import BeautifulSoup +from pathlib import Path +from src.console import console +from src.cookie_auth import CookieValidator +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language +from src.trackers.COMMON import COMMON +from tqdm.asyncio import tqdm +from typing import Optional +from urllib.parse import urlparse + + +class AZTrackerBase: + def __init__(self, config, tracker_name): + self.config = config + self.tracker = tracker_name + self.common = COMMON(config) + self.cookie_validator = CookieValidator(config) + self.az_class = getattr(importlib.import_module(f"src.trackers.{self.tracker}"), self.tracker) + + tracker_config = self.config['TRACKERS'][self.tracker] + self.base_url = tracker_config.get('base_url') + self.requests_url = tracker_config.get('requests_url') + self.announce_url = tracker_config.get('announce_url') + self.source_flag = tracker_config.get('source_flag') + + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f"Upload Assistant/2.3 ({platform.system()} {platform.release()})" + }, timeout=60.0) + self.media_code = '' + + def get_resolution(self, meta): + resolution = '' + width, height = None, None + + try: + if meta.get('is_disc') == 'BDMV': + resolution_str = meta.get('resolution', '') + height_num = int(resolution_str.lower().replace('p', '').replace('i', '')) 
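+                # BDMV metadata only exposes the vertical resolution (e.g. '2160p'),
+                # so the width is estimated here by assuming a 16:9 frame.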
+ height = str(height_num) + width = str(round((16 / 9) * height_num)) + else: + tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + if len(tracks) > 1: + video_mi = tracks[1] + width = video_mi.get('Width') + height = video_mi.get('Height') + except (ValueError, TypeError, KeyError, IndexError): + return '' + + if width and height: + resolution = f'{width}x{height}' + + return resolution + + def get_video_quality(self, meta): + resolution = meta.get('resolution') + + if self.tracker != 'PHD': + resolution_int = int(resolution.lower().replace('p', '').replace('i', '')) + if resolution_int < 720 or meta.get('sd', False): + return '1' + + keyword_map = { + '1080i': '7', + '1080p': '3', + '2160p': '6', + '4320p': '8', + '720p': '2', + } + + return keyword_map.get(resolution.lower()) + + async def get_media_code(self, meta): + self.media_code = '' + + if meta['category'] == 'MOVIE': + category = '1' + elif meta['category'] == 'TV': + category = '2' + else: + return False + + imdb_info = meta.get('imdb_info', {}) + imdb_id = imdb_info.get('imdbID') if isinstance(imdb_info, dict) else None + tmdb_id = meta.get('tmdb') + title = meta['title'] + + headers = { + 'Referer': f"{self.base_url}/upload/{meta['category'].lower()}", + 'X-Requested-With': 'XMLHttpRequest' + } + + for attempt in range(2): + try: + if attempt == 1: + console.print(f'{self.tracker}: Trying to search again by ID after adding to media to database...\n') + await asyncio.sleep(5) # Small delay to ensure the DB has been updated + + data = {} + + if imdb_id: + response = await self.session.get(f'{self.base_url}/ajax/movies/{category}?term={imdb_id}', headers=headers) + response.raise_for_status() + data = response.json() + + if not data.get('data', ''): + response = await self.session.get(f'{self.base_url}/ajax/movies/{category}?term={title}', headers=headers) + response.raise_for_status() + data = response.json() + + match = None + for item in data.get('data', []): + if imdb_id and item.get('imdb') == imdb_id: + match = item + break + elif item.get('tmdb') == str(tmdb_id): + match = item + break + + if match: + self.media_code = str(match['id']) + if attempt == 1: + console.print(f"{self.tracker}: [green]Found new ID at:[/green] {self.base_url}/{meta['category'].lower()}/{self.media_code}") + return True + + except Exception as e: + console.print(f'{self.tracker}: Error while trying to fetch media code in attempt {attempt + 1}: {e}') + break + + if attempt == 0 and not self.media_code: + console.print(f"\n{self.tracker}: The media [[yellow]IMDB:{imdb_id}[/yellow]] [[blue]TMDB:{tmdb_id}[/blue]] appears to be missing from the site's database.") + user_choice = await self.common.async_input(prompt=f"{self.tracker}: Do you want to add it to the site database? (y/n): \n") + + if user_choice in ['y', 'yes']: + added_successfully = await self.add_media_to_db(meta, title, category, imdb_id, tmdb_id) + if not added_successfully: + console.print(f'{self.tracker}: Failed to add media. Aborting.') + break + else: + console.print(f'{self.tracker}: User chose not to add media. 
Aborting.') + break + + if not self.media_code: + console.print(f'{self.tracker}: Unable to get media code.') + + return bool(self.media_code) + + async def add_media_to_db(self, meta, title, category, imdb_id, tmdb_id): + data = { + '_token': self.az_class.secret_token, + 'type_id': category, + 'title': title, + 'imdb_id': imdb_id if imdb_id else '', + 'tmdb_id': tmdb_id if tmdb_id else '', + } + + if meta['category'] == 'TV': + tvdb_id = meta.get('tvdb') + if tvdb_id: + data['tvdb_id'] = str(tvdb_id) + + url = f"{self.base_url}/add/{meta['category'].lower()}" + + headers = { + 'Referer': f'{self.base_url}/upload', + } + + try: + console.print(f'{self.tracker}: Trying to add to database...') + response = await self.session.post(url, data=data, headers=headers) + if response.status_code == 302: + console.print(f'{self.tracker}: The attempt to add the media to the database appears to have been successful..') + return True + else: + console.print(f'{self.tracker}: Error adding media to the database. Status: {response.status_code}') + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]Failed_DB_attempt.html" + os.makedirs(os.path.dirname(failure_path), exist_ok=True) + with open(failure_path, 'w', encoding='utf-8') as f: + f.write(response.text) + console.print(f'The server response was saved to {failure_path} for analysis.') + return False + + except Exception as e: + console.print(f'{self.tracker}: Exception when trying to add media to the database: {e}') + return False + + async def validate_credentials(self, meta): + cookie_jar = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if cookie_jar: + self.session.cookies = cookie_jar + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/torrents', + error_text='Page not found', + token_pattern=r'name="_token" content="([^"]+)"' + ) + return False + + async def search_existing(self, meta, disctype): + if self.config['TRACKERS'][self.tracker].get('check_for_rules', True): + warnings = await self.rules(meta) + if warnings: + console.print(f"{self.tracker}: [red]Rule check returned the following warning(s):[/red]\n\n{warnings}") + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + choice = await self.common.async_input(prompt='Do you want to continue anyway? [y/N]: ') + if choice != 'y': + meta['skipping'] = f'{self.tracker}' + return + else: + meta['skipping'] = f'{self.tracker}' + return + + if meta['type'] not in ['WEBDL'] and self.tracker == "PHD": + if meta.get('tag', "") in ['FGT', 'EVO']: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Group {meta["tag"]} is only allowed for web-dl[/bold red]') + choice = await self.common.async_input('Do you want to upload anyway? 
[y/N]: ') + if choice != 'y': + meta['skipping'] = f'{self.tracker}' + return + else: + meta['skipping'] = f'{self.tracker}' + return + + cookie_jar = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if cookie_jar: + self.session.cookies = cookie_jar + + if not await self.get_media_code(meta): + console.print((f"{self.tracker}: This media is not registered, please add it to the database by following this link: {self.base_url}/add/{meta['category'].lower()}")) + meta['skipping'] = f'{self.tracker}' + return + + if meta.get('resolution') == '2160p': + resolution = 'UHD' + elif meta.get('resolution') in ('720p', '1080p'): + resolution = meta.get('resolution') + else: + resolution = 'all' + + page_url = f'{self.base_url}/movies/torrents/{self.media_code}?quality={resolution}' + + duplicates = [] + visited_urls = set() + + while page_url and page_url not in visited_urls: + visited_urls.add(page_url) + + try: + response = await self.session.get(page_url) + response.raise_for_status() + + soup = BeautifulSoup(response.text, 'html.parser') + + torrent_table = soup.find('table', class_='table-bordered') + if not torrent_table: + page_url = None + continue + + torrent_rows = torrent_table.find('tbody').find_all('tr', recursive=False) + + for row in torrent_rows: + name_tag = row.find('a', class_='torrent-filename') + name = name_tag.get_text(strip=True) if name_tag else '' + + torrent_link = name_tag.get('href') if name_tag and 'href' in name_tag.attrs else '' + if torrent_link: + match = re.search(r'/(\d+)', torrent_link) + if match: + torrent_link = f'{self.torrent_url}{match.group(1)}' + + cells = row.find_all('td') + size = '' + if len(cells) > 4: + size_span = cells[4].find('span') + size = size_span.get_text(strip=True) if size_span else cells[4].get_text(strip=True) + + dupe_entry = { + 'name': name, + 'size': size, + 'link': torrent_link + } + + duplicates.append(dupe_entry) + + next_page_tag = soup.select_one('a[rel=\'next\']') + if next_page_tag and 'href' in next_page_tag.attrs: + page_url = next_page_tag['href'] + else: + page_url = None + + except httpx.RequestError as e: + console.print(f'{self.tracker}: Failed to search for duplicates. 
{e.request.url}: {e}') + return duplicates + + return duplicates + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_file_info(self, meta): + info_file_path = '' + if meta.get('is_disc') == 'BDMV': + if self.tracker == 'CZ': + summary_file = 'BD_SUMMARY_EXT_00' + else: + summary_file = 'BD_SUMMARY_00' + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/{summary_file}.txt" + else: + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO_CLEANPATH.txt" + + if os.path.exists(info_file_path): + with open(info_file_path, 'r', encoding='utf-8') as f: + return f.read() + + async def get_lang(self, meta): + self.language_map() + audio_ids = set() + subtitle_ids = set() + + if meta.get('is_disc', False): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + found_subs_strings = meta.get('subtitle_languages', []) + for lang_str in found_subs_strings: + target_id = self.lang_map.get(lang_str.lower()) + if target_id: + subtitle_ids.add(target_id) + + found_audio_strings = meta.get('audio_languages', []) + for lang_str in found_audio_strings: + target_id = self.lang_map.get(lang_str.lower()) + if target_id: + audio_ids.add(target_id) + else: + try: + media_info_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json" + with open(media_info_path, 'r', encoding='utf-8') as f: + data = json.load(f) + + tracks = data.get('media', {}).get('track', []) + + missing_audio_languages = [] + + for track in tracks: + track_type = track.get('@type') + language_code = track.get('Language') + + if not language_code: + if track_type == 'Audio': + missing_audio_languages.append(track) + continue + + target_id = self.lang_map.get(language_code.lower()) + + if not target_id and '-' in language_code: + primary_code = language_code.split('-')[0] + target_id = self.lang_map.get(primary_code.lower()) + + if target_id: + if track_type == 'Audio': + audio_ids.add(target_id) + elif track_type == 'Text': + subtitle_ids.add(target_id) + else: + if track_type == 'Audio': + missing_audio_languages.append(track) + + if missing_audio_languages: + console.print('No audio language/s found.') + console.print('You must enter (comma-separated) languages for all audio tracks, eg: English, Spanish: ') + user_input = await self.common.async_input(prompt='[bold yellow]Enter languages: [/bold yellow]') + + langs = [lang.strip() for lang in user_input.split(',')] + for lang in langs: + target_id = self.lang_map.get(lang.lower()) + if target_id: + audio_ids.add(target_id) + + except FileNotFoundError: + print(f'Warning: MediaInfo.json not found for uuid {meta.get("uuid")}. 
No languages will be processed.') + except (json.JSONDecodeError, KeyError, TypeError) as e: + print(f'Error processing MediaInfo.json for uuid {meta.get("uuid")}: {e}') + + final_subtitle_ids = sorted(list(subtitle_ids)) + final_audio_ids = sorted(list(audio_ids)) + + return { + 'subtitles[]': final_subtitle_ids, + 'languages[]': final_audio_ids + } + + async def img_host(self, meta, referer, image_bytes: bytes, filename: str) -> Optional[str]: + upload_url = f'{self.base_url}/ajax/image/upload' + + headers = { + 'Referer': referer, + 'X-Requested-With': 'XMLHttpRequest', + 'Accept': 'application/json', + 'Origin': self.base_url, + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:141.0) Gecko/20100101 Firefox/141.0' + } + + data = { + '_token': self.az_class.secret_token, + 'qquuid': str(uuid.uuid4()), + 'qqfilename': filename, + 'qqtotalfilesize': str(len(image_bytes)) + } + + files = {'qqfile': (filename, image_bytes, 'image/png')} + + try: + response = await self.session.post(upload_url, headers=headers, data=data, files=files) + + if response.is_success: + json_data = response.json() + if json_data.get('success'): + image_id = json_data.get('imageId') + return str(image_id) + else: + error_message = json_data.get('error', 'Unknown image host error.') + print(f'{self.tracker}: Error uploading {filename}: {error_message}') + return None + else: + print(f'{self.tracker}: Error uploading {filename}: Status {response.status_code} - {response.text}') + return None + except Exception as e: + print(f'{self.tracker}: Exception when uploading {filename}: {e}') + return None + + async def get_screenshots(self, meta): + screenshot_dir = Path(meta['base_dir']) / 'tmp' / meta['uuid'] + local_files = sorted(screenshot_dir.glob('*.png')) + results = [] + + limit = 3 if meta.get('tv_pack', '') == 0 else 15 + + disc_menu_links = [ + img.get('raw_url') + for img in meta.get('menu_images', []) + if img.get('raw_url') + ][:12] # minimum number of screenshots is 3, so we can allow up to 12 menu images + + async def upload_local_file(path): + with open(path, 'rb') as f: + image_bytes = f.read() + return await self.img_host(meta, self.tracker, image_bytes, path.name) + + async def upload_remote_file(url): + try: + response = await self.session.get(url) + response.raise_for_status() + image_bytes = response.content + filename = os.path.basename(urlparse(url).path) or 'screenshot.png' + return await self.img_host(meta, self.tracker, image_bytes, filename) + except Exception as e: + print(f'Failed to process screenshot from URL {url}: {e}') + return None + + # Upload menu images + for url in disc_menu_links: + if not url.lower().endswith('.png'): + console.print(f"{self.tracker}: Skipping non-PNG menu image: {url}") + else: + result = await upload_remote_file(url) + if result: + results.append(result) + + remaining_slots = max(0, limit - len(results)) + + if local_files and remaining_slots > 0: + paths = local_files[:remaining_slots] + + for path in tqdm( + paths, + total=len(paths), + desc=f'{self.tracker}: Uploading screenshots' + ): + result = await upload_local_file(path) + if result: + results.append(result) + + else: + image_links = [img.get('raw_url') for img in meta.get('image_list', []) if img.get('raw_url')] + remaining_slots = max(0, limit - len(results)) + links = image_links[:remaining_slots] + + for url in tqdm( + links, + total=len(links), + desc=f'{self.tracker}: Uploading screenshots' + ): + result = await upload_remote_file(url) + if result: + results.append(result) + + return 
results + + async def get_requests(self, meta): + if not self.config['DEFAULT'].get('search_requests', False) and not meta.get('search_requests', False): + return False + else: + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + category = meta.get('category').lower() + + if category == 'tv': + query = meta['title'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + else: + query = meta['title'] + + search_url = f'{self.requests_url}?type={category}&search={query}&condition=new' + + response = await self.session.get(search_url) + response.raise_for_status() + response_results_text = response.text + + soup = BeautifulSoup(response_results_text, 'html.parser') + + request_rows = soup.select('.table-responsive table tbody tr') + + results = [] + for row in request_rows: + link_element = row.select_one('a.torrent-filename') + + if not link_element: + continue + + name = link_element.text.strip() + link = link_element.get('href') + + all_tds = row.find_all('td') + + reward = all_tds[5].text.strip() if len(all_tds) > 5 else 'N/A' + + results.append({ + 'Name': name, + 'Link': link, + 'Reward': reward + }) + + if results: + message = f'\n{self.tracker}: [bold yellow]Your upload may fulfill the following request(s), check it out:[/bold yellow]\n\n' + for r in results: + message += f"[bold green]Name:[/bold green] {r['Name']}\n" + message += f"[bold green]Reward:[/bold green] {r['Reward']}\n" + message += f"[bold green]Link:[/bold green] {r['Link']}\n\n" + console.print(message) + + return results + + except Exception as e: + console.print(f'{self.tracker}: An error occurred while fetching requests: {e}') + return [] + + async def fetch_tag_id(self, word): + tags_url = f'{self.base_url}/ajax/tags' + params = {'term': word} + + headers = { + 'Referer': f'{self.base_url}/upload', + 'X-Requested-With': 'XMLHttpRequest' + } + try: + response = await self.session.get(tags_url, headers=headers, params=params) + response.raise_for_status() + + json_data = response.json() + + for tag_info in json_data.get('data', []): + if tag_info.get('tag') == word: + return tag_info.get('id') + + except Exception as e: + print(f"An unexpected error occurred while processing the tag '{word}': {e}") + + return None + + async def get_tags(self, meta): + genres = meta.get('keywords', '') + if not genres: + return [] + + # divides by commas, cleans spaces and normalizes to lowercase + phrases = [re.sub(r'\s+', ' ', x.strip().lower()) for x in re.split(r',+', genres) if x.strip()] + + words_to_search = set(phrases) + + tasks = [self.fetch_tag_id(word) for word in words_to_search] + + tag_ids_results = await asyncio.gather(*tasks) + + tags = [str(tag_id) for tag_id in tag_ids_results if tag_id is not None] + + if meta.get('personalrelease', False): + if self.tracker == 'AZ': + tags.insert(0, '3773') + elif self.tracker == 'CZ': + tags.insert(0, '1594') + elif self.tracker == 'PHD': + tags.insert(0, '1448') + + if self.config['TRACKERS'][self.tracker].get('internal', False): + if self.tracker == 'AZ': + tags.insert(0, '943') + elif self.tracker == 'CZ': + tags.insert(0, '938') + elif self.tracker == 'PHD': + tags.insert(0, '415') + + return tags + + async def edit_desc(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # TV stuff + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker) + if episode_overview: + desc_parts.append(f'[b]Episode:[/b] {title}') + desc_parts.append(f'[b]Overview:[/b] 
{episode_overview}') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + if not description: + return '' + + processed_desc, amount = re.subn( + r'\[center\]\[spoiler=.*? NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]', + '', + description, + flags=re.DOTALL + ) + if amount > 0: + console.print(f'{self.tracker}: Deleted from description: {amount} NFO section.') + + processed_desc, amount = re.subn(r'http[s]?://\S+|www\.\S+', '', processed_desc) + if amount > 0: + console.print(f'{self.tracker}: Deleted from description: {amount} link(s).') + + bbcode_tags_pattern = r'\[/?(size|align|left|center|right|img|table|tr|td|spoiler|url)[^\]]*\]' + processed_desc, amount = re.subn( + bbcode_tags_pattern, + '', + processed_desc, + flags=re.IGNORECASE + ) + if amount > 0: + console.print(f'{self.tracker}: Deleted from description: {amount} BBCode tag(s).') + + final_html_desc = bbcode.render_html(processed_desc) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(final_html_desc) + + return final_html_desc + + async def create_task_id(self, meta): + await self.get_media_code(meta) + data = { + '_token': self.az_class.secret_token, + 'type_id': await self.get_cat_id(meta['category']), + 'movie_id': self.media_code, + 'media_info': await self.get_file_info(meta), + } + + if self.tracker == 'AZ': + default_announce = 'https://tracker.avistaz.to/announce' + elif self.tracker == 'CZ': + default_announce = 'https://tracker.cinemaz.to/announce' + elif self.tracker == 'PHD': + default_announce = 'https://tracker.privatehd.to/announce' + + if not meta.get('debug', False): + try: + await self.common.edit_torrent(meta, self.tracker, self.source_flag, announce_url=default_announce) + upload_url_step1 = f"{self.base_url}/upload/{meta['category'].lower()}" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + + with open(torrent_path, 'rb') as torrent_file: + files = {'torrent_file': (os.path.basename(torrent_path), torrent_file, 'application/x-bittorrent')} + task_response = await self.session.post(upload_url_step1, data=data, files=files) + + if task_response.status_code == 302 and 'Location' in task_response.headers: + redirect_url = task_response.headers['Location'] + + match = re.search(r'/(\d+)$', redirect_url) + if not match: + console.print(f"{self.tracker}: Could not extract 'task_id' from redirect URL: {redirect_url}") + console.print(f'{self.tracker}: The cookie appears to be expired or invalid.') + + task_id = match.group(1) + + return { + 'task_id': task_id, + 'info_hash': await self.common.get_torrent_hash(meta, self.tracker), + 'redirect_url': redirect_url, + } + + else: + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload_Step1.html" + with open(failure_path, 'w', encoding='utf-8') as f: + f.write(task_response.text) + status_message = f'''[red]Step 1 of upload failed to {self.tracker}. Status: {task_response.status_code}, URL: {task_response.url}[/red]. 
+ [yellow]The HTML response was saved to '{failure_path}' for analysis.[/yellow]''' + + except Exception as e: + status_message = f'[red]An unexpected error occurred while uploading to {self.tracker}: {e}[/red]' + meta['skipping'] = f'{self.tracker}' + return + + else: + console.print(data) + status_message = 'Debug mode enabled, not uploading.' + + meta['tracker_status'][self.tracker]['status_message'] = status_message + + def edit_name(self, meta): + # https://avistaz.to/guides/how-to-properly-titlename-a-torrent + # https://cinemaz.to/guides/how-to-properly-titlename-a-torrent + # https://privatehd.to/rules/upload-rules + upload_name = meta.get('name').replace(meta['aka'], '').replace('Dubbed', '').replace('Dual-Audio', '') + + if self.tracker == 'PHD': + forbidden_terms = [ + r'\bLIMITED\b', + r'\bCriterion Collection\b', + r'\b\d{1,3}(?:st|nd|rd|th)\s+Anniversary Edition\b' + ] + for term in forbidden_terms: + upload_name = re.sub(term, '', upload_name, flags=re.IGNORECASE).strip() + + upload_name = re.sub(r'\bDirector[’\'`]s\s+Cut\b', 'DC', upload_name, flags=re.IGNORECASE) + upload_name = re.sub(r'\bExtended\s+Cut\b', 'Extended', upload_name, flags=re.IGNORECASE) + upload_name = re.sub(r'\bTheatrical\s+Cut\b', 'Theatrical', upload_name, flags=re.IGNORECASE) + upload_name = re.sub(r'\s{2,}', ' ', upload_name).strip() + + if meta.get('has_encode_settings', False): + upload_name = upload_name.replace('H.264', 'x264').replace('H.265', 'x265') + + tag_lower = meta['tag'].lower() + invalid_tags = ['nogrp', 'nogroup', 'unknown', '-unk-'] + + if meta['tag'] == '' or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + upload_name = re.sub(f'-{invalid_tag}', '', upload_name, flags=re.IGNORECASE) + + if self.tracker == 'CZ': + upload_name = f'{upload_name}-NoGroup' + if self.tracker == 'PHD': + upload_name = f'{upload_name}-NOGROUP' + + if meta['category'] == 'TV': + year_to_use = meta.get('year') + if not meta.get('no_year', False) and not meta.get('search_year', ''): + season_int = meta.get('season_int', 0) + season_info = meta.get('imdb_info', {}).get('seasons_summary', []) + + # Find the correct year for this specific season + season_year = None + if season_int and season_info: + for season_data in season_info: + if season_data.get('season') == season_int: + season_year = season_data.get('year') + break + + # Use the season-specific year if found, otherwise fall back to meta year + if season_year: + year_to_use = season_year + upload_name = upload_name.replace( + meta['title'], + f"{meta['title']} {year_to_use}", + 1 + ) + + if self.tracker == 'PHD': + upload_name = upload_name.replace( + str(year_to_use), + '' + ) + + if self.tracker == 'AZ': + if meta.get('tv_pack', False): + upload_name = upload_name.replace( + f"{meta['title']} {year_to_use} {meta.get('season')}", + f"{meta['title']} {meta.get('season')} {year_to_use}" + ) + + if meta.get('type', '') == 'DVDRIP': + if meta.get('source', ''): + upload_name = upload_name.replace(meta['source'], '') + + return re.sub(r'\s{2,}', ' ', upload_name) + + def get_rip_type(self, meta): + source_type = str(meta.get('type', '') or '').strip().lower() + source = str(meta.get('source', '') or '').strip().lower() + is_disc = str(meta.get('is_disc', '') or '').strip().upper() + + if is_disc == 'BDMV': + return '15' + if is_disc == 'HDDVD': + return '4' + if is_disc == 'DVD': + return '4' + + if source_type == 'remux': + if 'dvd' in source: + return '17' + if source in ('bluray', 'blu-ray'): + return '14' 
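+        # Remaining rip types map straight onto the site's rip_type_id values;
+        # an unrecognised type falls through to None.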
+ + keyword_map = { + 'bdrip': '1', + 'brrip': '3', + 'encode': '2', + 'dvdrip': '5', + 'hdrip': '6', + 'hdtv': '7', + 'sdtv': '16', + 'vcd': '8', + 'vcdrip': '9', + 'vhsrip': '10', + 'vodrip': '11', + 'webdl': '12', + 'webrip': '13', + } + + return keyword_map.get(source_type.lower()) + + async def fetch_data(self, meta): + cookie_jar = await self.cookie_validator.load_session_cookies(meta, self.tracker) + if cookie_jar: + self.session.cookies = cookie_jar + task_info = await self.create_task_id(meta) + lang_info = await self.get_lang(meta) or {} + + data = { + '_token': self.az_class.secret_token, + 'torrent_id': '', + 'type_id': await self.get_cat_id(meta['category']), + 'file_name': self.edit_name(meta), + 'anon_upload': '', + 'description': await self.edit_desc(meta), + 'qqfile': '', + 'rip_type_id': self.get_rip_type(meta), + 'video_quality_id': self.get_video_quality(meta), + 'video_resolution': self.get_resolution(meta), + 'movie_id': self.media_code, + 'languages[]': lang_info.get('languages[]'), + 'subtitles[]': lang_info.get('subtitles[]'), + 'media_info': await self.get_file_info(meta), + 'tags[]': await self.get_tags(meta), + 'screenshots[]': [''], + } + + # TV + if meta.get('category') == 'TV': + data.update({ + 'tv_collection': '1' if meta.get('tv_pack') == 0 else '2', + 'tv_season': meta.get('season_int', ''), + 'tv_episode': meta.get('episode_int', ''), + }) + + anon = not (meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False)) + if anon: + data.update({ + 'anon_upload': '1' + }) + + if not meta.get('debug', False): + try: + self.upload_url_step2 = task_info.get('redirect_url') + + # task_id and screenshot cannot be called until Step 1 is completed + data.update({ + 'info_hash': task_info.get('info_hash'), + 'task_id': task_info.get('task_id'), + 'screenshots[]': await self.get_screenshots(meta) + }) + + except Exception as e: + console.print(f'{self.tracker}: An unexpected error occurred while uploading: {e}') + + return data + + async def check_data(self, meta, data): + if not meta.get('debug', False): + if len(data['screenshots[]']) < 3: + return f'UPLOAD FAILED: The {self.tracker} image host did not return the minimum number of screenshots.' + return False + + async def upload(self, meta, disctype): + data = await self.fetch_data(meta) + status_message = '' + + issue = await self.check_data(meta, data) + if issue: + status_message = f'data error - {issue}' + else: + if not meta.get('debug', False): + response = await self.session.post(self.upload_url_step2, data=data) + if response.status_code == 302: + torrent_url = response.headers['Location'] + + # Even if you are uploading, you still need to download the .torrent from the website + # because it needs to be registered as a download before you can start seeding + download_url = torrent_url.replace('/torrent/', '/download/torrent/') + register_download = await self.session.get(download_url) + if register_download.status_code != 200: + status_message = ( + f'data error - Unable to register your upload in your download history, please go to the URL and download the torrent file before you can start seeding: {torrent_url}\n' + f'Error: {register_download.status_code}' + ) + meta['tracker_status'][self.tracker]['status_message'] = status_message + return + + await self.common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.announce_url, torrent_url) + + status_message = 'Torrent uploaded successfully.' 
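+                    # Keep the numeric torrent id parsed from the redirect URL in tracker_status.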
+ + match = re.search(r'/torrent/(\d+)', torrent_url) + if match: + torrent_id = match.group(1) + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + + else: + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload_Step2.html" + with open(failure_path, 'w', encoding='utf-8') as f: + f.write(response.text) + + status_message = ( + f"data error - It may have uploaded, go check\n" + f'Step 2 of upload to {self.tracker} failed.\n' + f'Status code: {response.status_code}\n' + f'URL: {response.url}\n' + f"The HTML response has been saved to '{failure_path}' for analysis." + ) + meta['tracker_status'][self.tracker]['status_message'] = status_message + return + + else: + console.print(data) + status_message = 'Debug mode enabled, not uploading.' + + meta['tracker_status'][self.tracker]['status_message'] = status_message + + def language_map(self): + all_lang_map = { + ('Abkhazian', 'abk', 'ab'): '1', + ('Afar', 'aar', 'aa'): '2', + ('Afrikaans', 'afr', 'af'): '3', + ('Akan', 'aka', 'ak'): '4', + ('Albanian', 'sqi', 'sq'): '5', + ('Amharic', 'amh', 'am'): '6', + ('Arabic', 'ara', 'ar'): '7', + ('Aragonese', 'arg', 'an'): '8', + ('Armenian', 'hye', 'hy'): '9', + ('Assamese', 'asm', 'as'): '10', + ('Avaric', 'ava', 'av'): '11', + ('Avestan', 'ave', 'ae'): '12', + ('Aymara', 'aym', 'ay'): '13', + ('Azerbaijani', 'aze', 'az'): '14', + ('Bambara', 'bam', 'bm'): '15', + ('Bashkir', 'bak', 'ba'): '16', + ('Basque', 'eus', 'eu'): '17', + ('Belarusian', 'bel', 'be'): '18', + ('Bengali', 'ben', 'bn'): '19', + ('Bihari languages', 'bih', 'bh'): '20', + ('Bislama', 'bis', 'bi'): '21', + ('Bokmål, Norwegian', 'nob', 'nb'): '22', + ('Bosnian', 'bos', 'bs'): '23', + ('Breton', 'bre', 'br'): '24', + ('Bulgarian', 'bul', 'bg'): '25', + ('Burmese', 'mya', 'my'): '26', + ('Cantonese', 'yue', 'zh'): '27', + ('Catalan', 'cat', 'ca'): '28', + ('Central Khmer', 'khm', 'km'): '29', + ('Chamorro', 'cha', 'ch'): '30', + ('Chechen', 'che', 'ce'): '31', + ('Chichewa', 'nya', 'ny'): '32', + ('Chinese', 'zho', 'zh'): '33', + ('Church Slavic', 'chu', 'cu'): '34', + ('Chuvash', 'chv', 'cv'): '35', + ('Cornish', 'cor', 'kw'): '36', + ('Corsican', 'cos', 'co'): '37', + ('Cree', 'cre', 'cr'): '38', + ('Croatian', 'hrv', 'hr'): '39', + ('Czech', 'ces', 'cs'): '40', + ('Danish', 'dan', 'da'): '41', + ('Dhivehi', 'div', 'dv'): '42', + ('Dutch', 'nld', 'nl'): '43', + ('Dzongkha', 'dzo', 'dz'): '44', + ('English', 'eng', 'en'): '45', + ('Esperanto', 'epo', 'eo'): '46', + ('Estonian', 'est', 'et'): '47', + ('Ewe', 'ewe', 'ee'): '48', + ('Faroese', 'fao', 'fo'): '49', + ('Fijian', 'fij', 'fj'): '50', + ('Finnish', 'fin', 'fi'): '51', + ('French', 'fra', 'fr'): '52', + ('Fulah', 'ful', 'ff'): '53', + ('Gaelic', 'gla', 'gd'): '54', + ('Galician', 'glg', 'gl'): '55', + ('Ganda', 'lug', 'lg'): '56', + ('Georgian', 'kat', 'ka'): '57', + ('German', 'deu', 'de'): '58', + ('Greek', 'ell', 'el'): '59', + ('Guarani', 'grn', 'gn'): '60', + ('Gujarati', 'guj', 'gu'): '61', + ('Haitian', 'hat', 'ht'): '62', + ('Hausa', 'hau', 'ha'): '63', + ('Hebrew', 'heb', 'he'): '64', + ('Herero', 'her', 'hz'): '65', + ('Hindi', 'hin', 'hi'): '66', + ('Hiri Motu', 'hmo', 'ho'): '67', + ('Hungarian', 'hun', 'hu'): '68', + ('Icelandic', 'isl', 'is'): '69', + ('Ido', 'ido', 'io'): '70', + ('Igbo', 'ibo', 'ig'): '71', + ('Indonesian', 'ind', 'id'): '72', + ('Interlingua', 'ina', 'ia'): '73', + ('Interlingue', 'ile', 'ie'): '74', + ('Inuktitut', 'iku', 'iu'): '75', + ('Inupiaq', 'ipk', 'ik'): '76', + ('Irish', 'gle', 'ga'): 
'77', + ('Italian', 'ita', 'it'): '78', + ('Japanese', 'jpn', 'ja'): '79', + ('Javanese', 'jav', 'jv'): '80', + ('Kalaallisut', 'kal', 'kl'): '81', + ('Kannada', 'kan', 'kn'): '82', + ('Kanuri', 'kau', 'kr'): '83', + ('Kashmiri', 'kas', 'ks'): '84', + ('Kazakh', 'kaz', 'kk'): '85', + ('Kikuyu', 'kik', 'ki'): '86', + ('Kinyarwanda', 'kin', 'rw'): '87', + ('Kirghiz', 'kir', 'ky'): '88', + ('Komi', 'kom', 'kv'): '89', + ('Kongo', 'kon', 'kg'): '90', + ('Korean', 'kor', 'ko'): '91', + ('Kuanyama', 'kua', 'kj'): '92', + ('Kurdish', 'kur', 'ku'): '93', + ('Lao', 'lao', 'lo'): '94', + ('Latin', 'lat', 'la'): '95', + ('Latvian', 'lav', 'lv'): '96', + ('Limburgan', 'lim', 'li'): '97', + ('Lingala', 'lin', 'ln'): '98', + ('Lithuanian', 'lit', 'lt'): '99', + ('Luba-Katanga', 'lub', 'lu'): '100', + ('Luxembourgish', 'ltz', 'lb'): '101', + ('Macedonian', 'mkd', 'mk'): '102', + ('Malagasy', 'mlg', 'mg'): '103', + ('Malay', 'msa', 'ms'): '104', + ('Malayalam', 'mal', 'ml'): '105', + ('Maltese', 'mlt', 'mt'): '106', + ('Mandarin', 'cmn', 'cmn'): '107', + ('Manx', 'glv', 'gv'): '108', + ('Maori', 'mri', 'mi'): '109', + ('Marathi', 'mar', 'mr'): '110', + ('Marshallese', 'mah', 'mh'): '111', + ('Mongolian', 'mon', 'mn'): '112', + ('Nauru', 'nau', 'na'): '113', + ('Navajo', 'nav', 'nv'): '114', + ('Ndebele, North', 'nde', 'nd'): '115', + ('Ndebele, South', 'nbl', 'nr'): '116', + ('Ndonga', 'ndo', 'ng'): '117', + ('Nepali', 'nep', 'ne'): '118', + ('Northern Sami', 'sme', 'se'): '119', + ('Norwegian', 'nor', 'no'): '120', + ('Norwegian Nynorsk', 'nno', 'nn'): '121', + ('Occitan (post 1500)', 'oci', 'oc'): '122', + ('Ojibwa', 'oji', 'oj'): '123', + ('Oriya', 'ori', 'or'): '124', + ('Oromo', 'orm', 'om'): '125', + ('Ossetian', 'oss', 'os'): '126', + ('Pali', 'pli', 'pi'): '127', + ('Panjabi', 'pan', 'pa'): '128', + ('Persian', 'fas', 'fa'): '129', + ('Polish', 'pol', 'pl'): '130', + ('Portuguese', 'por', 'pt'): '131', + ('Pushto', 'pus', 'ps'): '132', + ('Quechua', 'que', 'qu'): '133', + ('Romanian', 'ron', 'ro'): '134', + ('Romansh', 'roh', 'rm'): '135', + ('Rundi', 'run', 'rn'): '136', + ('Russian', 'rus', 'ru'): '137', + ('Samoan', 'smo', 'sm'): '138', + ('Sango', 'sag', 'sg'): '139', + ('Sanskrit', 'san', 'sa'): '140', + ('Sardinian', 'srd', 'sc'): '141', + ('Serbian', 'srp', 'sr'): '142', + ('Shona', 'sna', 'sn'): '143', + ('Sichuan Yi', 'iii', 'ii'): '144', + ('Sindhi', 'snd', 'sd'): '145', + ('Sinhala', 'sin', 'si'): '146', + ('Slovak', 'slk', 'sk'): '147', + ('Slovenian', 'slv', 'sl'): '148', + ('Somali', 'som', 'so'): '149', + ('Sotho, Southern', 'sot', 'st'): '150', + ('Spanish', 'spa', 'es'): '151', + ('Sundanese', 'sun', 'su'): '152', + ('Swahili', 'swa', 'sw'): '153', + ('Swati', 'ssw', 'ss'): '154', + ('Swedish', 'swe', 'sv'): '155', + ('Tagalog', 'tgl', 'tl'): '156', + ('Tahitian', 'tah', 'ty'): '157', + ('Tajik', 'tgk', 'tg'): '158', + ('Tamil', 'tam', 'ta'): '159', + ('Tatar', 'tat', 'tt'): '160', + ('Telugu', 'tel', 'te'): '161', + ('Thai', 'tha', 'th'): '162', + ('Tibetan', 'bod', 'bo'): '163', + ('Tigrinya', 'tir', 'ti'): '164', + ('Tongan', 'ton', 'to'): '165', + ('Tsonga', 'tso', 'ts'): '166', + ('Tswana', 'tsn', 'tn'): '167', + ('Turkish', 'tur', 'tr'): '168', + ('Turkmen', 'tuk', 'tk'): '169', + ('Twi', 'twi', 'tw'): '170', + ('Uighur', 'uig', 'ug'): '171', + ('Ukrainian', 'ukr', 'uk'): '172', + ('Urdu', 'urd', 'ur'): '173', + ('Uzbek', 'uzb', 'uz'): '174', + ('Venda', 'ven', 've'): '175', + ('Vietnamese', 'vie', 'vi'): '176', + ('Volapük', 'vol', 'vo'): '177', + ('Walloon', 'wln', 'wa'): 
'178', + ('Welsh', 'cym', 'cy'): '179', + ('Western Frisian', 'fry', 'fy'): '180', + ('Wolof', 'wol', 'wo'): '181', + ('Xhosa', 'xho', 'xh'): '182', + ('Yiddish', 'yid', 'yi'): '183', + ('Yoruba', 'yor', 'yo'): '184', + ('Zhuang', 'zha', 'za'): '185', + ('Zulu', 'zul', 'zu'): '186', + } + + if self.tracker == 'PHD': + all_lang_map.update({ + ('Portuguese (BR)', 'por', 'pt-br'): '187', + ('Filipino', 'fil', 'fil'): '189', + ('Mooré', 'mos', 'mos'): '188', + }) + + if self.tracker == 'AZ': + all_lang_map.update({ + ('Portuguese (BR)', 'por', 'pt-br'): '189', + ('Filipino', 'fil', 'fil'): '188', + ('Mooré', 'mos', 'mos'): '187', + }) + + if self.tracker == 'CZ': + all_lang_map.update({ + ('Portuguese (BR)', 'por', 'pt-br'): '187', + ('Mooré', 'mos', 'mos'): '188', + ('Filipino', 'fil', 'fil'): '189', + ('Bissa', 'bib', 'bib'): '190', + ('Romani', 'rom', 'rom'): '191', + }) + + self.lang_map = {} + for key_tuple, lang_id in all_lang_map.items(): + for alias in key_tuple: + if alias: + self.lang_map[alias.lower()] = lang_id diff --git a/src/trackers/AZ.py b/src/trackers/AZ.py new file mode 100644 index 000000000..9240a1b96 --- /dev/null +++ b/src/trackers/AZ.py @@ -0,0 +1,177 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +from src.trackers.COMMON import COMMON +from src.trackers.AVISTAZ_NETWORK import AZTrackerBase + + +class AZ(AZTrackerBase): + def __init__(self, config): + super().__init__(config, tracker_name='AZ') + self.config = config + self.common = COMMON(config) + self.tracker = 'AZ' + self.source_flag = 'AvistaZ' + self.banned_groups = [''] + self.base_url = 'https://avistaz.to/' + self.torrent_url = f'{self.base_url}/torrent/' + self.requests_url = f'{self.base_url}/requests' + + async def rules(self, meta): + warnings = [] + + is_disc = False + if meta.get('is_disc', ''): + is_disc = True + + video_codec = meta.get('video_codec', '') + if video_codec: + video_codec = video_codec.strip().lower() + + video_encode = meta.get('video_encode', '') + if video_encode: + video_encode = video_encode.strip().lower() + + type = meta.get('type', '') + if type: + type = type.strip().lower() + + source = meta.get('source', '') + if source: + source = source.strip().lower() + + # This also checks the rule 'FANRES content is not allowed' + if meta['category'] not in ('MOVIE', 'TV'): + warnings.append( + 'The only allowed content to be uploaded are Movies and TV Shows.\n' + 'Anything else, like games, music, software and porn is not allowed!' + ) + + if meta.get('anime', False): + warnings.append("Upload Anime content to our sister site AnimeTorrents.me instead. 
If it's on AniDB, it's an anime.") + + # https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes + + africa = [ + 'AO', 'BF', 'BI', 'BJ', 'BW', 'CD', 'CF', 'CG', 'CI', 'CM', 'CV', 'DJ', 'DZ', 'EG', 'EH', + 'ER', 'ET', 'GA', 'GH', 'GM', 'GN', 'GQ', 'GW', 'IO', 'KE', 'KM', 'LR', 'LS', 'LY', 'MA', + 'MG', 'ML', 'MR', 'MU', 'MW', 'MZ', 'NA', 'NE', 'NG', 'RE', 'RW', 'SC', 'SD', 'SH', 'SL', + 'SN', 'SO', 'SS', 'ST', 'SZ', 'TD', 'TF', 'TG', 'TN', 'TZ', 'UG', 'YT', 'ZA', 'ZM', 'ZW' + ] + america = [ + 'AG', 'AI', 'AR', 'AW', 'BB', 'BL', 'BM', 'BO', 'BQ', 'BR', 'BS', 'BV', 'BZ', 'CA', 'CL', + 'CO', 'CR', 'CU', 'CW', 'DM', 'DO', 'EC', 'FK', 'GD', 'GF', 'GL', 'GP', 'GS', 'GT', 'GY', + 'HN', 'HT', 'JM', 'KN', 'KY', 'LC', 'MF', 'MQ', 'MS', 'MX', 'NI', 'PA', 'PE', 'PM', 'PR', + 'PY', 'SR', 'SV', 'SX', 'TC', 'TT', 'US', 'UY', 'VC', 'VE', 'VG', 'VI' + ] + asia = [ + 'AE', 'AF', 'AM', 'AZ', 'BD', 'BH', 'BN', 'BT', 'CN', 'CY', 'GE', 'HK', 'ID', 'IL', 'IN', + 'IQ', 'IR', 'JO', 'JP', 'KG', 'KH', 'KP', 'KR', 'KW', 'KZ', 'LA', 'LB', 'LK', 'MM', 'MN', + 'MO', 'MV', 'MY', 'NP', 'OM', 'PH', 'PK', 'PS', 'QA', 'SA', 'SG', 'SY', 'TH', 'TJ', 'TL', + 'TM', 'TR', 'TW', 'UZ', 'VN', 'YE' + ] + europe = [ + 'AD', 'AL', 'AT', 'AX', 'BA', 'BE', 'BG', 'BY', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', + 'FO', 'FR', 'GB', 'GG', 'GI', 'GR', 'HR', 'HU', 'IE', 'IM', 'IS', 'IT', 'JE', 'LI', 'LT', + 'LU', 'LV', 'MC', 'MD', 'ME', 'MK', 'MT', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'RU', 'SE', + 'SI', 'SJ', 'SK', 'SM', 'SU', 'UA', 'VA', 'XC' + ] + oceania = [ + 'AS', 'AU', 'CC', 'CK', 'CX', 'FJ', 'FM', 'GU', 'HM', 'KI', 'MH', 'MP', 'NC', 'NF', 'NR', + 'NU', 'NZ', 'PF', 'PG', 'PN', 'PW', 'SB', 'TK', 'TO', 'TV', 'UM', 'VU', 'WF', 'WS' + ] + + az_allowed_countries = [ + 'BD', 'BN', 'BT', 'CN', 'HK', 'ID', 'IN', 'JP', 'KH', 'KP', 'KR', 'LA', 'LK', + 'MM', 'MN', 'MO', 'MY', 'NP', 'PH', 'PK', 'SG', 'TH', 'TL', 'TW', 'VN' + ] + + phd_countries = [ + 'AG', 'AI', 'AU', 'BB', 'BM', 'BS', 'BZ', 'CA', 'CW', 'DM', 'GB', 'GD', 'IE', + 'JM', 'KN', 'KY', 'LC', 'MS', 'NZ', 'PR', 'TC', 'TT', 'US', 'VC', 'VG', 'VI', + ] + + all_countries = africa + america + asia + europe + oceania + cinemaz_countries = list(set(all_countries) - set(phd_countries) - set(az_allowed_countries)) + + origin_countries_codes = meta.get('origin_country', []) + + if any(code in phd_countries for code in origin_countries_codes): + warnings.append( + 'DO NOT upload content from major English speaking countries ' + '(USA, UK, Canada, etc). Upload this to our sister site PrivateHD.to instead.' + ) + + elif any(code in cinemaz_countries for code in origin_countries_codes): + warnings.append( + 'DO NOT upload non-allowed Asian or Western content. ' + 'Upload this content to our sister site CinemaZ.to instead.' 
+ ) + + if not is_disc: + if meta.get('container') not in ['mkv', 'mp4', 'avi']: + warnings.append('Allowed containers: MKV, MP4, AVI.') + + if not is_disc: + if video_codec not in ('avc', 'h.264', 'h.265', 'x264', 'x265', 'hevc', 'divx', 'xvid'): + warnings.append( + f'Video codec not allowed in your upload: {video_codec}.\n' + 'Allowed: H264/x264/AVC, H265/x265/HEVC, DivX/Xvid\n' + 'Exceptions:\n' + ' MPEG2 for Full DVD discs and HDTV recordings\n' + " VC-1/MPEG2 for Bluray only if that's what is on the disc" + ) + + if is_disc: + pass + else: + allowed_keywords = [ + 'AC3', 'Audio Layer III', 'MP3', 'Dolby Digital', 'Dolby TrueHD', + 'DTS', 'DTS-HD', 'FLAC', 'AAC', 'Dolby' + ] + + is_untouched_opus = False + audio_field = meta.get('audio', '') + if isinstance(audio_field, str) and 'opus' in audio_field.lower() and bool(meta.get('untouched', False)): + is_untouched_opus = True + + audio_tracks = [] + media_tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + for track in media_tracks: + if track.get('@type') == 'Audio': + codec_info = track.get('Format_Commercial_IfAny') or track.get('Format') + codec = codec_info if isinstance(codec_info, str) else '' + audio_tracks.append({ + 'codec': codec, + 'language': track.get('Language', '') + }) + + invalid_codecs = [] + for track in audio_tracks: + codec = track['codec'] + if not codec: + continue + + if 'opus' in codec.lower(): + if is_untouched_opus: + continue + else: + invalid_codecs.append(codec) + continue + + is_allowed = any(kw.lower() in codec.lower() for kw in allowed_keywords) + if not is_allowed: + invalid_codecs.append(codec) + + if invalid_codecs: + unique_invalid_codecs = sorted(list(set(invalid_codecs))) + warnings.append( + f"Unallowed audio codec(s) detected: {', '.join(unique_invalid_codecs)}\n" + f'Allowed codecs: AC3 (Dolby Digital), Dolby TrueHD, DTS, DTS-HD (MA), FLAC, AAC, MP3, etc.\n' + f'Exceptions: Untouched Opus from source; Uncompressed codecs from Blu-ray discs (PCM, LPCM).' 
+ ) + + if warnings: + all_warnings = '\n\n'.join(filter(None, warnings)) + return all_warnings + + return diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 54320cafc..b7dc67cc2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -1,15 +1,13 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord import asyncio -import requests -from difflib import SequenceMatcher import os import platform -import bencodepy import httpx import re import cli_ui -import glob +import aiofiles from src.trackers.COMMON import COMMON from src.console import console from src.rehostimages import check_hosts @@ -23,18 +21,18 @@ class BHD(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config self.tracker = 'BHD' self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' self.torrent_url = 'https://beyond-hd.me/details/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK'] + self.requests_url = f"https://beyond-hd.me/api/requests/%7Bself.config['TRACKERS']['BHD']['api_key'].strip()}" + self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK', 'CRUCiBLE', 'iFT'] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) + async def check_image_hosts(self, meta): url_host_mapping = { "ibb.co": "imgbb", "ptpimg.me": "ptpimg", @@ -46,6 +44,10 @@ async def upload(self, meta, disctype): approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost', 'bhd', 'bam'] await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) + return + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) source_id = await self.get_source(meta['source']) @@ -60,29 +62,24 @@ async def upload(self, meta, disctype): else: anon = 1 - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') + mi_dump = None + if meta['is_disc'] == "BDMV": + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8') as f: + desc = await f.read() + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" files = { 'mediainfo': mi_dump,
+ 'file': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent'), } - open_torrent = None - if os.path.exists(torrent_file): - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files['file'] = open_torrent.read() - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo_file'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': bhd_name, @@ -120,60 +117,50 @@ async def upload(self, meta, disctype): if len(tags) > 0: data['tags'] = ','.join(tags) headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.3 ({platform.system()} {platform.release()})' } url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() details_link = {} if meta['debug'] is False: - response = requests.post(url=url, files=files, data=data, headers=headers) try: - response = response.json() - if int(response['status_code']) == 0: - console.print(f"[red]{response['status_message']}") - if response['status_message'].startswith('Invalid imdb_id'): - console.print('[yellow]RETRYING UPLOAD') - data['imdb_id'] = 1 - response = requests.post(url=url, files=files, data=data, headers=headers) - response = response.json() - elif response['status_message'].startswith('Invalid name value'): - console.print(f"[bold yellow]Submitted Name: {bhd_name}") - - if 'status_message' in response: - match = re.search(r"https://beyond-hd\.me/torrent/download/.*\.(\d+)", response['status_message']) - if match: - torrent_id = match.group(1) - meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id - details_link = f"https://beyond-hd.me/details/%7Btorrent_id%7D" - else: - console.print("[yellow]No valid details link found in status_message.") - - meta['tracker_status'][self.tracker]['status_message'] = response + async with httpx.AsyncClient(timeout=60) as client: + response = await client.post(url=url, files=files, data=data, headers=headers) + response_json = response.json() + if int(response_json['status_code']) == 0: + console.print(f"[red]{response_json['status_message']}") + if response_json['status_message'].startswith('Invalid imdb_id'): + console.print('[yellow]RETRYING UPLOAD') + data['imdb_id'] = 1 + response = await client.post(url=url, files=files, data=data, headers=headers) + response_json = response.json() + elif response_json['status_message'].startswith('Invalid name value'): + console.print(f"[bold yellow]Submitted Name: {bhd_name}") + + if 'status_message' in response_json: + match = re.search(r"https://beyond-hd\.me/torrent/download/.*\.(\d+)", response_json['status_message']) + if match: + torrent_id = match.group(1) + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + details_link = f"https://beyond-hd.me/details/%7Btorrent_id%7D" + else: + console.print("[yellow]No valid details link found in status_message.") + + meta['tracker_status'][self.tracker]['status_message'] = response.json() except Exception as e: - console.print("It may have uploaded, go check") - console.print(f"Error: {e}") + meta['tracker_status'][self.tracker]['status_message'] = f"Error: {e}" return else: - console.print("[cyan]Request Data:") + console.print("[cyan]BHD Request Data:") console.print(data)
meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." if details_link: try: - open_torrent.seek(0) - torrent_data = open_torrent.read() - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - with open(torrent_file, 'wb') as updated_torrent_file: - updated_torrent_file.write(bencodepy.encode(torrent)) - - console.print(f"Torrent file updated with comment: {details_link}") + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), details_link) except Exception as e: console.print(f"Error while editing the torrent file: {e}") - if open_torrent is not None: - open_torrent.close() - async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', @@ -236,35 +223,32 @@ async def get_type(self, meta): return type_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - base = base.replace("[user]", "").replace("[/user]", "") - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + base_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" + async with aiofiles.open(base_path, 'r', encoding='utf-8') as f: + base = await f.read() + async with aiofiles.open(desc_path, 'w', encoding='utf-8') as desc: if meta.get('discs', []) != []: discs = meta['discs'] if discs[0]['type'] == "DVD": - desc.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]") - desc.write("\n") + await desc.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]") + await desc.write("\n") if len(discs) >= 2: for each in discs[1:]: if each['type'] == "BDMV": - desc.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]") - desc.write("\n") + await desc.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]") + await desc.write("\n") elif each['type'] == "DVD": - desc.write(f"{each['name']}:\n") - desc.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]") - desc.write("\n") + await desc.write(f"{each['name']}:\n") + await desc.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]") + await desc.write("\n") elif each['type'] == "HDDVD": - desc.write(f"{each['name']}:\n") - desc.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") - desc.write("\n") - desc.write(base.replace("[img]", "[img width=300]")) - try: - # If screensPerRow is set, use that to determine how many screenshots should be on each row. 
Otherwise, use 2 as default - screensPerRow = int(self.config['DEFAULT'].get('screens_per_row', 2)) - except Exception: - screensPerRow = 2 + await desc.write(f"{each['name']}:\n") + await desc.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") + await desc.write("\n") + await desc.write(base.replace("[img]", "[img width=300]")) if meta.get('comparison') and meta.get('comparison_groups'): - desc.write("[center]") + await desc.write("[center]") comparison_groups = meta.get('comparison_groups', {}) sorted_group_indices = sorted(comparison_groups.keys(), key=lambda x: int(x)) @@ -275,7 +259,7 @@ async def edit_desc(self, meta): comp_sources.append(group_name) sources_string = ", ".join(comp_sources) - desc.write(f"[comparison={sources_string}]\n") + await desc.write(f"[comparison={sources_string}]\n") images_per_group = min([ len(comparison_groups[idx].get('urls', [])) @@ -289,28 +273,35 @@ async def edit_desc(self, meta): if img_idx < len(urls): img_url = urls[img_idx].get('raw_url', '') if img_url: - desc.write(f"{img_url}\n") + await desc.write(f"{img_url}\n") - desc.write("[/comparison][/center]\n\n") + await desc.write("[/comparison][/center]\n\n") + try: + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + await desc.write(tonemapped_header) + await desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") if f'{self.tracker}_images_key' in meta: images = meta[f'{self.tracker}_images_key'] else: images = meta['image_list'] if len(images) > 0: - desc.write("[align=center]") + await desc.write("[align=center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] if (each == len(images) - 1): - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") - elif (each + 1) % screensPerRow == 0: - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n") - desc.write("\n") + await desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") + elif (each + 1) % 2 == 0: + await desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n") + await desc.write("\n") else: - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") - desc.write("[/align]") - desc.write(self.signature) - desc.close() + await desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") + await desc.write("[/align]") + await desc.write(f"\n[align=right][url=https://github.com/Audionut/Upload-Assistant][size=10]{meta['ua_signature']}[/size][/url][/align]") + await desc.close() return async def search_existing(self, meta, disctype): @@ -320,7 +311,7 @@ async def search_existing(self, meta, disctype): "-ncmt", "-tdd", "-flux", "-crfw", "-sonny", "-zr-", "-mkvultra", "-rpg", "-w4nk3r", "-irobot", "-beyondhd" )): - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): console.print("[bold red]This is an internal BHD release, skipping upload[/bold red]") if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): pass @@ -340,6 +331,33 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "BHD" return [] + if meta['type'] not in ['WEBDL']: + if meta.get('tag', "") and any(x in meta['tag'] for x in ['EVO']): + if not 
meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Group {meta["tag"]} is only allowed for raw type content at BHD[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "BHD" + return [] + else: + meta['skipping'] = "BHD" + return [] + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if (not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False))): + console.print('[bold red]Porn/xxx is not allowed at BHD.') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "BHD" + return [] + else: + meta['skipping'] = "BHD" + return [] + dupes = [] category = meta['category'] tmdbID = "movie" if category == 'MOVIE' else "tv" @@ -351,18 +369,20 @@ async def search_existing(self, meta, disctype): type = None else: type = await self.get_type(meta) - data = { 'action': 'search', 'tmdb_id': f"{tmdbID}/{meta['tmdb']}", - 'categories': category, - 'types': type + 'types': type, + 'categories': category } if meta['sd'] == 1: data['categories'] = None data['types'] = None if meta['category'] == 'TV': data['search'] = f"{meta.get('season', '')}" + rss_key = self.config['TRACKERS']['BHD'].get('bhd_rss_key', "") != "" + if rss_key: + data['rsskey'] = self.config['TRACKERS']['BHD']['bhd_rss_key'].strip() url = f"https://beyond-hd.me/api/torrents/%7Bself.config['TRACKERS']['BHD']['api_key'].strip()}" try: @@ -374,25 +394,22 @@ async def search_existing(self, meta, disctype): for each in data['results']: result = { 'name': each['name'], - 'size': each['size'] + 'link': each['url'], + 'size': each['size'], } - difference = SequenceMatcher( - None, - meta['clean_name'].replace('DD+', 'DDP'), - result['name'] - ).ratio() - if difference >= 0.05: - dupes.append(result) + if rss_key: + result['download'] = each.get('download_url', None) + dupes.append(result) else: - console.print(f"[bold red]Failed to search torrents. API Error: {data.get('message', 'Unknown Error')}") + console.print(f"[bold red]BHD failed to search torrents. API Error: {data.get('message', 'Unknown Error')}") else: - console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + console.print(f"[bold red]BHD HTTP request failed. 
Status: {response.status_code}") except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") + console.print("[bold red]BHD request timed out after 5 seconds") except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") + console.print(f"[bold red]BHD unable to search for existing torrents: {e}") except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") + console.print(f"[bold red]BHD unexpected error: {e}") await asyncio.sleep(5) return dupes diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index ffa3abed3..4e466b0bf 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord import requests @@ -81,7 +82,7 @@ async def upload(self, meta, disctype): 'sdescr': " ", 'descr': media_info if bd_dump is None else "Disc so Check Mediainfo dump ", 'screen': desc, - 'url': f"https://www.tvmaze.com/shows/%7Bmeta['tvmaze_id']}" if meta['category'] == 'TV' else f"https://www.imdb.com/title/tt%7Bmeta['imdb_id']}", + 'url': f"https://www.tvmaze.com/shows/%7Bmeta['tvmaze_id']}" if meta['category'] == 'TV' else str(meta.get('imdb_info', {}).get('imdb_url', '')), 'format': 'json' } diff --git a/src/trackers/BJS.py b/src/trackers/BJS.py new file mode 100644 index 000000000..ee6970928 --- /dev/null +++ b/src/trackers/BJS.py @@ -0,0 +1,1247 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import aiofiles +import asyncio +import httpx +import json +import langcodes +import os +import platform +import pycountry +import re +import unicodedata +from bs4 import BeautifulSoup +from datetime import datetime +from langcodes.tag_parser import LanguageTagError +from pathlib import Path +from src.bbcode import BBCODE +from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language +from src.tmdb import get_tmdb_localized_data +from src.trackers.COMMON import COMMON +from tqdm import tqdm +from typing import Optional +from urllib.parse import urlparse + + +class BJS: + def __init__(self, config): + self.config = config + self.common = COMMON(config) + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) + self.tracker = 'BJS' + self.banned_groups = [] + self.source_flag = 'BJ' + self.base_url = 'https://bj-share.info/' + self.torrent_url = 'https://bj-share.info/torrents.php?torrentid=' + self.requests_url = f'{self.base_url}/requests.php?'
+ self.auth_token = None + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f'Upload Assistant ({platform.system()} {platform.release()})' + }, timeout=60.0) + + async def get_additional_checks(self, meta): + should_continue = True + + # Stops uploading when an external subtitle is detected + video_path = meta.get('path') + directory = video_path if os.path.isdir(video_path) else os.path.dirname(video_path) + subtitle_extensions = ('.srt', '.sub', '.ass', '.ssa', '.idx', '.smi', '.psb') + + if any(f.lower().endswith(subtitle_extensions) for f in os.listdir(directory)): + console.print(f'{self.tracker}: [bold red]ERRO: Esta ferramenta não suporta o upload de legendas em arquivos separados.[/bold red]') + return False + + return should_continue + + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + error_text='login.php', + token_pattern=r'name="auth" value="([^"]+)"' + ) + + async def load_localized_data(self, meta): + localized_data_file = f'{meta["base_dir"]}/tmp/{meta["uuid"]}/tmdb_localized_data.json' + main_ptbr_data = {} + episode_ptbr_data = {} + data = {} + + if os.path.isfile(localized_data_file): + try: + async with aiofiles.open(localized_data_file, 'r', encoding='utf-8') as f: + content = await f.read() + data = json.loads(content) + except json.JSONDecodeError: + print(f'Warning: Could not decode JSON from {localized_data_file}') + data = {} + except Exception as e: + print(f'Error reading file {localized_data_file}: {e}') + data = {} + + main_ptbr_data = data.get('pt-BR', {}).get('main') + + if not main_ptbr_data: + main_ptbr_data = await get_tmdb_localized_data( + meta, + data_type='main', + language='pt-BR', + append_to_response='credits,videos,content_ratings' + ) + + if self.config['DEFAULT']['episode_overview']: + if meta['category'] == 'TV' and not meta.get('tv_pack'): + episode_ptbr_data = data.get('pt-BR', {}).get('episode') + if not episode_ptbr_data: + episode_ptbr_data = await get_tmdb_localized_data( + meta, + data_type='episode', + language='pt-BR', + append_to_response='' + ) + + self.main_tmdb_data = main_ptbr_data or {} + self.episode_tmdb_data = episode_ptbr_data or {} + + return + + def get_container(self, meta): + container = meta.get('container', '') + if container in ['mkv', 'mp4', 'avi', 'vob', 'm2ts', 'ts']: + return container.upper() + + return 'Outro' + + def get_type(self, meta): + if meta.get('anime'): + return '13' + + category_map = { + 'TV': '1', + 'MOVIE': '0' + } + + return category_map.get(meta['category']) + + async def get_languages(self, meta): + possible_languages = { + 'Alemão', 'Árabe', 'Argelino', 'Búlgaro', 'Cantonês', 'Chinês', + 'Coreano', 'Croata', 'Dinamarquês', 'Egípcio', 'Espanhol', 'Estoniano', + 'Filipino', 'Finlandês', 'Francês', 'Grego', 'Hebraico', 'Hindi', + 'Holandês', 'Húngaro', 'Indonésio', 'Inglês', 'Islandês', 'Italiano', + 'Japonês', 'Macedônio', 'Malaio', 'Marati', 'Nigeriano', 'Norueguês', + 'Persa', 'Polaco', 'Polonês', 'Português', 'Português (pt)', 'Romeno', + 'Russo', 'Sueco', 'Tailandês', 'Tamil', 'Tcheco', 'Telugo', 'Turco', + 'Ucraniano', 'Urdu', 'Vietnamita', 'Zulu', 'Outro' + } + lang_code = self.main_tmdb_data.get('original_language') + origin_countries = self.main_tmdb_data.get('origin_country', []) + + if not lang_code: + return 'Outro' + + language_name = None + + if 
lang_code == 'pt': + if 'PT' in origin_countries: + language_name = 'Português (pt)' + else: + language_name = 'Português' + else: + try: + language_name = langcodes.Language.make(lang_code).display_name('pt').capitalize() + except LanguageTagError: + language_name = lang_code + + if language_name in possible_languages: + return language_name + else: + return 'Outro' + + async def get_audio(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + audio_languages = set(meta.get('audio_languages', [])) + + portuguese_languages = ['Portuguese', 'Português', 'pt'] + + has_pt_audio = any(lang in portuguese_languages for lang in audio_languages) + + original_lang = meta.get('original_language', '').lower() + is_original_pt = original_lang in portuguese_languages + + if has_pt_audio: + if is_original_pt: + return 'Nacional' + elif len(audio_languages) > 1: + return 'Dual Áudio' + else: + return 'Dublado' + + return 'Legendado' + + async def get_subtitle(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + found_language_strings = meta.get('subtitle_languages', []) + + subtitle_type = 'Nenhuma' + + if 'Portuguese' in found_language_strings: + subtitle_type = 'Embutida' + + return subtitle_type + + def get_resolution(self, meta): + if meta.get('is_disc') == 'BDMV': + resolution_str = meta.get('resolution', '') + try: + height_num = int(resolution_str.lower().replace('p', '').replace('i', '')) + height = str(height_num) + + width_num = round((16 / 9) * height_num) + width = str(width_num) + except (ValueError, TypeError): + pass + + else: + video_mi = meta['mediainfo']['media']['track'][1] + width = video_mi['Width'] + height = video_mi['Height'] + + return { + 'width': width, + 'height': height + } + + def get_video_codec(self, meta): + CODEC_MAP = { + 'x265': 'x265', + 'h.265': 'H.265', + 'x264': 'x264', + 'h.264': 'H.264', + 'av1': 'AV1', + 'divx': 'DivX', + 'h.263': 'H.263', + 'kvcd': 'KVCD', + 'mpeg-1': 'MPEG-1', + 'mpeg-2': 'MPEG-2', + 'realvideo': 'RealVideo', + 'vc-1': 'VC-1', + 'vp6': 'VP6', + 'vp8': 'VP8', + 'vp9': 'VP9', + 'windows media video': 'Windows Media Video', + 'xvid': 'XviD', + 'hevc': 'H.265', + 'avc': 'H.264', + } + + video_encode = meta.get('video_encode', '').lower() + video_codec = meta.get('video_codec', '') + + search_text = f'{video_encode} {video_codec.lower()}' + + for key, value in CODEC_MAP.items(): + if key in search_text: + return value + + return video_codec if video_codec else 'Outro' + + def get_audio_codec(self, meta): + priority_order = [ + 'DTS-X', 'E-AC-3 JOC', 'TrueHD', 'DTS-HD', 'LPCM', 'PCM', 'FLAC', + 'DTS-ES', 'DTS', 'E-AC-3', 'AC3', 'AAC', 'Opus', 'Vorbis', 'MP3', 'MP2' + ] + + codec_map = { + 'DTS-X': ['DTS:X', 'DTS-X'], + 'E-AC-3 JOC': ['E-AC-3 JOC', 'DD+ JOC'], + 'TrueHD': ['TRUEHD'], + 'DTS-HD': ['DTS-HD', 'DTSHD'], + 'LPCM': ['LPCM'], + 'PCM': ['PCM'], + 'FLAC': ['FLAC'], + 'DTS-ES': ['DTS-ES'], + 'DTS': ['DTS'], + 'E-AC-3': ['E-AC-3', 'DD+'], + 'AC3': ['AC3', 'DD'], + 'AAC': ['AAC'], + 'Opus': ['OPUS'], + 'Vorbis': ['VORBIS'], + 'MP2': ['MP2'], + 'MP3': ['MP3'] + } + + audio_description = meta.get('audio') + + if not audio_description or not isinstance(audio_description, str): + return 'Outro' + + audio_upper = audio_description.upper() + + for codec_name in priority_order: + search_terms = codec_map.get(codec_name, []) + + for term in search_terms: + if term.upper() in audio_upper: + return 
codec_name + + return 'Outro' + + async def get_title(self, meta): + title = self.main_tmdb_data.get('name') or self.main_tmdb_data.get('title') or '' + + return title if title and title != meta.get('title') else '' + + async def build_description(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[align=center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/align]") + + # TV + title = self.episode_tmdb_data.get('name', '') + episode_image = self.episode_tmdb_data.get('still_path', '') + episode_overview = self.episode_tmdb_data.get('overview', '') + + if episode_overview: + desc_parts.append(f'[align=center]{title}[/align]') + + if episode_image: + desc_parts.append(f"[align=center][img]https://image.tmdb.org/t/p/w300{episode_image}[/img][/align]") + + desc_parts.append(f'[align=center]{episode_overview}[/align]') + + # File information + if meta.get('is_disc', '') == 'DVD': + desc_parts.append(f'[hide=DVD MediaInfo][pre]{await builder.get_mediainfo_section(meta, self.tracker)}[/pre][/hide]') + + bd_info = await builder.get_bdinfo_section(meta) + if bd_info: + desc_parts.append(f'[hide=BDInfo][pre]{bd_info}[/pre][/hide]') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Signature + desc_parts.append(f"[align=center][url=https://github.com/Audionut/Upload-Assistant]Upload realizado via {meta['ua_name']} {meta['current_version']}[/url][/align]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.convert_named_spoiler_to_named_hide(description) + description = bbcode.convert_spoiler_to_hide(description) + description = bbcode.remove_img_resize(description) + description = bbcode.convert_to_align(description) + description = bbcode.remove_list(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + async def get_trailer(self, meta): + video_results = self.main_tmdb_data.get('videos', {}).get('results', []) + youtube_code = video_results[-1].get('key', '') if video_results else '' + if youtube_code: + youtube = f'http://www.youtube.com/watch?v={youtube_code}' + else: + youtube = meta.get('youtube') or '' + + return youtube + + async def get_rating(self, meta): + ratings = self.main_tmdb_data.get('content_ratings', {}).get('results', []) + + if not ratings: + return '' + + VALID_BR_RATINGS = {'L', '10', '12', '14', '16', '18'} + + br_rating = '' + us_rating = '' + + for item in ratings: + if item.get('iso_3166_1') == 'BR' and item.get('rating') in VALID_BR_RATINGS: + br_rating = item['rating'] + if br_rating == 'L': + br_rating = 'Livre' + else: + br_rating = f'{br_rating} anos' + break + + # Use US rating as fallback + if item.get('iso_3166_1') == 'US' and not us_rating: + us_rating = item.get('rating', '') + + return br_rating or us_rating or '' + + async def get_tags(self, meta): + tags = '' + + if self.main_tmdb_data and isinstance(self.main_tmdb_data.get('genres'), list): + genre_names = [ + g.get('name', '') for g in 
self.main_tmdb_data['genres'] + if isinstance(g.get('name'), str) and g.get('name').strip() + ] + + if genre_names: + tags = ', '.join( + unicodedata.normalize('NFKD', name) + .encode('ASCII', 'ignore') + .decode('utf-8') + .replace(' ', '.') + .lower() + for name in genre_names + ) + + if not tags: + tags = await self.common.async_input(prompt=f'Digite os gêneros (no formato do {self.tracker}): ') + + return tags + + def _extract_upload_params(self, meta): + is_tv_pack = bool(meta.get('tv_pack')) + upload_season_num = None + upload_episode_num = None + upload_resolution = meta.get('resolution') + + if meta['category'] == 'TV': + season_match = meta.get('season', '').replace('S', '') + if season_match: + upload_season_num = season_match + + if not is_tv_pack: + episode_match = meta.get('episode', '').replace('E', '') + if episode_match: + upload_episode_num = episode_match + + return { + 'is_tv_pack': is_tv_pack, + 'upload_season_num': upload_season_num, + 'upload_episode_num': upload_episode_num, + 'upload_resolution': upload_resolution + } + + def _check_episode_on_page(self, torrent_table, upload_season_num, upload_episode_num): + if not upload_season_num or not upload_episode_num: + return False + + temp_season_on_page = None + upload_episode_str = f'E{upload_episode_num}' + + for row in torrent_table.find_all('tr'): + if 'season_header' in row.get('class', []): + s_match = re.search(r'Temporada (\d+)', row.get_text(strip=True)) + if s_match: + temp_season_on_page = s_match.group(1) + continue + + if (temp_season_on_page == upload_season_num and row.get('id', '').startswith('torrent')): + link = row.find('a', onclick=re.compile(r'loadIfNeeded\(')) + if (link and re.search(r'\b' + re.escape(upload_episode_str) + r'\b', link.get_text(strip=True))): + return True + return False + + def _should_process_torrent(self, row, current_season, current_resolution, params, episode_found_on_page, meta): + description_text = ' '.join(row.find('a', onclick=re.compile(r'loadIfNeeded\(')).get_text(strip=True).split()) + + # TV Logic + if meta['category'] == 'TV': + if current_season == params['upload_season_num']: + existing_episode_match = re.search(r'E(\d+)', description_text) + is_current_row_a_pack = not existing_episode_match + + if params['is_tv_pack']: + return is_current_row_a_pack, False + else: + if episode_found_on_page: + if existing_episode_match: + existing_episode_num = existing_episode_match.group(1) + return existing_episode_num == params['upload_episode_num'], False + else: + return is_current_row_a_pack, True + + # Movie Logic + elif meta['category'] == 'MOVIE': + if params['upload_resolution'] and current_resolution == params['upload_resolution']: + return True, False + + return False, False + + def _extract_torrent_ids(self, rows_to_process): + ajax_tasks = [] + + for row, process_folder_name in rows_to_process: + id_link = row.find('a', onclick=re.compile(r'loadIfNeeded\(')) + if not id_link: + continue + + onclick_attr = id_link['onclick'] + id_match = re.search(r"loadIfNeeded\('(\d+)',\s*'(\d+)'", onclick_attr) + if not id_match: + continue + + torrent_id = id_match.group(1) + group_id = id_match.group(2) + description_text = ' '.join(id_link.get_text(strip=True).split()) + torrent_link = f'{self.torrent_url}{torrent_id}' + + size_tag = row.find('td', class_='number_column nobr') + torrent_size_str = size_tag.get_text(strip=True) if size_tag else None + + ajax_tasks.append({ + 'torrent_id': torrent_id, + 'group_id': group_id, + 'description_text': description_text, + 
'process_folder_name': process_folder_name, + 'size': torrent_size_str, + 'link': torrent_link + }) + + return ajax_tasks + + async def _fetch_torrent_content(self, task_info): + torrent_id = task_info['torrent_id'] + group_id = task_info['group_id'] + ajax_url = f'{self.base_url}/ajax.php?action=torrent_content&torrentid={torrent_id}&groupid={group_id}' + + try: + ajax_response = await self.session.get(ajax_url) + ajax_response.raise_for_status() + ajax_soup = BeautifulSoup(ajax_response.text, 'html.parser') + + return { + 'success': True, + 'soup': ajax_soup, + 'task_info': task_info + } + except Exception as e: + console.print(f'[yellow]Não foi possível buscar a lista de arquivos para o torrent {torrent_id}: {e}[/yellow]') + return { + 'success': False, + 'error': e, + 'task_info': task_info + } + + def _extract_item_name(self, ajax_soup, description_text, is_tv_pack, process_folder_name): + item_name = None + is_existing_torrent_a_disc = any( + keyword in description_text.lower() + for keyword in ['bd25', 'bd50', 'bd66', 'bd100', 'dvd5', 'dvd9', 'm2ts'] + ) + + if is_existing_torrent_a_disc or is_tv_pack or process_folder_name: + path_div = ajax_soup.find('div', class_='filelist_path') + if path_div and path_div.get_text(strip=True): + item_name = path_div.get_text(strip=True).strip('/') + else: + file_table = ajax_soup.find('table', class_='filelist_table') + if file_table: + first_file_row = file_table.find('tr', class_=lambda x: x != 'colhead_dark') + if first_file_row and first_file_row.find('td'): + item_name = first_file_row.find('td').get_text(strip=True) + else: + file_table = ajax_soup.find('table', class_='filelist_table') + if file_table: + first_row = file_table.find('tr', class_=lambda x: x != 'colhead_dark') + if first_row and first_row.find('td'): + item_name = first_row.find('td').get_text(strip=True) + + return item_name + + async def _process_ajax_responses(self, ajax_tasks, params): + if not ajax_tasks: + return [] + + ajax_results = await asyncio.gather( + *[self._fetch_torrent_content(task) for task in ajax_tasks], + return_exceptions=True + ) + + found_items = [] + for result in ajax_results: + if isinstance(result, Exception): + console.print(f'[yellow]Erro na chamada AJAX: {result}[/yellow]') + continue + + if not result['success']: + continue + + task_info = result['task_info'] + item_name = self._extract_item_name( + result['soup'], + task_info['description_text'], + params['is_tv_pack'], + task_info['process_folder_name'] + ) + + if item_name: + found_items.append({ + 'name': item_name, + 'size': task_info.get('size', ''), + 'link': task_info.get('link', '') + }) + + return found_items + + async def _fetch_search_page(self, meta): + search_url = f"{self.base_url}/torrents.php?searchstr={meta['imdb_info']['imdbID']}" + + response = await self.session.get(search_url) + if response.status_code in [301, 302, 307] and 'Location' in response.headers: + redirect_url = f"{self.base_url}/{response.headers['Location']}" + response = await self.session.get(redirect_url) + + return BeautifulSoup(response.text, 'html.parser') + + async def search_existing(self, meta, disctype): + should_continue = await self.get_additional_checks(meta) + if not should_continue: + meta['skipping'] = f'{self.tracker}' + return + + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + + BJS.already_has_the_info = False + params = self._extract_upload_params(meta) + + soup = await self._fetch_search_page(meta) + torrent_details_table = 
soup.find('div', class_='main_column') + + if not torrent_details_table: + return [] + + episode_found_on_page = False + if (meta['category'] == 'TV' and not params['is_tv_pack'] and params['upload_season_num'] and params['upload_episode_num']): + episode_found_on_page = self._check_episode_on_page( + torrent_details_table, + params['upload_season_num'], + params['upload_episode_num'] + ) + + rows_to_process = [] + current_season_on_page = None + current_resolution_on_page = None + + for row in torrent_details_table.find_all('tr'): + if 'resolution_header' in row.get('class', []): + header_text = row.get_text(strip=True) + resolution_match = re.search(r'(\d{3,4}p)', header_text) + if resolution_match: + current_resolution_on_page = resolution_match.group(1) + continue + + if 'season_header' in row.get('class', []): + season_header_text = row.get_text(strip=True) + season_match = re.search(r'Temporada (\d+)', season_header_text) + if season_match: + current_season_on_page = season_match.group(1) + continue + + if not row.get('id', '').startswith('torrent'): + continue + + id_link = row.find('a', onclick=re.compile(r'loadIfNeeded\(')) + if not id_link: + continue + + should_process, process_folder_name = self._should_process_torrent( + row, current_season_on_page, current_resolution_on_page, + params, episode_found_on_page, meta + ) + + if should_process: + rows_to_process.append((row, process_folder_name)) + + ajax_tasks = self._extract_torrent_ids(rows_to_process) + found_items = await self._process_ajax_responses(ajax_tasks, params) + BJS.already_has_the_info = bool(found_items) + + return found_items + + except Exception as e: + console.print(f'[bold red]Ocorreu um erro inesperado ao processar a busca: {e}[/bold red]') + import traceback + traceback.print_exc() + return [] + + def get_edition(self, meta): + edition_str = meta.get('edition', '').lower() + if not edition_str: + return '' + + edition_map = { + "director's cut": "Director's Cut", + 'extended': 'Extended Edition', + 'imax': 'IMAX', + 'open matte': 'Open Matte', + 'noir': 'Noir Edition', + 'theatrical': 'Theatrical Cut', + 'uncut': 'Uncut', + 'unrated': 'Unrated', + 'uncensored': 'Uncensored', + } + + for keyword, label in edition_map.items(): + if keyword in edition_str: + return label + + return '' + + def get_bitrate(self, meta): + if meta.get('type') == 'DISC': + is_disc_type = meta.get('is_disc') + + if is_disc_type == 'BDMV': + disctype = meta.get('disctype') + if disctype in ['BD100', 'BD66', 'BD50', 'BD25']: + return disctype + + try: + size_in_gb = meta['bdinfo']['size'] + except (KeyError, IndexError, TypeError): + size_in_gb = 0 + + if size_in_gb > 66: + return 'BD100' + elif size_in_gb > 50: + return 'BD66' + elif size_in_gb > 25: + return 'BD50' + else: + return 'BD25' + + elif is_disc_type == 'DVD': + dvd_size = meta.get('dvd_size') + if dvd_size in ['DVD9', 'DVD5']: + return dvd_size + return 'DVD9' + + source_type = meta.get('type') + + if not source_type or not isinstance(source_type, str): + return 'Outro' + + keyword_map = { + 'webdl': 'WEB-DL', + 'webrip': 'WEBRip', + 'web': 'WEB', + 'remux': 'Blu-ray', + 'encode': 'Blu-ray', + 'bdrip': 'BDRip', + 'brrip': 'BRRip', + 'hdtv': 'HDTV', + 'sdtv': 'SDTV', + 'dvdrip': 'DVDRip', + 'hd-dvd': 'HD DVD', + 'dvdscr': 'DVDScr', + 'hdrip': 'HDRip', + 'hdtc': 'HDTC', + 'hdtv': 'HDTV', + 'pdtv': 'PDTV', + 'sdtv': 'SDTV', + 'tc': 'TC', + 'uhdtv': 'UHDTV', + 'vhsrip': 'VHSRip', + 'tvrip': 'TVRip', + } + + return keyword_map.get(source_type.lower(), 'Outro') + + async def 
img_host(self, image_bytes: bytes, filename: str) -> Optional[str]: + upload_url = f'{self.base_url}/ajax.php?action=screen_up' + headers = { + 'Referer': f'{self.base_url}/upload.php', + 'X-Requested-With': 'XMLHttpRequest', + 'Accept': 'application/json', + } + files = {'file': (filename, image_bytes, 'image/png')} + + try: + response = await self.session.post( + upload_url, headers=headers, files=files, timeout=120 + ) + response.raise_for_status() + data = response.json() + return data.get('url', '').replace('\\/', '/') + except Exception as e: + print(f'Exceção no upload de {filename}: {e}') + return None + + async def get_cover(self, meta): + cover_path = self.main_tmdb_data.get('poster_path') or meta.get('tmdb_poster') + if not cover_path: + print('Nenhum poster_path encontrado nos dados do TMDB.') + return None + + cover_tmdb_url = f'https://image.tmdb.org/t/p/w500%7Bcover_path%7D' + if BJS.already_has_the_info: + return cover_tmdb_url + + try: + response = await self.session.get(cover_tmdb_url, timeout=120) + response.raise_for_status() + image_bytes = response.content + filename = os.path.basename(cover_path) + + return await self.img_host(image_bytes, filename) + except Exception as e: + print(f'Falha ao processar pôster da URL {cover_tmdb_url}: {e}') + return None + + async def get_screenshots(self, meta): + screenshot_dir = Path(meta["base_dir"]) / "tmp" / meta["uuid"] + local_files = sorted(screenshot_dir.glob("*.png")) + + disc_menu_links = [img.get("raw_url") for img in meta.get("menu_images", []) if img.get("raw_url")][ + :3 + ] + + async def upload_local_file(path): + with open(path, "rb") as f: + image_bytes = f.read() + return await self.img_host(image_bytes, os.path.basename(path)) + + async def upload_remote_file(url): + try: + response = await self.session.get(url, timeout=120) + response.raise_for_status() + image_bytes = response.content + filename = os.path.basename(urlparse(url).path) or "screenshot.png" + return await self.img_host(image_bytes, filename) + except Exception as e: + print(f"Failed to process screenshot from URL {url}: {e}") + return None + + results = [] + + # Upload menu images + for url in disc_menu_links: + result = await upload_remote_file(url) + if result: + results.append(result) + + # Use existing files + if local_files: + paths = local_files[: 6 - len(results)] + + for coro in tqdm( + asyncio.as_completed([upload_local_file(p) for p in paths]), + total=len(paths), + desc=f"Uploading screenshots to {self.tracker}", + ): + result = await coro + if result: + results.append(result) + + else: + image_links = [img.get("raw_url") for img in meta.get("image_list", []) if img.get("raw_url")][ + : 6 - len(results) + ] + + for coro in tqdm( + asyncio.as_completed([upload_remote_file(url) for url in image_links]), + total=len(image_links), + desc=f"Uploading screenshots to {self.tracker}", + ): + result = await coro + if result: + results.append(result) + + return results + + def get_runtime(self, meta): + # Always return a dict so callers can safely use .get('hours') / .get('minutes') + try: + minutes_in_total = int(meta.get('runtime')) + if minutes_in_total < 0: + return {'hours': 0, 'minutes': 0} + except (ValueError, TypeError): + return {'hours': 0, 'minutes': 0} + + hours, minutes = divmod(minutes_in_total, 60) + return { + 'hours': hours, + 'minutes': minutes + } + + def get_release_date(self, tmdb_data): + raw_date_string = tmdb_data.get('first_air_date') or tmdb_data.get('release_date') + + if not raw_date_string: + return '' + + try: + date_object = datetime.strptime(raw_date_string, '%Y-%m-%d') + formatted_date = date_object.strftime('%d %b %Y') + + return
formatted_date + + except ValueError: + return '' + + def find_remaster_tags(self, meta): + found_tags = set() + + edition = self.get_edition(meta) + if edition: + found_tags.add(edition) + + audio_string = meta.get('audio', '') + if 'Atmos' in audio_string: + found_tags.add('Dolby Atmos') + + is_10_bit = False + if meta.get('is_disc') == 'BDMV': + try: + bit_depth_str = meta['discs'][0]['bdinfo']['video'][0]['bit_depth'] + if '10' in bit_depth_str: + is_10_bit = True + except (KeyError, IndexError, TypeError): + pass + else: + if str(meta.get('bit_depth')) == '10': + is_10_bit = True + + if is_10_bit: + found_tags.add('10-bit') + + hdr_string = meta.get('hdr', '').upper() + if 'DV' in hdr_string: + found_tags.add('Dolby Vision') + if 'HDR10+' in hdr_string: + found_tags.add('HDR10+') + if 'HDR' in hdr_string and 'HDR10+' not in hdr_string: + found_tags.add('HDR10') + + if meta.get('type') == 'REMUX': + found_tags.add('Remux') + if meta.get('extras'): + found_tags.add('Com extras') + if meta.get('has_commentary', False) or meta.get('manual_commentary', False): + found_tags.add('Com comentários') + + return found_tags + + def build_remaster_title(self, meta): + tag_priority = [ + 'Dolby Atmos', + 'Remux', + "Director's Cut", + 'Extended Edition', + 'IMAX', + 'Open Matte', + 'Noir Edition', + 'Theatrical Cut', + 'Uncut', + 'Unrated', + 'Uncensored', + '10-bit', + 'Dolby Vision', + 'HDR10+', + 'HDR10', + 'Com extras', + 'Com comentários' + ] + available_tags = self.find_remaster_tags(meta) + + ordered_tags = [] + for tag in tag_priority: + if tag in available_tags: + ordered_tags.append(tag) + + return ' / '.join(ordered_tags) + + async def get_credits(self, meta, role): + if BJS.already_has_the_info: + return 'N/A' + + role_map = { + 'director': ('directors', 'tmdb_directors'), + 'creator': ('creators', 'tmdb_creators'), + 'cast': ('stars', 'tmdb_cast'), + } + + prompt_name_map = { + 'director': 'Diretor(es)', + 'creator': 'Criador(es)', + 'cast': 'Elenco', + } + + if role in role_map: + imdb_key, tmdb_key = role_map[role] + + names = (meta.get('imdb_info', {}).get(imdb_key) or []) + (meta.get(tmdb_key) or []) + + unique_names = list(dict.fromkeys(names))[:5] + if unique_names: + return ', '.join(unique_names) + + else: + role_display_name = prompt_name_map.get(role, role.capitalize()) + prompt_message = ( + f'{role_display_name} não encontrado(s).\n' + 'Por favor, insira manualmente (separados por vírgula): ' + ) + user_input = await self.common.async_input(prompt=prompt_message) + + if user_input.strip(): + return user_input.strip() + else: + return 'skipped' + + async def get_requests(self, meta): + if not self.config['DEFAULT'].get('search_requests', False) and not meta.get('search_requests', False): + return False + else: + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + cat = meta['category'] + if cat == 'TV': + cat = 2 + if cat == 'MOVIE': + cat = 1 + if meta.get('anime'): + cat = 14 + + query = meta['title'] + + search_url = f'{self.requests_url}submit=true&search={query}&showall=on&filter_cat[{cat}]=1' + + response = await self.session.get(search_url) + response.raise_for_status() + response_results_text = response.text + + soup = BeautifulSoup(response_results_text, 'html.parser') + + request_rows = soup.select('#torrent_table tr.torrent') + + results = [] + for row in request_rows: + all_tds = row.find_all('td') + if not all_tds or len(all_tds) < 5: + continue + + info_cell = all_tds[1] + + link_element = 
info_cell.select_one('a[href*="requests.php?action=view"]') + quality_element = info_cell.select_one('b') + + if not link_element or not quality_element: + continue + + name = link_element.text.strip() + quality = quality_element.text.strip() + link = link_element.get('href') + + reward_td = all_tds[3] + reward_parts = [td.text.replace('\xa0', ' ').strip() for td in reward_td.select('tr > td:first-child')] + reward = ' / '.join(reward_parts) + + results.append({ + 'Name': name, + 'Quality': quality, + 'Reward': reward, + 'Link': link, + }) + + if results: + message = f'\n{self.tracker}: [bold yellow]Seu upload pode atender o(s) seguinte(s) pedido(s), confira:[/bold yellow]\n\n' + for r in results: + message += f"[bold green]Nome:[/bold green] {r['Name']}\n" + message += f"[bold green]Qualidade:[/bold green] {r['Quality']}\n" + message += f"[bold green]Recompensa:[/bold green] {r['Reward']}\n" + message += f"[bold green]Link:[/bold green] {self.base_url}/{r['Link']}\n\n" + console.print(message) + + return results + + except Exception as e: + console.print(f'[bold red]Ocorreu um erro ao buscar pedido(s) no {self.tracker}: {e}[/bold red]') + import traceback + console.print(traceback.format_exc()) + return [] + + async def get_data(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + await self.load_localized_data(meta) + category = meta['category'] + + data = {} + + # These fields are common across all upload types + data.update({ + 'audio': await self.get_audio(meta), + 'auth': BJS.secret_token, + 'codecaudio': self.get_audio_codec(meta), + 'codecvideo': self.get_video_codec(meta), + 'duracaoHR': self.get_runtime(meta).get('hours'), + 'duracaoMIN': self.get_runtime(meta).get('minutes'), + 'duracaotipo': 'selectbox', + 'fichatecnica': await self.build_description(meta), + 'formato': self.get_container(meta), + 'idioma': await self.get_languages(meta), + 'imdblink': meta['imdb_info']['imdbID'], + 'qualidade': self.get_bitrate(meta), + 'release': meta.get('service_longname', ''), + 'remaster_title': self.build_remaster_title(meta), + 'resolucaoh': self.get_resolution(meta).get('height'), + 'resolucaow': self.get_resolution(meta).get('width'), + 'sinopse': await self.get_overview(), + 'submit': 'true', + 'tags': await self.get_tags(meta), + 'tipolegenda': await self.get_subtitle(meta), + 'title': meta['title'], + 'titulobrasileiro': await self.get_title(meta), + 'traileryoutube': await self.get_trailer(meta), + 'type': self.get_type(meta), + 'year': f"{meta['year']}-{meta['imdb_info']['end_year']}" if meta.get('imdb_info').get('end_year') else meta['year'], + }) + + # These fields are common in movies and TV shows, even if it's anime + if category == 'MOVIE': + data.update({ + 'adulto': '2', + 'diretor': await self.get_credits(meta, 'director'), + }) + + if category == 'TV': + data.update({ + 'diretor': await self.get_credits(meta, 'creator'), + 'tipo': 'episode' if meta.get('tv_pack') == 0 else 'season', + 'season': meta.get('season_int', ''), + 'episode': meta.get('episode_int', ''), + }) + + # These fields are common in movies and TV shows, if not Anime + if not meta.get('anime'): + data.update({ + 'validimdb': 'yes', + 'imdbrating': str(meta.get('imdb_info', {}).get('rating', '')), + 'elenco': await self.get_credits(meta, 'cast'), + }) + if category == 'MOVIE': + data.update({ + 'datalancamento': self.get_release_date(self.main_tmdb_data), + }) + + if category == 'TV': + # Convert country code to name + country_list = [ + country.name 
+ for code in self.main_tmdb_data.get('origin_country', []) + if (country := pycountry.countries.get(alpha_2=code)) + ] + data.update({ + 'network': ', '.join([p.get('name', '') for p in self.main_tmdb_data.get('networks', [])]) or '', # Optional + 'numtemporadas': self.main_tmdb_data.get('number_of_seasons', ''), # Optional + 'datalancamento': self.get_release_date(self.main_tmdb_data), + 'pais': ', '.join(country_list), # Optional + 'diretorserie': ', '.join(set(meta.get('tmdb_directors', []) or meta.get('imdb_info', {}).get('directors', [])[:5])), # Optional + 'avaliacao': await self.get_rating(meta), # Optional + }) + + # Anime-specific data + if meta.get('anime'): + if category == 'MOVIE': + data.update({ + 'tipo': 'movie', + }) + if category == 'TV': + data.update({ + 'adulto': '2', + }) + + # Anon + anon = not (meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False)) + if anon: + data.update({ + 'anonymous': 'on' + }) + if self.config['TRACKERS'][self.tracker].get('show_group_if_anon', False): + data.update({ + 'anonymousshowgroup': 'on' + }) + + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != '' and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data.update({ + 'internalrel': 1, + }) + + # Only upload images if not debugging + if not meta.get('debug', False): + data.update({ + 'image': await self.get_cover(meta), + 'screenshots[]': await self.get_screenshots(meta), + }) + + return data + + async def get_overview(self): + overview = self.main_tmdb_data.get('overview', '') + if not overview: + if not BJS.already_has_the_info: + console.print( + "[bold red]Sinopse não encontrada no TMDB. Por favor, insira manualmente.[/bold red]" + ) + overview = await self.common.async_input( + prompt='[green]Digite a sinopse:[/green]' + ) + else: + return 'N/A' + return overview + + async def check_data(self, meta, data): + if not meta.get('debug', False): + if len(data['screenshots[]']) < 2: + return 'The number of successful screenshots uploaded is less than 2.' + if any(value == 'skipped' for value in ( + data.get('diretor'), + data.get('elenco'), + data.get('creators') + )): + return 'Missing required credits information (director/cast/creator).' 
+ return False + + async def upload(self, meta, disctype): + data = await self.get_data(meta) + + issue = await self.check_data(meta, data) + if issue: + meta["tracker_status"][self.tracker]["status_message"] = f'data error - {issue}' + else: + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='file_input', + upload_cookies=self.session.cookies, + upload_url=f"{self.base_url}/upload.php", + id_pattern=r'torrentid=(\d+)', + success_text="action=download&id=", + ) + + return diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 5ca4a8389..588e1932a 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -1,175 +1,136 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx +import cli_ui -from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class BLU(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class BLU(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='BLU') self.config = config + self.common = COMMON(config) self.tracker = 'BLU' self.source_flag = 'BLU' - self.search_url = 'https://blutopia.cc/api/torrents/filter' - self.torrent_url = 'https://blutopia.cc/torrents/' - self.id_url = 'https://blutopia.cc/api/torrents/' - self.upload_url = 'https://blutopia.cc/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionuts Upload Assistant[/url][/center]" + self.base_url = 'https://blutopia.cc/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', - 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', - 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', - 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', - ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] + '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AOC', 'AROMA', 'aXXo', 'B3LLUM', + 'BHDStudio', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'DTLegacy', 'ELiTE', + 'eSc', 'EZTV', 'EZTV.RE', 'F13', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'hallowed', + 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'LAMA', 'Leffe', 'LEGi0N', + 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC',
'nSD', 'PiRaTeS', 'playBD', + 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SasukeducK', + 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TheFarm', 'TM', 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', + 'WKS', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', ] - pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - blu_name = await self.edit_name(meta) - desc_header = "" - if meta.get('webdv', False): - blu_name, desc_header = await self.derived_dv_layer(meta) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True, desc_header=desc_header) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r', encoding='utf-8').read() - - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU].torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} - - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - - data = { - 'name': blu_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + async def get_additional_checks(self, meta): + should_continue = True + if meta['type'] in ['ENCODE', 'REMUX']: + if 'HDR' in meta.get('hdr', '') and 'DV' in meta.get('hdr', ''): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print('[bold red]Releases using a Dolby Vision layer from a different source have specific description requirements.[/bold red]') + console.print('[bold red]See rule 12.5. 
You must have a correct pre-formatted description if this release has a derived layer[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + if cli_ui.ask_yes_no("Is this a derived layer release?", default=False): + meta['tracker_status'][self.tracker]['other'] = True + + if meta['type'] not in ['WEBDL'] and not meta['is_disc']: + if meta.get('tag', "") in ['CMRG', 'EVO', 'TERMiNAL', 'ViSION']: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Group {meta["tag"]} is only allowed for raw type content[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + if not meta['valid_mi_settings']: + console.print(f"[bold red]No encoding settings in mediainfo, skipping {self.tracker} upload.[/bold red]") + return False - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://blutopia.cc/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() + return should_continue - async def edit_name(self, meta): + async def get_name(self, meta): blu_name = meta['name'] if meta['category'] == 'TV' and meta.get('episode_title', "") != "": blu_name = blu_name.replace(f"{meta['episode_title']} {meta['resolution']}", f"{meta['resolution']}", 1) imdb_name = meta.get('imdb_info', {}).get('title', "") imdb_year = str(meta.get('imdb_info', {}).get('year', "")) + imdb_aka = meta.get('imdb_info', {}).get('aka', "") year = str(meta.get('year', "")) - blu_name = blu_name.replace(f"{meta['title']}", imdb_name, 1) - if not meta.get('category') == "TV": + aka = meta.get('aka', "") + webdv = meta.get('webdv', "") + if imdb_name and imdb_name.strip(): + if aka: + blu_name = blu_name.replace(f"{aka} ", "", 1) + blu_name = blu_name.replace(f"{meta['title']}", imdb_name, 1) + + if imdb_aka and imdb_aka.strip() and imdb_aka != imdb_name and not meta.get('no_aka', False): + blu_name = blu_name.replace(f"{imdb_name}", f"{imdb_name} AKA {imdb_aka}", 1) + + if not meta.get('category') == "TV" and imdb_year and imdb_year.strip() and year and year.strip() and imdb_year != year: blu_name = blu_name.replace(f"{year}", imdb_year, 1) - return blu_name - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 + if webdv: + blu_name = blu_name.replace("HYBRID ", "", 1) + + if meta['tracker_status'][self.tracker].get('other', False): + blu_name = blu_name.replace(f"{meta['resolution']}", f"{meta['resolution']} DVP5/DVP8", 1) + + return {'name': blu_name} - return 1 if meta.get(flag_name, False) else 0 + async def get_additional_data(self, meta): + data = { + 'modq': await self.get_flag(meta, 'modq'), + } + + return data - async def get_cat_id(self, category_name, edition): + async def get_category_id(self, meta, category=None, reverse=False, mapping_only=False): + edition = meta.get('edition', '') + category_name = meta['category'] category_id = { 'MOVIE': '1', 'TV': '2', 'FANRES': '3' - }.get(category_name, '0') + } + + is_fanres = False + if category_name == 'MOVIE' and 'FANRES' in edition: - category_id = '3' - return category_id + is_fanres = True + + if meta['tracker_status'][self.tracker].get('other', False): + is_fanres = True + + if is_fanres: + return {'category_id': '3'} + + if mapping_only: + return category_id + elif reverse: + return {v: k for k, v in category_id.items()} + elif category is not None: + return {'category_id': category_id.get(category, '0')} + else: + meta_category = meta.get('category', '') + resolved_id = category_id.get(meta_category, '0') + return {'category_id': resolved_id} - async def get_type_id(self, type): + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): type_id = { 'DISC': '1', 'REMUX': '3', @@ -177,10 +138,20 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '12' - }.get(type, '0') - return type_id + } + + if mapping_only: + return type_id + elif reverse: + return {v: k for k, v in type_id.items()} + elif type is not None: + return {'type_id': type_id.get(type, '0')} + else: + meta_type = meta.get('type', '') + resolved_id = type_id.get(meta_type, '0') + return {'type_id': resolved_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta, mapping_only=False, reverse=False, resolution=None): resolution_id = { '8640p': '10', '4320p': '11', @@ -193,65 +164,14 @@ async def get_res_id(self, resolution): '576i': 
'7', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def derived_dv_layer(self, meta): - name = meta['name'] - desc_header = "" - # Exit if not DV + HDR - if not all([x in meta['hdr'] for x in ['HDR', 'DV']]): - return name, desc_header - import cli_ui - console.print("[bold yellow]Generating the required description addition for Derived DV Layers. Please respond appropriately.") - ask_comp = True - if meta['type'] == "WEBDL": - if cli_ui.ask_yes_no("Is the DV Layer sourced from the same service as the video?"): - ask_comp = False - desc_header = "[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons not required as DV and HDR are from same provider.[/code]" - - if ask_comp: - while desc_header == "": - desc_input = cli_ui.ask_string("Please provide comparisons between HDR masters. (link or bbcode)", default="") - desc_header = f"[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons between HDR masters: {desc_input}[/code]" - - if "hybrid" not in name.lower(): - if "REPACK" in name: - name = name.replace('REPACK', 'Hybrid REPACK') - else: - name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") - return name, desc_header - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + if mapping_only: + return resolution_id + elif reverse: + return {v: k for k, v in resolution_id.items()} + elif resolution is not None: + return {'resolution_id': resolution_id.get(resolution, '10')} + else: + meta_resolution = meta.get('resolution', '') + resolved_id = resolution_id.get(meta_resolution, '10') + return {'resolution_id': resolved_id} diff --git a/src/trackers/BT.py b/src/trackers/BT.py index 7794982b5..9808a3769 100644 --- a/src/trackers/BT.py +++ b/src/trackers/BT.py @@ -1,114 +1,93 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- +import aiofiles import httpx +import json import langcodes import os +import platform import re -import requests import unicodedata -from .COMMON import COMMON from bs4 import BeautifulSoup -from http.cookiejar import MozillaCookieJar from langcodes.tag_parser import LanguageTagError +from src.bbcode import BBCODE from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder from src.languages import process_desc_language +from src.tmdb import get_tmdb_localized_data +from src.trackers.COMMON import COMMON -class BT(COMMON): +class BT: def __init__(self, config): - super().__init__(config) - self.tracker = "BT" - self.banned_groups = [""] - self.source_flag = "BT" - self.base_url = "https://brasiltracker.org/" + self.config = config + self.common = COMMON(config) + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) + self.tracker = 'BT' + self.banned_groups = [] + self.source_flag = 'BT' + self.base_url = 'https://brasiltracker.org/' + self.torrent_url = f'{self.base_url}/torrents.php?id=' self.auth_token = None - self.session = requests.Session() - self.session.headers.update({ - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' - }) - self.signature = "[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f'Upload Assistant ({platform.system()} {platform.release()})' + }, timeout=60.0) target_site_ids = { - 'danish': '10', 'swedish': '11', 'norwegian': '12', 'romanian': '13', - 'chinese': '14', 'finnish': '15', 'italian': '16', 'polish': '17', - 'turkish': '18', 'korean': '19', 'thai': '20', 'arabic': '22', - 'croatian': '23', 'hungarian': '24', 'vietnamese': '25', 'greek': '26', - 'icelandic': '28', 'bulgarian': '29', 'english': '3', 'czech': '30', - 'serbian': '31', 'ukrainian': '34', 'latvian': '37', 'estonian': '38', - 'lithuanian': '39', 'spanish': '4', 'hebrew': '40', 'hindi': '41', - 'slovak': '42', 'slovenian': '43', 'indonesian': '47', - 'português': '49', - 'french': '5', 'german': '6', 'russian': '7', 'japanese': '8', 'dutch': '9', - 'english - forçada': '50', 'persian': '52' + 'arabic': '22', 'bulgarian': '29', 'chinese': '14', 'croatian': '23', + 'czech': '30', 'danish': '10', 'dutch': '9', 'english - forçada': '50', + 'english': '3', 'estonian': '38', 'finnish': '15', 'french': 
'5', + 'german': '6', 'greek': '26', 'hebrew': '40', 'hindi': '41', + 'hungarian': '24', 'icelandic': '28', 'indonesian': '47', 'italian': '16', + 'japanese': '8', 'korean': '19', 'latvian': '37', 'lithuanian': '39', + 'norwegian': '12', 'persian': '52', 'polish': '17', 'português': '49', + 'romanian': '13', 'russian': '7', 'serbian': '31', 'slovak': '42', + 'slovenian': '43', 'spanish': '4', 'swedish': '11', 'thai': '20', + 'turkish': '18', 'ukrainian': '34', 'vietnamese': '25', } source_alias_map = { - ("Arabic", "ara", "ar"): "arabic", - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br', 'pt-BR', "Portuguese", "por", "pt", "pt-PT", "Português Brasileiro", "Português"): "português", - ("Bulgarian", "bul", "bg"): "bulgarian", - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)", 'cmn-Hant', 'cmn-Hans', 'yue-Hant', 'yue-Hans'): "chinese", - ("Croatian", "hrv", "hr", "scr"): "croatian", - ("Czech", "cze", "cz", "cs"): "czech", - ("Danish", "dan", "da"): "danish", - ("Dutch", "dut", "nl"): "dutch", - ("English", "eng", "en", "en-US", "en-GB", "English (CC)", "English - SDH"): "english", - ("English - Forced", "English (Forced)", "en (Forced)", "en-US (Forced)"): "english - forçada", - ("Estonian", "est", "et"): "estonian", - ("Finnish", "fin", "fi"): "finnish", - ("French", "fre", "fr", "fr-FR", "fr-CA"): "french", - ("German", "ger", "de"): "german", - ("Greek", "gre", "el"): "greek", - ("Hebrew", "heb", "he"): "hebrew", - ("Hindi", "hin", "hi"): "hindi", - ("Hungarian", "hun", "hu"): "hungarian", - ("Icelandic", "ice", "is"): "icelandic", - ("Indonesian", "ind", "id"): "indonesian", - ("Italian", "ita", "it"): "italian", - ("Japanese", "jpn", "ja"): "japanese", - ("Korean", "kor", "ko"): "korean", - ("Latvian", "lav", "lv"): "latvian", - ("Lithuanian", "lit", "lt"): "lithuanian", - ("Norwegian", "nor", "no"): "norwegian", - ("Persian", "fa", "far"): "persian", - ("Polish", "pol", "pl"): "polish", - ("Romanian", "rum", "ro"): "romanian", - ("Russian", "rus", "ru"): "russian", - ("Serbian", "srp", "sr", "scc"): "serbian", - ("Slovak", "slo", "sk"): "slovak", - ("Slovenian", "slv", "sl"): "slovenian", - ("Spanish", "spa", "es", "es-ES", "es-419"): "spanish", - ("Swedish", "swe", "sv"): "swedish", - ("Thai", "tha", "th"): "thai", - ("Turkish", "tur", "tr"): "turkish", - ("Ukrainian", "ukr", "uk"): "ukrainian", - ("Vietnamese", "vie", "vi"): "vietnamese", - } - self.payload_fields_map = { - # Movies - '0': [ - "submit", "auth", "type", "imdb_input", "adulto", "title", "title_br", - "nota_imdb", "year", "diretor", "duracao", "idioma_ori", "tags", - "image", "youtube", "sinopse", "mediainfo", "format", "audio", - "video_c", "audio_c", "legenda", "3d", "resolucao_1", "resolucao_2", - "versao", "bitrate", "screen[]", "desc", "especificas", "subtitles[]" - ], - # TV - '1': [ - "submit", "auth", "type", "imdb_input", "adulto", "title", "title_br", - "nota_imdb", "year", "diretor", "duracao", "idioma_ori", "tags", - "image", "youtube", "sinopse", "mediainfo", "tipo", "temporada", - "temporada_e", "episodio", "ntorrent", "format", "audio", "video_c", - "audio_c", "legenda", "3d", "resolucao_1", "resolucao_2", "bitrate", - "screen[]", "desc", "especificas", "subtitles[]" - ], - # Animes - '5': [ - "submit", "auth", "type", "title", "releasedate", "vote", "rating", - "year", "diretor", "horas", "minutos", "duracao", "tags", "image", - "fundo_torrent", "youtube", "sinopse", "desc", "tipo", "temporada", - "temporada_e", "episodio", "mediainfo", "ntorrent", "idioma_ori", - 
"format", "bitrate", "audio", "video_c", "audio_c", "legenda", - "resolucao_1", "resolucao_2", "screen[]", "especificas", "subtitles[]" - ] + ('Arabic', 'ara', 'ar'): 'arabic', + ('Brazilian Portuguese', 'Brazilian', 'Portuguese-BR', 'pt-br', 'pt-BR', 'Portuguese', 'por', 'pt', 'pt-PT', 'Português Brasileiro', 'Português'): 'português', + ('Bulgarian', 'bul', 'bg'): 'bulgarian', + ('Chinese', 'chi', 'zh', 'Chinese (Simplified)', 'Chinese (Traditional)', 'cmn-Hant', 'cmn-Hans', 'yue-Hant', 'yue-Hans'): 'chinese', + ('Croatian', 'hrv', 'hr', 'scr'): 'croatian', + ('Czech', 'cze', 'cz', 'cs'): 'czech', + ('Danish', 'dan', 'da'): 'danish', + ('Dutch', 'dut', 'nl'): 'dutch', + ('English - Forced', 'English (Forced)', 'en (Forced)', 'en-US (Forced)'): 'english - forçada', + ('English', 'eng', 'en', 'en-US', 'en-GB', 'English (CC)', 'English - SDH'): 'english', + ('Estonian', 'est', 'et'): 'estonian', + ('Finnish', 'fin', 'fi'): 'finnish', + ('French', 'fre', 'fr', 'fr-FR', 'fr-CA'): 'french', + ('German', 'ger', 'de'): 'german', + ('Greek', 'gre', 'el'): 'greek', + ('Hebrew', 'heb', 'he'): 'hebrew', + ('Hindi', 'hin', 'hi'): 'hindi', + ('Hungarian', 'hun', 'hu'): 'hungarian', + ('Icelandic', 'ice', 'is'): 'icelandic', + ('Indonesian', 'ind', 'id'): 'indonesian', + ('Italian', 'ita', 'it'): 'italian', + ('Japanese', 'jpn', 'ja'): 'japanese', + ('Korean', 'kor', 'ko'): 'korean', + ('Latvian', 'lav', 'lv'): 'latvian', + ('Lithuanian', 'lit', 'lt'): 'lithuanian', + ('Norwegian', 'nor', 'no'): 'norwegian', + ('Persian', 'fa', 'far'): 'persian', + ('Polish', 'pol', 'pl'): 'polish', + ('Romanian', 'rum', 'ro'): 'romanian', + ('Russian', 'rus', 'ru'): 'russian', + ('Serbian', 'srp', 'sr', 'scc'): 'serbian', + ('Slovak', 'slo', 'sk'): 'slovak', + ('Slovenian', 'slv', 'sl'): 'slovenian', + ('Spanish', 'spa', 'es', 'es-ES', 'es-419'): 'spanish', + ('Swedish', 'swe', 'sv'): 'swedish', + ('Thai', 'tha', 'th'): 'thai', + ('Turkish', 'tur', 'tr'): 'turkish', + ('Ukrainian', 'ukr', 'uk'): 'ukrainian', + ('Vietnamese', 'vie', 'vi'): 'vietnamese', } self.ultimate_lang_map = {} @@ -118,31 +97,80 @@ def __init__(self, config): for alias in aliases_tuple: self.ultimate_lang_map[alias.lower()] = correct_id - def assign_media_properties(self, meta): - self.imdb_id = meta['imdb_info']['imdbID'] - self.tmdb_id = meta['tmdb'] - self.category = meta['category'] - self.season = meta.get('season', '') - self.episode = meta.get('episode', '') + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + error_text='login.php', + token_pattern=r'name="auth" value="([^"]+)"' + ) + + async def load_localized_data(self, meta): + localized_data_file = f'{meta["base_dir"]}/tmp/{meta["uuid"]}/tmdb_localized_data.json' + main_ptbr_data = {} + episode_ptbr_data = {} + data = {} + + if os.path.isfile(localized_data_file): + try: + async with aiofiles.open(localized_data_file, 'r', encoding='utf-8') as f: + content = await f.read() + data = json.loads(content) + except json.JSONDecodeError: + print(f'Warning: Could not decode JSON from {localized_data_file}') + data = {} + except Exception as e: + print(f'Error reading file {localized_data_file}: {e}') + data = {} + + main_ptbr_data = data.get('pt-BR', {}).get('main') + + if not main_ptbr_data: + main_ptbr_data = await get_tmdb_localized_data( + meta, + data_type='main', + 
language='pt-BR', + append_to_response='credits,videos,content_ratings' + ) + + if self.config['DEFAULT']['episode_overview']: + if meta['category'] == 'TV' and not meta.get('tv_pack'): + episode_ptbr_data = data.get('pt-BR', {}).get('episode') + if not episode_ptbr_data: + episode_ptbr_data = await get_tmdb_localized_data( + meta, + data_type='episode', + language='pt-BR', + append_to_response='' + ) + + self.main_tmdb_data = main_ptbr_data or {} + self.episode_tmdb_data = episode_ptbr_data or {} + + return + + async def get_container(self, meta): + container = meta.get('container', '') + if container in ['avi', 'm2ts', 'm4v', 'mkv', 'mp4', 'ts', 'vob', 'wmv', 'mkv']: + return container.upper() + + return 'Outro' + + async def get_type(self, meta): + if meta.get('anime'): + return '5' - async def tmdb_data(self, meta): - tmdb_api = self.config['DEFAULT']['tmdb_api'] - self.assign_media_properties(meta) + category_map = { + 'TV': '1', + 'MOVIE': '0' + } - url = f"https://api.themoviedb.org/3/%7Bself.category.lower()%7D/%7Bself.tmdb_id%7D?api_key={tmdb_api}&language=pt-BR&append_to_response=videos" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url) - if response.status_code == 200: - return response.json() - else: - return None - except httpx.RequestError: - return None + return category_map.get(meta['category']) - async def get_original_language(self, meta): - tmdb_data = await self.tmdb_data(meta) - lang_code = tmdb_data.get("original_language") + async def get_languages(self, meta): + lang_code = self.main_tmdb_data.get('original_language') if not lang_code: return None @@ -153,168 +181,31 @@ async def get_original_language(self, meta): except LanguageTagError: return lang_code - async def search_existing(self, meta, disctype): - self.assign_media_properties(meta) - is_current_upload_a_tv_pack = meta.get('tv_pack') == 1 - - search_url = f"{self.base_url}/torrents.php?searchstr={self.imdb_id}" - - found_items = [] - try: - response = self.session.get(search_url) - response.raise_for_status() - soup = BeautifulSoup(response.text, 'html.parser') - - torrent_table = soup.find('table', id='torrent_table') - if not torrent_table: - return [] - - group_links = set() - for group_row in torrent_table.find_all('tr'): - link = group_row.find('a', href=re.compile(r'torrents\.php\?id=\d+')) - if link and 'torrentid' not in link.get('href', ''): - group_links.add(link['href']) - - if not group_links: - return [] - - for group_link in group_links: - group_url = f"{self.base_url}/{group_link}" - group_response = self.session.get(group_url) - group_response.raise_for_status() - group_soup = BeautifulSoup(group_response.text, 'html.parser') - - for torrent_row in group_soup.find_all('tr', id=re.compile(r'^torrent\d+$')): - desc_link = torrent_row.find('a', onclick=re.compile(r'gtoggle')) - if not desc_link: - continue - description_text = " ".join(desc_link.get_text(strip=True).split()) - - torrent_id = torrent_row.get('id', '').replace('torrent', '') - file_div = group_soup.find('div', id=f'files_{torrent_id}') - if not file_div: - continue - - is_existing_torrent_a_disc = any(keyword in description_text.lower() for keyword in ['bd25', 'bd50', 'bd66', 'bd100', 'dvd5', 'dvd9', 'm2ts']) - - if is_existing_torrent_a_disc or is_current_upload_a_tv_pack: - path_div = file_div.find('div', class_='filelist_path') - if path_div: - folder_name = path_div.get_text(strip=True).strip('/') - if folder_name: - found_items.append(folder_name) - else: - file_table 
= file_div.find('table', class_='filelist_table') - if file_table: - for row in file_table.find_all('tr'): - if 'colhead_dark' not in row.get('class', []): - cell = row.find('td') - if cell: - filename = cell.get_text(strip=True) - if filename: - found_items.append(filename) - break - - except requests.exceptions.RequestException as e: - console.print(f"[bold red]Ocorreu um erro de rede ao buscar por duplicatas: {e}[/bold red]") - return [] - except Exception as e: - console.print(f"[bold red]Ocorreu um erro inesperado ao processar a busca: {e}[/bold red]") - return [] - - return found_items - - async def validate_credentials(self, meta): - cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.txt") - if not os.path.exists(cookie_file): - console.print(f"[bold red]Arquivo de cookie para o {self.tracker} não encontrado: {cookie_file}[/bold red]") - return False + async def get_audio(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) - try: - jar = MozillaCookieJar(cookie_file) - jar.load(ignore_discard=True, ignore_expires=True) - self.session.cookies = jar - except Exception as e: - console.print(f"[bold red]Erro ao carregar o arquivo de cookie. Verifique se o formato está correto. Erro: {e}[/bold red]") - return False + audio_languages = set(meta.get('audio_languages', [])) - try: - upload_page_url = f"{self.base_url}/upload.php" - response = self.session.get(upload_page_url, timeout=10, allow_redirects=True) + portuguese_languages = ['Portuguese', 'Português', 'pt'] - if 'login.php' in str(response.url): - console.print(f"[bold red]Falha na validação do {self.tracker}. O cookie parece estar expirado ou é inválido.[/bold red]") - return False + has_pt_audio = any(lang in portuguese_languages for lang in audio_languages) - auth_match = re.search(r'name="auth" value="([^"]+)"', response.text) + original_lang = meta.get('original_language', '').lower() + is_original_pt = original_lang in portuguese_languages - if auth_match: - self.auth_token = auth_match.group(1) - return True + if has_pt_audio: + if is_original_pt: + return 'Nacional' + elif len(audio_languages) > 1: + return 'Dual Audio' else: - console.print(f"[bold red]Falha na validação do {self.tracker}. 
Não foi possível encontrar o token 'auth' na página de upload.[/bold red]") - console.print("[yellow]Isso pode acontecer se a estrutura do site mudou ou se o login falhou silenciosamente.[/yellow]") - with open(f"{self.tracker}_auth_failure_{meta['uuid']}.html", "w", encoding="utf-8") as f: - f.write(response.text) - console.print(f"[yellow]A resposta do servidor foi salva em '{self.tracker}_auth_failure_{meta['uuid']}.html' para análise.[/yellow]") - return False - - except Exception as e: - console.print(f"[bold red]Erro ao validar credenciais do {self.tracker}: {e}[/bold red]") - return False - - def get_type(self, meta): - self.assign_media_properties(meta) - - if meta.get('anime', False): - return '5' + return 'Dublado' - if self.category == 'TV' or meta.get('season') is not None: - return '1' + return 'Legendado' - if self.category == 'MOVIE': - return '0' - - return '0' - - def get_file_info(self, meta): - info_file_path = "" - if meta.get('is_disc') == 'BDMV': - info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/BD_SUMMARY_00.txt" - else: - info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO_CLEANPATH.txt" - - if os.path.exists(info_file_path): - try: - with open(info_file_path, 'r', encoding='utf-8') as f: - return f.read() - except Exception as e: - console.print(f"[bold red]Erro ao ler o arquivo de info em '{info_file_path}': {e}[/bold red]") - return "" - else: - console.print(f"[bold red]Arquivo de info não encontrado: {info_file_path}[/bold red]") - return "" - - def get_format(self, meta): - if meta.get('is_disc') == "BDMV": - return "M2TS" - elif meta.get('is_disc') == "DVD": - return "VOB" - - try: - general_track = next(t for t in meta.get('mediainfo', {}).get('media', {}).get('track', []) if t.get('@type') == 'General') - file_extension = general_track.get('FileExtension', '').lower() - if file_extension == 'mkv': - return 'MKV' - elif file_extension == 'mp4': - return 'MP4' - else: - return "Outros" - except (StopIteration, AttributeError, TypeError): - return None - - async def get_subtitles(self, meta): - if not meta.get('subtitle_languages'): + async def get_subtitle(self, meta): + if not meta.get('language_checked', False): await process_desc_language(meta, desc=None, tracker=self.tracker) found_language_strings = meta.get('subtitle_languages', []) @@ -325,41 +216,35 @@ async def get_subtitles(self, meta): if target_id: subtitle_ids.add(target_id) - legenda_value = "Sim" if '49' in subtitle_ids else "Nao" + has_pt_subtitles = 'Sim' if '49' in subtitle_ids else 'Nao' - final_subtitle_ids = sorted(list(subtitle_ids)) - if not final_subtitle_ids: - final_subtitle_ids.append('44') + subtitle_ids = sorted(list(subtitle_ids)) - return { - 'legenda': legenda_value, - 'subtitles[]': final_subtitle_ids - } + if not subtitle_ids: + subtitle_ids.append('44') - async def get_audio(self, meta): - if not meta.get('audio_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) + return has_pt_subtitles, subtitle_ids - audio_languages = set(meta.get('audio_languages', [])) - - portuguese_languages = ['Portuguese', 'Português'] - - has_pt_audio = any(lang in portuguese_languages for lang in audio_languages) + async def get_resolution(self, meta): + if meta.get('is_disc') == 'BDMV': + resolution_str = meta.get('resolution', '') + try: + height_num = int(resolution_str.lower().replace('p', '').replace('i', '')) + height = str(height_num) - original_lang = meta.get('original_language', '').lower() - is_original_pt = 
original_lang in portuguese_languages + width_num = round((16 / 9) * height_num) + width = str(width_num) + except (ValueError, TypeError): + pass - if has_pt_audio: - if is_original_pt: - return "Nacional" - elif len(audio_languages) > 1: - return "Dual Audio" - else: - return "Dublado" + else: + video_mi = meta['mediainfo']['media']['track'][1] + width = video_mi['Width'] + height = video_mi['Height'] - return "Legendado" + return width, height - def get_video_codec(self, meta): + async def get_video_codec(self, meta): video_encode = meta.get('video_encode', '').strip().lower() codec_final = meta.get('video_codec', '') is_hdr = bool(meta.get('hdr')) @@ -376,7 +261,7 @@ def get_video_codec(self, meta): for key, value in encode_map.items(): if key in video_encode: if value in ['x265', 'H.265'] and is_hdr: - return f"{value} HDR" + return f'{value} HDR' return value codec_lower = codec_final.lower() @@ -390,38 +275,38 @@ def get_video_codec(self, meta): for key, value in codec_map.items(): if key in codec_lower: - return f"{value} HDR" if value == 'x265' and is_hdr else value + return f"{value} HDR" if value == "x265" and is_hdr else value return codec_final if codec_final else "Outro" - def get_audio_codec(self, meta): + async def get_audio_codec(self, meta): priority_order = [ - "DTS-X", "E-AC-3 JOC", "TrueHD", "DTS-HD", "PCM", "FLAC", "DTS-ES", - "DTS", "E-AC-3", "AC3", "AAC", "Opus", "Vorbis", "MP3", "MP2" + 'DTS-X', 'E-AC-3 JOC', 'TrueHD', 'DTS-HD', 'PCM', 'FLAC', 'DTS-ES', + 'DTS', 'E-AC-3', 'AC3', 'AAC', 'Opus', 'Vorbis', 'MP3', 'MP2' ] codec_map = { - "DTS-X": ["DTS:X"], - "E-AC-3 JOC": ["DD+ 5.1 Atmos", "DD+ 7.1 Atmos"], - "TrueHD": ["TrueHD"], - "DTS-HD": ["DTS-HD"], - "PCM": ["LPCM"], - "FLAC": ["FLAC"], - "DTS-ES": ["DTS-ES"], - "DTS": ["DTS"], - "E-AC-3": ["DD+"], - "AC3": ["DD"], - "AAC": ["AAC"], - "Opus": ["Opus"], - "Vorbis": ["VORBIS"], - "MP2": ["MP2"], - "MP3": ["MP3"] + 'DTS-X': ['DTS:X'], + 'E-AC-3 JOC': ['DD+ 5.1 Atmos', 'DD+ 7.1 Atmos'], + 'TrueHD': ['TrueHD'], + 'DTS-HD': ['DTS-HD'], + 'PCM': ['LPCM'], + 'FLAC': ['FLAC'], + 'DTS-ES': ['DTS-ES'], + 'DTS': ['DTS'], + 'E-AC-3': ['DD+'], + 'AC3': ['DD'], + 'AAC': ['AAC'], + 'Opus': ['Opus'], + 'Vorbis': ['VORBIS'], + 'MP2': ['MP2'], + 'MP3': ['MP3'] } audio_description = meta.get('audio') if not audio_description or not isinstance(audio_description, str): - return "Outro" + return 'Outro' for codec_name in priority_order: search_terms = codec_map.get(codec_name, []) @@ -430,63 +315,240 @@ def get_audio_codec(self, meta): if term in audio_description: return codec_name - return "Outro" + return 'Outro' + + async def get_title(self, meta): + title = self.main_tmdb_data.get('name') or self.main_tmdb_data.get('title') or '' + + return title if title and title != meta.get('title') else '' + + async def get_description(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/center]") + + # TV + title = self.episode_tmdb_data.get('name', '') + episode_image = self.episode_tmdb_data.get('still_path', '') + episode_overview = self.episode_tmdb_data.get('overview', '') + + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + 
desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300{episode_image}[/img][/center]") + + desc_parts.append(f'[center]{episode_overview}[/center]') - def get_edition(self, meta): + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Signature + desc_parts.append(f"[center][url=https://github.com/Audionut/Upload-Assistant]Upload realizado via {meta['ua_name']} {meta['current_version']}[/url][/center]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.remove_img_resize(description) + description = bbcode.remove_list(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + async def get_trailer(self, meta): + video_results = self.main_tmdb_data.get('videos', {}).get('results', []) + + youtube = '' + + if video_results: + youtube = video_results[-1].get('key', '') + + if not youtube: + meta_trailer = meta.get('youtube', '') + if meta_trailer: + youtube = meta_trailer.replace('https://www.youtube.com/watch?v=', '').replace('/', '') + + return youtube + + async def get_tags(self, meta): + tags = '' + + if self.main_tmdb_data and isinstance(self.main_tmdb_data.get('genres'), list): + genre_names = [ + g.get('name', '') for g in self.main_tmdb_data['genres'] + if isinstance(g.get('name'), str) and g.get('name').strip() + ] + + if genre_names: + tags = ', '.join( + unicodedata.normalize('NFKD', name) + .encode('ASCII', 'ignore') + .decode('utf-8') + .replace(' ', '.') + .lower() + for name in genre_names + ) + + if not tags: + tags = await self.common.async_input(prompt=f'Digite os gêneros (no formato do {self.tracker}): ') + + return tags + + async def search_existing(self, meta, disctype): + is_tv_pack = bool(meta.get('tv_pack')) + + search_url = f"{self.base_url}/torrents.php?searchstr={meta['imdb_info']['imdbID']}" + + found_items = [] + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + + response = await self.session.get(search_url) + response.raise_for_status() + soup = BeautifulSoup(response.text, 'html.parser') + + torrent_table = soup.find('table', id='torrent_table') + if not torrent_table: + return [] + + group_links = set() + for group_row in torrent_table.find_all('tr'): + link = group_row.find('a', href=re.compile(r'torrents\.php\?id=\d+')) + if link and 'torrentid' not in link.get('href', ''): + group_links.add(link['href']) + + if not group_links: + return [] + + for group_link in group_links: + group_url = f'{self.base_url}/{group_link}' + group_response = await self.session.get(group_url) + group_response.raise_for_status() + group_soup = BeautifulSoup(group_response.text, 'html.parser') + + for torrent_row in group_soup.find_all('tr', id=re.compile(r'^torrent\d+$')): + desc_link = torrent_row.find('a', onclick=re.compile(r'gtoggle')) + if not desc_link: + continue + description_text = ' '.join(desc_link.get_text(strip=True).split()) + + torrent_id = torrent_row.get('id', '').replace('torrent', '') + file_div = group_soup.find('div', id=f'files_{torrent_id}') + if not file_div: + continue + + is_existing_torrent_a_disc = any(keyword in description_text.lower() for keyword in ['bd25', 
'bd50', 'bd66', 'bd100', 'dvd5', 'dvd9', 'm2ts']) + + if is_existing_torrent_a_disc or is_tv_pack: + path_div = file_div.find('div', class_='filelist_path') + if path_div: + folder_name = path_div.get_text(strip=True).strip('/') + if folder_name: + found_items.append(folder_name) + else: + file_table = file_div.find('table', class_='filelist_table') + if file_table: + for row in file_table.find_all('tr'): + if 'colhead_dark' not in row.get('class', []): + cell = row.find('td') + if cell: + filename = cell.get_text(strip=True) + if filename: + found_items.append(filename) + break + + except Exception as e: + console.print(f'[bold red]Ocorreu um erro inesperado ao processar a busca: {e}[/bold red]') + return [] + + return found_items + + async def get_media_info(self, meta): + info_file_path = '' + if meta.get('is_disc') == 'BDMV': + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/BD_SUMMARY_00.txt" + else: + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO_CLEANPATH.txt" + + if os.path.exists(info_file_path): + try: + with open(info_file_path, 'r', encoding='utf-8') as f: + return f.read() + except Exception as e: + console.print(f'[bold red]Erro ao ler o arquivo de info em {info_file_path}: {e}[/bold red]') + return '' + else: + console.print(f'[bold red]Arquivo de info não encontrado: {info_file_path}[/bold red]') + return '' + + async def get_edition(self, meta): edition_str = meta.get('edition', '').lower() if not edition_str: - return "" + return '' edition_map = { "director's cut": "Director's Cut", - "theatrical": "Theatrical Cut", - "extended": "Extended", - "uncut": "Uncut", - "unrated": "Unrated", - "imax": "IMAX", - "noir": "Noir", - "remastered": "Remastered", + 'theatrical': 'Theatrical Cut', + 'extended': 'Extended', + 'uncut': 'Uncut', + 'unrated': 'Unrated', + 'imax': 'IMAX', + 'noir': 'Noir', + 'remastered': 'Remastered', } for keyword, label in edition_map.items(): if keyword in edition_str: return label - return "" + return '' - def get_bitrate(self, meta): + async def get_bitrate(self, meta): if meta.get('type') == 'DISC': is_disc_type = meta.get('is_disc') if is_disc_type == 'BDMV': disctype = meta.get('disctype') - if disctype in ["BD100", "BD66", "BD50", "BD25"]: + if disctype in ['BD100', 'BD66', 'BD50', 'BD25']: return disctype try: - size_in_gb = meta['torrent_comments'][0]['size'] / (10**9) + size_in_gb = meta['bdinfo']['size'] except (KeyError, IndexError, TypeError): size_in_gb = 0 if size_in_gb > 66: - return "BD100" + return 'BD100' elif size_in_gb > 50: - return "BD66" + return 'BD66' elif size_in_gb > 25: - return "BD50" + return 'BD50' else: - return "BD25" + return 'BD25' elif is_disc_type == 'DVD': dvd_size = meta.get('dvd_size') - if dvd_size in ["DVD9", "DVD5"]: + if dvd_size in ['DVD9', 'DVD5']: return dvd_size - return "DVD9" + return 'DVD9' source_type = meta.get('type') if not source_type or not isinstance(source_type, str): - return "Outro" + return 'Outro' keyword_map = { 'remux': 'Remux', @@ -503,219 +565,123 @@ def get_bitrate(self, meta): 'tvrip': 'TVRip', } - return keyword_map.get(source_type.lower(), "Outro") - - def get_screens(self, meta): - screenshot_urls = [ - image.get('raw_url') - for image in meta.get('image_list', []) - if image.get('raw_url') - ] - - return screenshot_urls - - async def edit_desc(self, meta): - base_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" - final_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - - base_desc 
= "" - if os.path.exists(base_desc_path): - with open(base_desc_path, 'r', encoding='utf-8') as f: - base_desc = f.read() - - description_parts = [] - - # WEBDL source note - if meta.get('type') == 'WEBDL' and meta.get('service_longname', ''): - source_note = f"[center][quote]Este lançamento tem como fonte o serviço {meta['service_longname']}[/quote][/center]" - description_parts.append(source_note) + return keyword_map.get(source_type.lower(), 'Outro') - description_parts.append(base_desc) + async def get_screens(self, meta): + urls = [] + for image in meta.get('menu_images', []) + meta.get('image_list', []): + if image.get('raw_url'): + urls.append(image['raw_url']) - if self.signature: - description_parts.append(self.signature) - - with open(final_desc_path, 'w', encoding='utf-8') as descfile: - final_description = "\n\n".join(filter(None, description_parts)) - descfile.write(final_description) - - def get_resolution(self, meta): - width, height = "", "" - - if meta.get('is_disc') == 'BDMV': - resolution_str = meta.get('resolution', '') - try: - height_num = int(resolution_str.lower().replace('p', '').replace('i', '')) - height = str(height_num) - - width_num = round((16 / 9) * height_num) - width = str(width_num) - except (ValueError, TypeError): - pass + return urls + async def get_credits(self, meta): + director = (meta.get('imdb_info', {}).get('directors') or []) + (meta.get('tmdb_directors') or []) + if director: + unique_names = list(dict.fromkeys(director))[:5] + return ', '.join(unique_names) else: - try: - tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) - video_track = next((t for t in tracks if t.get('@type') == 'Video'), None) - if video_track: - width = video_track.get('Width', '') - height = video_track.get('Height', '') - except (AttributeError, TypeError): - pass + return 'N/A' - return { - 'resolucao_1': width, - 'resolucao_2': height - } + async def get_data(self, meta): + await self.load_localized_data(meta) + has_pt_subtitles, subtitle_ids = await self.get_subtitle(meta) + resolution_width, resolution_height = await self.get_resolution(meta) - async def upload(self, meta, disctype): - tmdb_data = await self.tmdb_data(meta) - original_language = await self.get_original_language(meta) - - if not await self.validate_credentials(meta): - console.print(f"[bold red]Upload para {self.tracker} abortado.[/bold red]") - return - - await COMMON(config=self.config).edit_torrent(meta, self.tracker, self.source_flag) - await self.edit_desc(meta) - - category_type = self.get_type(meta) - - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - all_possible_data = {} - - all_possible_data.update({ + data = { + 'audio_c': await self.get_audio_codec(meta), + 'audio': await self.get_audio(meta), + 'auth': BT.secret_token, + 'bitrate': await self.get_bitrate(meta), + 'desc': '', + 'diretor': await self.get_credits(meta), + 'duracao': f"{str(meta.get('runtime', ''))} min", + 'especificas': await self.get_description(meta), + 'format': await self.get_container(meta), + 'idioma_ori': await self.get_languages(meta) or meta.get('original_language', ''), + 'image': f"https://image.tmdb.org/t/p/w500%7Bself.main_tmdb_data.get('poster_path', '') or meta.get('tmdb_poster', '')}", + 'legenda': has_pt_subtitles, + 'mediainfo': await self.get_media_info(meta), + 'resolucao_1': resolution_width, + 'resolucao_2': resolution_height, + 'screen[]': await self.get_screens(meta), + 'sinopse': 
self.main_tmdb_data.get('overview', 'Nenhuma sinopse disponível.'), 'submit': 'true', - 'auth': self.auth_token, - 'type': category_type, - 'imdb_input': meta.get('imdb_info', {}).get('imdbID', ''), - 'adulto': '0' - }) - - all_possible_data.update({ + 'subtitles[]': subtitle_ids, + 'tags': await self.get_tags(meta), 'title': meta['title'], - 'title_br': tmdb_data.get('name') or tmdb_data.get('title') or '', - 'nota_imdb': str(meta.get('imdb_info', {}).get('rating', '')), + 'type': await self.get_type(meta), + 'video_c': await self.get_video_codec(meta), 'year': str(meta['year']), - 'diretor': ", ".join(set(meta.get('tmdb_directors', []))), - 'idioma_ori': original_language or meta.get('original_language', ''), - 'sinopse': tmdb_data.get('overview', 'Nenhuma sinopse disponível.'), - 'tags': ', '.join(unicodedata.normalize('NFKD', g['name']).encode('ASCII', 'ignore').decode('utf-8').replace(' ', '.').lower() for g in tmdb_data.get('genres', [])), - 'duracao': f"{str(meta.get('runtime', ''))} min", - 'image': f"https://image.tmdb.org/t/p/w500%7Btmdb_data.get('poster_path', '')}", - }) - - bt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() - subtitles_info = await self.get_subtitles(meta) - resolution = self.get_resolution(meta) - - all_possible_data.update({ - 'mediainfo': self.get_file_info(meta), - 'format': self.get_format(meta), - 'audio': await self.get_audio(meta), - 'video_c': self.get_video_codec(meta), - 'audio_c': self.get_audio_codec(meta), - 'legenda': subtitles_info.get('legenda', 'Nao'), - 'subtitles[]': subtitles_info.get('subtitles[]'), - '3d': 'Sim' if meta.get('3d') else 'Nao', - 'resolucao_1': resolution['resolucao_1'], - 'resolucao_2': resolution['resolucao_2'], - 'bitrate': self.get_bitrate(meta), - 'screen[]': self.get_screens(meta), - 'desc': '', - 'especificas': bt_desc - }) - - # Movies - all_possible_data['versao'] = self.get_edition(meta) - - # TV/Anime - all_possible_data.update({ - 'ntorrent': f"{self.season}{self.episode}", - 'tipo': 'ep_individual' if meta.get('tv_pack') == 0 else 'completa', - 'temporada': self.season if meta.get('tv_pack') == 1 else '', - 'temporada_e': self.season if meta.get('tv_pack') == 0 else '', - 'episodio': self.episode - }) - - # Anime specific data - duracao_min = 0 - try: - duracao_apenas_numeros = re.search(r'\d+', all_possible_data.get('duracao', '0')) - if duracao_apenas_numeros: - duracao_min = int(duracao_apenas_numeros.group(0)) - except (ValueError, TypeError): - pass - - all_possible_data.update({ - 'releasedate': str(all_possible_data.get('year', '')), - 'rating': str(all_possible_data.get('nota_imdb', '')), - 'horas': str(duracao_min // 60), - 'minutos': str(duracao_min % 60), - 'fundo_torrent': meta.get('backdrop'), - }) - - required_fields = self.payload_fields_map.get(category_type) - if not required_fields: - console.print(f"[bold red]Erro: Modelo de payload não encontrado para a categoria '{category_type}'. 
Upload abortado.[/bold red]") - return - - final_data = {} - for field in required_fields: - if field in all_possible_data: - final_data[field] = all_possible_data[field] - - if anon == 1: - final_data['anonymous'] = '1' - - video_results = tmdb_data.get('videos', {}).get('results', []) - youtube_code = video_results[-1].get('key', '') if video_results else '' - if youtube_code: - final_data['youtube'] = youtube_code - else: - youtube_url = meta.get('youtube', '') - if youtube_url: - match = re.search(r'(?:v=|\/)([0-9A-Za-z_-]{11}).*', youtube_url) - if match: - final_data['youtube'] = match.group(1) - - if meta.get('debug', False): - console.print(final_data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - return - - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - if not os.path.exists(torrent_path): - return + 'youtube': await self.get_trailer(meta), + } - upload_url = f"{self.base_url}/upload.php" - with open(torrent_path, 'rb') as torrent_file: - files = {'file_input': (f"{self.tracker}.placeholder.torrent", torrent_file, "application/x-bittorrent")} + # Common data MOVIE/TV + if not meta.get('anime'): + if meta['category'] in ('MOVIE', 'TV'): + data.update({ + '3d': 'Sim' if meta.get('3d') else 'Nao', + 'adulto': '0', + 'imdb_input': meta.get('imdb_info', {}).get('imdbID', ''), + 'nota_imdb': str(meta.get('imdb_info', {}).get('rating', '')), + 'title_br': await self.get_title(meta), + }) + if meta.get('scene', False): + data['scene'] = 'on' + + # Common data TV/Anime + tv_pack = bool(meta.get('tv_pack')) + if meta['category'] == 'TV' or meta.get('anime'): + data.update({ + 'episodio': meta.get('episode', ''), + 'ntorrent': f"{meta.get('season', '')}{meta.get('episode', '')}", + 'temporada_e': meta.get('season', '') if not tv_pack else '', + 'temporada': meta.get('season', '') if tv_pack else '', + 'tipo': 'ep_individual' if not tv_pack else 'completa', + }) + + # Specific + if meta['category'] == 'MOVIE': + data['versao'] = await self.get_edition(meta) + elif meta.get('anime'): + data.update({ + 'fundo_torrent': meta.get('backdrop'), + 'horas': '', + 'minutos': '', + 'rating': str(meta.get('imdb_info', {}).get('rating', '')), + 'releasedate': str(meta['year']), + 'vote': '', + }) + + # Anon + anon = not (meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False)) + if anon: + data['anonymous'] = '1' + + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != '' and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data.update({ + 'internal': 1, + }) + + return data - try: - response = self.session.post(upload_url, data=final_data, files=files, timeout=60) - - if response.status_code == 200 and 'torrents.php?id=' in str(response.url): - final_url = str(response.url) - meta['tracker_status'][self.tracker]['status_message'] = final_url - id_match = re.search(r'id=(\d+)', final_url) - if id_match: - torrent_id = id_match.group(1) - details_url = f"{self.base_url}/torrents.php?id={torrent_id}" - announce_url = self.config['TRACKERS'][self.tracker].get('announce_url') - await COMMON(config=self.config).add_tracker_torrent(meta, self.tracker, self.source_flag, announce_url, details_url) - else: - console.print(f"[bold yellow]Redirecionamento para a página do torrent ocorreu, mas não foi possível extrair o ID da URL: {final_url}[/bold yellow]") - else: - console.print(f"[bold red]Falha no upload 
para {self.tracker}. Status: {response.status_code}, URL: {response.url}[/bold red]") - failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload.html" - with open(failure_path, "w", encoding="utf-8") as f: - f.write(response.text) - console.print(f"[yellow]A resposta HTML foi salva em '{failure_path}' para análise.[/yellow]") - - except requests.exceptions.RequestException as e: - console.print(f"[bold red]Erro de conexão ao fazer upload para {self.tracker}: {e}[/bold red]") + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='file_input', + upload_cookies=self.session.cookies, + upload_url=f"{self.base_url}/upload.php", + id_pattern=r'groupid=(\d+)', + success_status_code="200, 302, 303", + ) + + return diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 4718b85b3..f1086528d 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -1,45 +1,43 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx -import re from src.trackers.COMMON import COMMON -from src.console import console -from src.languages import process_desc_language +from src.trackers.UNIT3D import UNIT3D +import re -class CBR(): +class CBR(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='CBR') self.config = config + self.common = COMMON(config) self.tracker = 'CBR' self.source_flag = 'CapybaraBR' - self.upload_url = 'https://capybarabr.com/api/torrents/upload' - self.search_url = 'https://capybarabr.com/api/torrents/filter' - self.torrent_url = 'https://capybarabr.com/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://capybarabr.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - '3LTON', '4yEo', 'ADE', 'AFG', 'AROMA', 'AniHLS', 'AniURL', 'AnimeRG', 'BLUDV', 'CHD', 'CM8', 'Comando', 'CrEwSaDe', 'DNL', 'DeadFish', - 'ELiTE', 'FGT', 'FRDS', 'FUM', 'FaNGDiNG0', 'Flights', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'Hiro360', 'ION10', 'JIVE', 'KiNGDOM', + '3LTON', '4yEo', 'ADE', 'ASM', 'AFG', 'AROMA', 'AniHLS', 'AniURL', 'AnimeRG', 'BLUDV', 'CHD', 'CM8', 'Comando', 'CrEwSaDe', 'DNL', 'DeadFish', + 'DragsterPS', 'DRENAN', 'ELiTE', 'FGT', 'FRDS', 'FUM', 'FaNGDiNG0', 'Flights', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'Hiro360', 'ION10', 'JIVE', 'KiNGDOM', 'LEGi0N', 'LOAD', 'Lapumia', 'Leffe', 'MACCAULAY', 'MeGusta', 'NOIVTC', 'NhaNc3', 'OFT', 'Oj', 'PRODJi', 'PiRaTeS', 'PlaySD', 'RAPiDCOWS', - 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'RetroPeeps', 'SANTi', 'SILVEIRATeam', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'SicFoI', 'TGx', 'TM', + 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'RetroPeeps', 'S74Ll10n', 'SANTi', 'SILVEIRATeam', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'SicFoI', 'TGx', 'TM', 'TRiToN', 'Telly', 'UPiNSMOKE', 'URANiME', 'WAF', 'XS', 'YIFY', 'ZKBL', 'ZMNT', 'ZmN', 'aXXo', 'd3g', 'eSc', 
'iPlanet', 'mHD', 'mSD', 'nHD', 'nSD', 'nikt0', 'playXD', 'x0r', 'xRed' ] pass - async def get_cat_id(self, category_name, meta): + async def get_category_id(self, meta): category_id = { 'MOVIE': '1', 'TV': '2', 'ANIMES': '4' - }.get(category_name, '0') + }.get(meta['category'], '0') if meta['anime'] is True and category_id == '2': category_id = '4' - return category_id + return {'category_id': category_id} - async def get_type_id(self, type): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -48,10 +46,10 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') - return type_id + }.get(meta['type'], '0') + return {'type_id': type_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { '4320p': '1', '2160p': '2', @@ -63,24 +61,24 @@ async def get_res_id(self, resolution): '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') - return resolution_id + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} - async def edit_name(self, meta): + async def get_name(self, meta): name = meta['name'].replace('DD+ ', 'DDP').replace('DD ', 'DD').replace('AAC ', 'AAC').replace('FLAC ', 'FLAC') - # Se for Series ou Anime, remove o ano do título + # If it is a Series or Anime, remove the year from the title. if meta.get('category') in ['TV', 'ANIMES']: year = str(meta.get('year', '')) if year and year in name: name = name.replace(year, '').replace(f"({year})", '').strip() name = re.sub(r'\s{2,}', ' ', name) - # Remove o título AKA, exceto se for nacional + # Remove the AKA title, unless it is Brazilian if meta.get('original_language') != 'pt': name = name.replace(meta["aka"], '') - # Se for nacional, usa apenas o título de AKA, apagando o título estrangeiro + # If it is Brazilian, use only the AKA title, deleting the foreign title if meta.get('original_language') == 'pt' and meta.get('aka'): aka_clean = meta['aka'].replace('AKA', '').strip() title = meta.get('title') @@ -102,156 +100,31 @@ async def edit_name(self, meta): audio_tag = ' DUAL' else: audio_tag = '' - if audio_tag: - if meta.get('dual_audio', False): - cbr_name = cbr_name.replace("Dual-Audio ", '') - if '-' in cbr_name: - parts = cbr_name.rsplit('-', 1) - cbr_name = f"{parts[0]}{audio_tag}-{parts[1]}" - else: - cbr_name += audio_tag + + if audio_tag: + if meta.get('dual_audio', False): + cbr_name = cbr_name.replace("Dual-Audio ", '') + if '-' in cbr_name: + parts = cbr_name.rsplit('-', 1) + cbr_name = f"{parts[0]}{audio_tag}-{parts[1]}" + else: + cbr_name += audio_tag if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): for invalid_tag in invalid_tags: cbr_name = re.sub(f"-{invalid_tag}", "", cbr_name, flags=re.IGNORECASE) cbr_name = f"{cbr_name}-NoGroup" - return cbr_name + return {'name': cbr_name} - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - modq = await self.get_flag(meta, 'modq') - cat_id = await self.get_cat_id(meta['category'], meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - cbr_name = await self.edit_name(meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not 
self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} + async def get_additional_data(self, meta): data = { - 'name': cbr_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def search_existing(self, meta, disctype): - if not meta['is_disc'] == "BDMV": - if not meta.get('audio_languages') or not meta.get('subtitle_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) - portuguese_languages = ['Portuguese', 'Português'] - if not any(lang in meta.get('audio_languages', []) for lang in portuguese_languages) and not any(lang in meta.get('subtitle_languages', []) for lang in portuguese_languages): - if not meta['unattended']: - console.print('[bold red]CBR requires at least one Portuguese audio or subtitle track.') - meta['skipping'] = "CBR" - return - - dupes = [] - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 + return data - return 1 if meta.get(flag_name, False) else 0 + async def get_additional_checks(self, meta): + return await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=["portuguese", "português"], check_audio=True, check_subtitle=True + ) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a4ee248d8..6e40eea31 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -1,18 +1,21 @@ -from torf import Torrent +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import asyncio +import bencodepy +import click +import hashlib +import httpx +import json import os -import requests import re -import json -import click -import sys -import glob -from pymediainfo import MediaInfo +import requests import secrets +import sys from src.bbcode import BBCODE from src.console import console -from src.uploadscreens import upload_screens -from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.exportmi import exportInfo from src.languages import process_desc_language +from torf import Torrent class COMMON(): @@ -21,607 +24,116 @@ def __init__(self, config): self.parser = self.MediaInfoParser() pass - async def edit_torrent(self, meta, tracker, source_flag, torrent_filename="BASE"): - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent"): - new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent") + async def 
path_exists(self, path): + """Async wrapper for os.path.exists""" + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, os.path.exists, path) + + async def remove_file(self, path): + """Async wrapper for os.remove""" + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, os.remove, path) + + async def makedirs(self, path, exist_ok=True): + """Async wrapper for os.makedirs""" + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, lambda p, e: os.makedirs(p, exist_ok=e), path, exist_ok) + + async def async_input(self, prompt=False): + """Gets user input in a non-blocking way using asyncio.to_thread""" + if prompt: + console.print(prompt) + user_input = await asyncio.to_thread(input) + return user_input.strip() + + async def edit_torrent(self, meta, tracker, source_flag, torrent_filename="BASE", announce_url=None): + path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent" + if await self.path_exists(path): + loop = asyncio.get_running_loop() + new_torrent = await loop.run_in_executor(None, Torrent.read, path) for each in list(new_torrent.metainfo): if each not in ('announce', 'comment', 'creation date', 'created by', 'encoding', 'info'): new_torrent.metainfo.pop(each, None) - new_torrent.metainfo['announce'] = self.config['TRACKERS'][tracker].get('announce_url', "https://fake.tracker/").strip() + new_torrent.metainfo['announce'] = announce_url if announce_url else self.config['TRACKERS'][tracker].get('announce_url', "https://fake.tracker/").strip() new_torrent.metainfo['info']['source'] = source_flag if 'created by' in new_torrent.metainfo and isinstance(new_torrent.metainfo['created by'], str): created_by = new_torrent.metainfo['created by'] if "mkbrr" in created_by.lower(): - new_torrent.metainfo['created by'] = f"{created_by} using Audionut's Upload Assistant" + new_torrent.metainfo['created by'] = f"{created_by} using Upload Assistant" + # setting comment as blank as if BASE.torrent is manually created then it can result in private info such as download link being exposed. + new_torrent.metainfo['comment'] = '' if int(meta.get('entropy', None)) == 32: new_torrent.metainfo['info']['entropy'] = secrets.randbelow(2**31) elif int(meta.get('entropy', None)) == 64: new_torrent.metainfo['info']['entropy'] = secrets.randbelow(2**64) - # setting comment as blank as if BASE.torrent is manually created then it can result in private info such as download link being exposed. 
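# --- Editor's note (not part of the diff) -----------------------------------
# The helpers above (path_exists, remove_file, makedirs, async_input) and the
# reworked edit_torrent all follow one pattern: push a blocking filesystem or
# input call onto a worker thread so the event loop stays responsive, via
# loop.run_in_executor(None, ...) or asyncio.to_thread(...). A minimal,
# self-contained sketch of that pattern; read_text_file below is a hypothetical
# example helper, not code from this PR.
import asyncio
import os


async def read_text_file(path):
    # asyncio.to_thread (Python 3.9+) behaves like
    # loop.run_in_executor(None, ...) for simple call-throughs.
    def _read():
        with open(path, 'r', encoding='utf-8') as f:
            return f.read()
    return await asyncio.to_thread(_read)


async def example(path):
    # Blocking os.path.exists runs in a thread instead of on the event loop.
    if await asyncio.to_thread(os.path.exists, path):
        return await read_text_file(path)
    return None
# -----------------------------------------------------------------------------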
- new_torrent.metainfo['comment'] = '' + out_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" + await loop.run_in_executor(None, lambda: Torrent.copy(new_torrent).write(out_path, overwrite=True)) - Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent", overwrite=True) + async def download_tracker_torrent(self, meta, tracker, headers=None, params=None, downurl=None, hash_is_id=False, cross=False): + if cross: + path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}_cross].torrent" + else: + path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" + if downurl is not None: + try: + async with httpx.AsyncClient(headers=headers, params=params, timeout=30.0) as session: + async with session.stream("GET", downurl) as r: + r.raise_for_status() + async with aiofiles.open(path, "wb") as f: + async for chunk in r.aiter_bytes(): + await f.write(chunk) - # used to add tracker url, comment and source flag to torrent file - async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, comment): - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent"): - new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent") - new_torrent.metainfo['announce'] = new_tracker - new_torrent.metainfo['comment'] = comment - new_torrent.metainfo['info']['source'] = source_flag - Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent", overwrite=True) + if cross: + return None - async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header="", image_list=None): - if image_list is not None: - images = image_list - multi_screens = 0 - else: - images = meta['image_list'] - multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + if hash_is_id: + torrent_hash = await self.get_torrent_hash(meta, tracker) + return torrent_hash + return None - # Check for saved pack_image_links.json file - pack_images_file = os.path.join(meta['base_dir'], "tmp", meta['uuid'], "pack_image_links.json") - pack_images_data = {} - if os.path.exists(pack_images_file): - try: - with open(pack_images_file, 'r', encoding='utf-8') as f: - pack_images_data = json.load(f) - if meta['debug']: - console.print(f"[green]Loaded previously uploaded images from {pack_images_file}") - console.print(f"[blue]Found {pack_images_data.get('total_count', 0)} previously uploaded images") except Exception as e: - console.print(f"[yellow]Warning: Could not load pack image data: {str(e)}[/yellow]") - - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() - char_limit = int(self.config['DEFAULT'].get('charLimit', 14000)) - file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) - thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) - cover_size = int(self.config['DEFAULT'].get('bluray_image_size', '250')) - process_limit = int(self.config['DEFAULT'].get('processLimit', 10)) - episode_overview = int(self.config['DEFAULT'].get('episode_overview', False)) - try: - # If tracker has screenshot header specified in config, use that. Otherwise, check if screenshot default is used. Otherwise, fall back to None - screenheader = self.config['TRACKERS'][tracker].get('custom_screenshot_header', self.config['DEFAULT'].get('screenshot_header', None)) - except Exception: - screenheader = None - try: - # If tracker has description header specified in config, use that. 
Otherwise, check if custom description header default is used. - desc_header = self.config['TRACKERS'][tracker].get('custom_description_header', self.config['DEFAULT'].get('custom_description_header', desc_header)) - except Exception as e: - console.print(f"[yellow]Warning: Error setting custom description header: {str(e)}[/yellow]") - try: - # If screensPerRow is set, use that to determine how many screenshots should be on each row. Otherwise, use 2 as default - screensPerRow = int(self.config['DEFAULT'].get('screens_per_row', 2)) - except Exception: - screensPerRow = 2 - try: - # If custom signature set and isn't empty, use that instead of the signature parameter - custom_signature = self.config['TRACKERS'][tracker].get('custom_signature', signature) - if custom_signature != '': - signature = custom_signature - except Exception as e: - console.print(f"[yellow]Warning: Error setting custom signature: {str(e)}[/yellow]") - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: - if desc_header: - if not desc_header.endswith('\n'): - descfile.write(desc_header + '\n') - else: - descfile.write(desc_header) - await process_desc_language(meta, descfile, tracker) - add_logo_enabled = self.config["DEFAULT"].get("add_logo", False) - if add_logo_enabled and 'logo' in meta: - logo = meta['logo'] - logo_size = self.config["DEFAULT"].get("logo_size", 420) - if logo != "": - descfile.write(f"[center][img={logo_size}]{logo}[/img][/center]\n\n") - bluray_link = self.config['DEFAULT'].get("add_bluray_link", False) - if meta.get('is_disc') == "BDMV" and bluray_link and meta.get('release_url', ''): - descfile.write(f"[center]{meta['release_url']}[/center]\n") - covers = False - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/covers.json"): - covers = True - if meta.get('is_disc') == "BDMV" and self.config['DEFAULT'].get('use_bluray_images', False) and covers: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/covers.json", 'r', encoding='utf-8') as f: - cover_data = json.load(f) - if isinstance(cover_data, list): - descfile.write("[center]") - - for img_data in cover_data: - if 'raw_url' in img_data and 'web_url' in img_data: - web_url = img_data['web_url'] - raw_url = img_data['raw_url'] - descfile.write(f"[url={web_url}][img={cover_size}]{raw_url}[/img][/url]") - - descfile.write("[/center]\n\n") - season_name = meta.get('tvdb_season_name') if meta.get('tvdb_season_name') is not None and meta.get('tvdb_season_name') != "" else None - season_number = meta.get('tvdb_season_number') if meta.get('tvdb_season_number') is not None and meta.get('tvdb_season_number') != "" else None - episode_number = meta.get('tvdb_episode_number') if meta.get('tvdb_episode_number') is not None and meta.get('tvdb_episode_number') != "" else None - episode_title = meta.get('auto_episode_title') if meta.get('auto_episode_title') is not None and meta.get('auto_episode_title') != "" else None - if episode_title is None: - episode_title = meta.get('tvmaze_episode_data', {}).get('episode_name') if meta.get('tvmaze_episode_data', {}).get('episode_name') else None - if episode_overview and season_name and season_number and episode_number and episode_title: - if not tracker == "HUNO": - descfile.write("[center][pre]") - else: - descfile.write("[center]") - descfile.write(f"{season_name} - S{season_number}E{episode_number}: {episode_title}") - if not tracker == "HUNO": - descfile.write("[/pre][/center]\n\n") - else: - descfile.write("[/center]\n\n") - if 
episode_overview and meta.get('overview_meta') is not None and meta.get('overview_meta') != "": - episode_data = meta.get('overview_meta') - if not tracker == "HUNO": - descfile.write("[center][pre]") - else: - descfile.write("[center]") - descfile.write(episode_data) - if not tracker == "HUNO": - descfile.write("[/pre][/center]\n\n") - else: - descfile.write("[/center]\n\n") + console.print(f"[yellow]Warning: Could not download torrent file: {str(e)}[/yellow]") + console.print("[yellow]Download manually from the tracker.[/yellow]") + return None - try: - if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): - descfile.write(self.config['DEFAULT'].get('tonemapped_header')) - except Exception as e: - console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") - - bbcode = BBCODE() - discs = meta.get('discs', []) - filelist = meta.get('filelist', []) - desc = base - desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) - if not tracker == "AITHER": - desc = re.sub(r'\[center\]\[spoiler=FraMeSToR NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) + # used to add tracker url, comment and source flag to torrent file + async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, comment, headers=None, params=None, downurl=None, hash_is_id=False): + path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" + if downurl is not None: + await self.download_tracker_torrent(meta, tracker, headers=headers, params=params, downurl=downurl, hash_is_id=hash_is_id) + + if await self.path_exists(path): + loop = asyncio.get_running_loop() + new_torrent = await loop.run_in_executor(None, Torrent.read, path) + if isinstance(new_tracker, list): + new_torrent.metainfo['announce'] = new_tracker[0] + new_torrent.metainfo['announce-list'] = [new_tracker] else: - if "framestor" in meta and meta['framestor']: - desc = re.sub(r'\[center\]\[spoiler=FraMeSToR NFO:\]', '', desc, count=1) - desc = re.sub(r'\[/spoiler\]\[/center\]', '', desc, count=1) - desc = desc.replace("https://i.imgur.com/e9o0zpQ.png", "https://beyondhd.co/images/2017/11/30/c5802892418ee2046efba17166f0cad9.png") - images = [] - desc = bbcode.convert_pre_to_code(desc) - desc = bbcode.convert_hide_to_spoiler(desc) - desc = desc.replace("[user]", "").replace("[/user]", "") - desc = desc.replace("[hr]", "").replace("[/hr]", "") - desc = desc.replace("[ul]", "").replace("[/ul]", "") - desc = desc.replace("[ol]", "").replace("[/ol]", "") - if comparison is False: - desc = bbcode.convert_comparison_to_collapse(desc, 1000) - desc = desc.replace('[img]', '[img=300]') - descfile.write(desc) - # Handle single disc case - if len(discs) == 1: - each = discs[0] - if each['type'] == "DVD": - descfile.write("[center]") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]\n\n") - descfile.write("[/center]") - if screenheader is not None: - descfile.write(screenheader + '\n') - descfile.write("[center]") - for img_index in range(len(images[:int(meta['screens'])])): - web_url = images[img_index]['web_url'] - raw_url = images[img_index]['raw_url'] - descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") - - # If screensPerRow is set and we have reached that number of screenshots, add a new line - if screensPerRow and (img_index + 1) % screensPerRow == 0: - descfile.write("\n") - descfile.write("[/center]") - if each['type'] 
== "BDMV": - bdinfo_keys = [key for key in each if key.startswith("bdinfo")] - if len(bdinfo_keys) > 1: - if 'retry_count' not in meta: - meta['retry_count'] = 0 - - for i, key in enumerate(bdinfo_keys[1:], start=1): # Skip the first bdinfo - new_images_key = f'new_images_playlist_{i}' - bdinfo = each[key] - edition = bdinfo.get("edition", "Unknown Edition") - - # Find the corresponding summary for this bdinfo - summary_key = f"summary_{i}" if i > 0 else "summary" - summary = each.get(summary_key, "No summary available") - - # Check for saved images first - if pack_images_data and 'keys' in pack_images_data and new_images_key in pack_images_data['keys']: - saved_images = pack_images_data['keys'][new_images_key]['images'] - if saved_images: - if meta['debug']: - console.print(f"[yellow]Using saved images from pack_image_links.json for {new_images_key}") - - meta[new_images_key] = [] - for img in saved_images: - meta[new_images_key].append({ - 'img_url': img.get('img_url', ''), - 'raw_url': img.get('raw_url', ''), - 'web_url': img.get('web_url', '') - }) - - if new_images_key in meta and meta[new_images_key]: - descfile.write("[center]\n\n") - # Use the summary corresponding to the current bdinfo - descfile.write(f"[spoiler={edition}][code]{summary}[/code][/spoiler]\n\n") - if meta['debug']: - console.print("[yellow]Using original uploaded images for first disc") - descfile.write("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - descfile.write("[/center]\n ") - else: - descfile.write("[center]\n\n") - # Use the summary corresponding to the current bdinfo - descfile.write(f"[spoiler={edition}][code]{summary}[/code][/spoiler]\n\n") - descfile.write("[/center]\n\n") - meta['retry_count'] += 1 - meta[new_images_key] = [] - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"PLAYLIST_{i}-*.png") - if not new_screens: - use_vs = meta.get('vapoursynth', False) - try: - await disc_screenshots(meta, f"PLAYLIST_{i}", bdinfo, meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True) - except Exception as e: - print(f"Error during BDMV screenshot capture: {e}") - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"PLAYLIST_{i}-*.png") - if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) - if uploaded_images and not meta.get('skip_imghost_upload', False): - await self.save_image_links(meta, new_images_key, uploaded_images) - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) - - descfile.write("[center]") - for img in uploaded_images: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - descfile.write("[/center]\n") - - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) - - # Handle multiple discs case - elif len(discs) > 1: - # Initialize retry_count if not already set - if 'retry_count' not in meta: - meta['retry_count'] = 0 - - total_discs_to_process = min(len(discs), process_limit) - processed_count = 0 - if multi_screens != 0: - 
console.print("[cyan]Processing screenshots for packed content (multiScreens)[/cyan]") - console.print(f"[cyan]{total_discs_to_process} files (processLimit)[/cyan]") - - for i, each in enumerate(discs): - # Set a unique key per disc for managing images - new_images_key = f'new_images_disc_{i}' - - if i == 0: - descfile.write("[center]") - if each['type'] == "BDMV": - descfile.write(f"{each.get('name', 'BDINFO')}\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - # For the first disc, use images from `meta['image_list']` and add screenheader if applicable - if meta['debug']: - console.print("[yellow]Using original uploaded images for first disc") - if screenheader is not None: - descfile.write("[/center]\n\n") - descfile.write(screenheader + '\n') - descfile.write("[center]") - for img_index in range(len(images[:int(meta['screens'])])): - web_url = images[img_index]['web_url'] - raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - - # If screensPerRow is set and we have reached that number of screenshots, add a new line - if screensPerRow and (img_index + 1) % screensPerRow == 0: - descfile.write("\n") - descfile.write("[/center]\n\n") - else: - if multi_screens != 0: - processed_count += 1 - disc_name = each.get('name', f"Disc {i}") - print(f"\rProcessing disc {processed_count}/{total_discs_to_process}: {disc_name[:40]}{'...' if len(disc_name) > 40 else ''}", end="", flush=True) - # Check if screenshots exist for the current disc key - # Check for saved images first - if pack_images_data and 'keys' in pack_images_data and new_images_key in pack_images_data['keys']: - saved_images = pack_images_data['keys'][new_images_key]['images'] - if saved_images: - if meta['debug']: - console.print(f"[yellow]Using saved images from pack_image_links.json for {new_images_key}") - - meta[new_images_key] = [] - for img in saved_images: - meta[new_images_key].append({ - 'img_url': img.get('img_url', ''), - 'raw_url': img.get('raw_url', ''), - 'web_url': img.get('web_url', '') - }) - if new_images_key in meta and meta[new_images_key]: - if meta['debug']: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") - descfile.write("[center]") - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - descfile.write("[/center]\n\n") - # Use existing URLs from meta to write to descfile - descfile.write("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - descfile.write("[/center]\n\n") - else: - # Increment retry_count for tracking but use unique disc keys for each disc - meta['retry_count'] += 1 - meta[new_images_key] = [] - descfile.write("[center]") - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - 
elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - descfile.write("[/center]\n\n") - # Check if new screenshots already exist before running prep.screenshots - if each['type'] == "BDMV": - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - elif each['type'] == "DVD": - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - if not new_screens: - if meta['debug']: - console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") - # Run prep.screenshots if no screenshots are present - if each['type'] == "BDMV": - use_vs = meta.get('vapoursynth', False) - try: - await disc_screenshots(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True) - except Exception as e: - print(f"Error during BDMV screenshot capture: {e}") - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if each['type'] == "DVD": - try: - await dvd_screenshots(meta, i, multi_screens, True) - except Exception as e: - print(f"Error during DVD screenshot capture: {e}") - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - - if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) - if uploaded_images and not meta.get('skip_imghost_upload', False): - await self.save_image_links(meta, new_images_key, uploaded_images) - # Append each uploaded image's data to `meta[new_images_key]` - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) - - # Write new URLs to descfile - descfile.write("[center]") - for img in uploaded_images: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - descfile.write("[/center]\n") - - # Save the updated meta to `meta.json` after upload - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) - console.print() - - # Handle single file case - if len(filelist) == 1: - if meta.get('comparison') and meta.get('comparison_groups'): - descfile.write("[center]") - comparison_groups = meta.get('comparison_groups', {}) - sorted_group_indices = sorted(comparison_groups.keys(), key=lambda x: int(x)) - - comp_sources = [] - for group_idx in sorted_group_indices: - group_data = comparison_groups[group_idx] - group_name = group_data.get('name', f'Group {group_idx}') - comp_sources.append(group_name) - - sources_string = ", ".join(comp_sources) - descfile.write(f"[comparison={sources_string}]\n") - - images_per_group = min([ - len(comparison_groups[idx].get('urls', [])) - for idx in sorted_group_indices - ]) - - for img_idx in range(images_per_group): - for group_idx in sorted_group_indices: - group_data = comparison_groups[group_idx] - urls = group_data.get('urls', []) - if img_idx < len(urls): - img_url = urls[img_idx].get('raw_url', '') - if img_url: - descfile.write(f"{img_url}\n") - - 
descfile.write("[/comparison][/center]\n\n") - - if screenheader is not None: - descfile.write(screenheader + '\n') - descfile.write("[center]") - for img_index in range(len(images[:int(meta['screens'])])): - web_url = images[img_index]['web_url'] - raw_url = images[img_index]['raw_url'] - descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") - if screensPerRow and (img_index + 1) % screensPerRow == 0: - descfile.write("\n") - descfile.write("[/center]") - - # Handle multiple files case - # Initialize character counter - char_count = 0 - max_char_limit = char_limit # Character limit - other_files_spoiler_open = False # Track if "Other files" spoiler has been opened - total_files_to_process = min(len(filelist), process_limit) - processed_count = 0 - if multi_screens != 0 and total_files_to_process > 1: - console.print("[cyan]Processing screenshots for packed content (multiScreens)[/cyan]") - console.print(f"[cyan]{total_files_to_process} files (processLimit)[/cyan]") - - # First Pass: Create and Upload Images for Each File - for i, file in enumerate(filelist): - if i >= process_limit: - # console.print("[yellow]Skipping processing more files as they exceed the process limit.") - continue - if multi_screens != 0: - if total_files_to_process > 1: - processed_count += 1 - filename = os.path.basename(file) - print(f"\rProcessing file {processed_count}/{total_files_to_process}: {filename[:40]}{'...' if len(filename) > 40 else ''}", end="", flush=True) - if i > 0: - new_images_key = f'new_images_file_{i}' - # Check for saved images first - if pack_images_data and 'keys' in pack_images_data and new_images_key in pack_images_data['keys']: - saved_images = pack_images_data['keys'][new_images_key]['images'] - if saved_images: - if meta['debug']: - console.print(f"[yellow]Using saved images from pack_image_links.json for {new_images_key}") - - meta[new_images_key] = [] - for img in saved_images: - meta[new_images_key].append({ - 'img_url': img.get('img_url', ''), - 'raw_url': img.get('raw_url', ''), - 'web_url': img.get('web_url', '') - }) - if new_images_key not in meta or not meta[new_images_key]: - meta[new_images_key] = [] - # Proceed with image generation if not already present - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - - # If no screenshots exist, create them - if not new_screens: - if meta['debug']: - console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") - try: - await screenshots(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None) - except Exception as e: - print(f"Error during generic screenshot capture: {e}") - - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - - # Upload generated screenshots - if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) - if uploaded_images and not meta.get('skip_imghost_upload', False): - await self.save_image_links(meta, new_images_key, uploaded_images) - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) - - # Save updated meta - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) + new_torrent.metainfo['announce'] = 
new_tracker + new_torrent.metainfo['info']['source'] = source_flag - # Second Pass: Process MediaInfo and Write Descriptions - if len(filelist) > 1: - for i, file in enumerate(filelist): - if i >= process_limit: - continue - # Extract filename directly from the file path - filename = os.path.splitext(os.path.basename(file.strip()))[0].replace('[', '').replace(']', '') - - # If we are beyond the file limit, add all further files in a spoiler - if multi_screens != 0: - if i >= file_limit: - if not other_files_spoiler_open: - descfile.write("[center][spoiler=Other files]\n") - char_count += len("[center][spoiler=Other files]\n") - other_files_spoiler_open = True - - # Write filename in BBCode format with MediaInfo in spoiler if not the first file - if multi_screens != 0: - if i > 0 and char_count < max_char_limit: - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) - formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") - char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") - else: - # If there are screen shots and screen shot header, write the header above the first filename - if i == 0 and images and screenheader is not None: - descfile.write(screenheader + '\n') - char_count += len(screenheader + '\n') - descfile.write(f"[center]{filename}\n[/center]\n") - char_count += len(f"[center]{filename}\n[/center]\n") - - # Write images if they exist - new_images_key = f'new_images_file_{i}' - if i == 0: # For the first file, use 'image_list' key and add screenheader if applicable - if images: - descfile.write("[center]") - char_count += len("[center]") - for img_index in range(len(images)): - web_url = images[img_index]['web_url'] - raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - char_count += len(image_str) - - # If screensPerRow is set and we have reached that number of screenshots, add a new line - if screensPerRow and (img_index + 1) % screensPerRow == 0: - descfile.write("\n") - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") - elif multi_screens != 0: - if new_images_key in meta and meta[new_images_key]: - descfile.write("[center]") - char_count += len("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url] " - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n") - char_count += len("[/center]\n\n") - - if other_files_spoiler_open: - descfile.write("[/spoiler][/center]\n") - char_count += len("[/spoiler][/center]\n") - - if char_count >= 1 and meta['debug']: - console.print(f"[yellow]Total characters written to description: {char_count}") - if total_files_to_process > 1: - console.print() - - # Append signature if provided - if signature: - descfile.write(signature) - descfile.close() - return + # Calculate hash + torrent_hash = None + if hash_is_id: + info_bytes = bencodepy.encode(new_torrent.metainfo['info']) + torrent_hash = hashlib.sha1(info_bytes).hexdigest() + + new_torrent.metainfo['comment'] = comment + torrent_hash if hash_is_id else comment + + await loop.run_in_executor(None, lambda: Torrent.copy(new_torrent).write(path, overwrite=True)) + + return torrent_hash + + return None 
+ + async def get_torrent_hash(self, meta, tracker): + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}].torrent" + async with aiofiles.open(torrent_path, 'rb') as torrent_file: + torrent_content = await torrent_file.read() + torrent_data = bencodepy.decode(torrent_content) + info = bencodepy.encode(torrent_data[b'info']) + info_hash = hashlib.sha1(info).hexdigest() + return info_hash async def save_image_links(self, meta, image_key, image_list=None): if image_list is None: @@ -917,10 +429,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N # Extract data from the attributes category = attributes.get('category') description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') + tmdb = int(attributes.get('tmdb_id') or 0) + tvdb = int(attributes.get('tvdb_id') or 0) + mal = int(attributes.get('mal_id') or 0) + imdb = int(attributes.get('imdb_id') or 0) infohash = attributes.get('info_hash') tmdb = 0 if tmdb == 0 else tmdb tvdb = 0 if tvdb == 0 else tvdb @@ -944,10 +456,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N # Extract data from the attributes category = attributes.get('category') description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') + tmdb = int(attributes.get('tmdb_id') or 0) + tvdb = int(attributes.get('tvdb_id') or 0) + mal = int(attributes.get('mal_id') or 0) + imdb = int(attributes.get('imdb_id') or 0) infohash = attributes.get('info_hash') tmdb = 0 if tmdb == 0 else tmdb tvdb = 0 if tvdb == 0 else tvdb @@ -974,13 +486,6 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N if meta.get('debug'): console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) - if imdb != 0: - imdb_str = str(f'tt{imdb}').zfill(7) - else: - imdb_str = None - - console.print(f"[green]Valid IDs found from {tracker}: TMDb: {tmdb}, IMDb: {imdb_str}, TVDb: {tvdb}, MAL: {mal}[/green]") - if tmdb or imdb or tvdb: if not id: # Only prompt the user for ID selection if not searching by ID @@ -999,8 +504,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N console.print(f"Extracted description: {description}", markup=False) if meta.get('unattended') or (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('huno') or meta.get('ulcx')): - meta['description'] = description - meta['saved_description'] = True + return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name else: console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is:") @@ -1009,16 +513,11 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N edited_description = click.edit(description) if edited_description: description = edited_description.strip() - meta['description'] = description - meta['saved_description'] = True elif edit_choice.lower() == 'd': description = None - imagelist = [] console.print("[yellow]Description discarded.[/yellow]") else: console.print("[green]Keeping the original description.[/green]") - meta['description'] = description - meta['saved_description'] = True if not meta.get('keep_images'): 
imagelist = [] else: @@ -1347,3 +846,97 @@ def format_bbcode(self, parsed_mediainfo): bbcode_output += "\n" return bbcode_output + + async def get_bdmv_mediainfo(self, meta, remove=None): + mediainfo = '' + mi_path = f'{meta["base_dir"]}/tmp/{meta["uuid"]}/MEDIAINFO_CLEANPATH.txt' + + if meta.get('is_disc') == 'BDMV': + if not os.path.isfile(mi_path): + if meta['debug']: + console.print("[blue]Generating MediaInfo for BDMV...[/blue]") + path = meta['discs'][0]['playlists'][0]['path'] + await exportInfo( + path, + False, + meta['uuid'], + meta['base_dir'], + export_text=True, + is_dvd=False, + debug=meta.get('debug', False) + ) + + else: + async with aiofiles.open(mi_path, 'r', encoding='utf-8') as f: + lines = await f.readlines() + + if remove: + if not isinstance(remove, list): + lines_to_remove = [remove] + else: + lines_to_remove = remove + + lines = [ + line for line in lines + if not any(line.strip().startswith(prefix) for prefix in lines_to_remove) + ] + + mediainfo = ''.join(lines) if remove else lines + + return mediainfo + + async def check_language_requirements( + self, + meta, + tracker, + languages_to_check, + check_audio=False, + check_subtitle=False, + require_both=False, + ): + """Check if the given media meets language requirements.""" + try: + if not meta.get("language_checked", False): + await process_desc_language(meta, desc=None, tracker=tracker) + + languages_to_check = [lang.lower() for lang in languages_to_check] + audio_languages = [lang.lower() for lang in meta.get("audio_languages", [])] + subtitle_languages = [lang.lower() for lang in meta.get("subtitle_languages", [])] + + audio_ok = ( + not check_audio + or any(lang in audio_languages for lang in languages_to_check) + ) + subtitle_ok = ( + not check_subtitle + or any(lang in subtitle_languages for lang in languages_to_check) + ) + + if require_both: + if not (audio_ok and subtitle_ok): + console.print( + f"[red]Language requirement not met for [bold]{tracker}[/bold].[/red]\n" + f"[yellow]Required both audio and subtitles in one of the following:[/yellow] " + f"{', '.join(languages_to_check)}\n" + f"[cyan]Found Audio:[/cyan] {', '.join(audio_languages) or 'None'}\n" + f"[cyan]Found Subtitles:[/cyan] {', '.join(subtitle_languages) or 'None'}" + ) + else: + if not (audio_ok or subtitle_ok): + console.print( + f"[red]Language requirement not met for [bold]{tracker}[/bold].[/red]\n" + f"[yellow]Required at least one of the following:[/yellow] " + f"{', '.join(languages_to_check)}\n" + f"[cyan]Found Audio:[/cyan] {', '.join(audio_languages) or 'None'}\n" + f"[cyan]Found Subtitles:[/cyan] {', '.join(subtitle_languages) or 'None'}" + ) + + if require_both: + return audio_ok and subtitle_ok + else: + return audio_ok or subtitle_ok + + except Exception as e: + console.print_exception() + console.print(f"[red]Error checking language requirements: {e}[/red]") + return False diff --git a/src/trackers/CZ.py b/src/trackers/CZ.py new file mode 100644 index 000000000..96b83dcea --- /dev/null +++ b/src/trackers/CZ.py @@ -0,0 +1,129 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +from datetime import datetime +from src.trackers.COMMON import COMMON +from src.trackers.AVISTAZ_NETWORK import AZTrackerBase + + +class CZ(AZTrackerBase): + def __init__(self, config): + super().__init__(config, tracker_name='CZ') + self.config = config + self.common = COMMON(config) + self.tracker = 'CZ' + self.source_flag = 'CinemaZ' + self.banned_groups = [''] + self.base_url = 
'https://cinemaz.to/' + self.torrent_url = f'{self.base_url}/torrent/' + self.requests_url = f'{self.base_url}/requests' + + async def rules(self, meta): + warnings = [] + + # This also checks the rule 'FANRES content is not allowed' + if meta['category'] not in ('MOVIE', 'TV'): + warnings.append( + 'The only allowed content to be uploaded are Movies and TV Shows.\n' + 'Anything else, like games, music, software and porn is not allowed!' + ) + + if meta.get('anime', False): + warnings.append("Upload Anime content to our sister site AnimeTorrents.me instead. If it's on AniDB, it's an anime.") + + # https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes + + africa = [ + 'AO', 'BF', 'BI', 'BJ', 'BW', 'CD', 'CF', 'CG', 'CI', 'CM', 'CV', 'DJ', 'DZ', 'EG', 'EH', + 'ER', 'ET', 'GA', 'GH', 'GM', 'GN', 'GQ', 'GW', 'IO', 'KE', 'KM', 'LR', 'LS', 'LY', 'MA', + 'MG', 'ML', 'MR', 'MU', 'MW', 'MZ', 'NA', 'NE', 'NG', 'RE', 'RW', 'SC', 'SD', 'SH', 'SL', + 'SN', 'SO', 'SS', 'ST', 'SZ', 'TD', 'TF', 'TG', 'TN', 'TZ', 'UG', 'YT', 'ZA', 'ZM', 'ZW' + ] + + america = [ + 'AG', 'AI', 'AR', 'AW', 'BB', 'BL', 'BM', 'BO', 'BQ', 'BR', 'BS', 'BV', 'BZ', 'CA', 'CL', + 'CO', 'CR', 'CU', 'CW', 'DM', 'DO', 'EC', 'FK', 'GD', 'GF', 'GL', 'GP', 'GS', 'GT', 'GY', + 'HN', 'HT', 'JM', 'KN', 'KY', 'LC', 'MF', 'MQ', 'MS', 'MX', 'NI', 'PA', 'PE', 'PM', 'PR', + 'PY', 'SR', 'SV', 'SX', 'TC', 'TT', 'US', 'UY', 'VC', 'VE', 'VG', 'VI' + ] + + europe = [ + 'AD', 'AL', 'AT', 'AX', 'BA', 'BE', 'BG', 'BY', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', + 'FO', 'FR', 'GB', 'GG', 'GI', 'GR', 'HR', 'HU', 'IE', 'IM', 'IS', 'IT', 'JE', 'LI', 'LT', + 'LU', 'LV', 'MC', 'MD', 'ME', 'MK', 'MT', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'RU', 'SE', + 'SI', 'SJ', 'SK', 'SM', 'SU', 'UA', 'VA', 'XC' + ] + + # Countries that belong on PrivateHD (unless they are old) + phd_countries = [ + 'AG', 'AI', 'AU', 'BB', 'BM', 'BS', 'BZ', 'CA', 'CW', 'DM', 'GB', 'GD', 'IE', + 'JM', 'KN', 'KY', 'LC', 'MS', 'NZ', 'PR', 'TC', 'TT', 'US', 'VC', 'VG', 'VI', + ] + + # Countries that belong on AvistaZ + az_countries = [ + 'BD', 'BN', 'BT', 'CN', 'HK', 'ID', 'IN', 'JP', 'KH', 'KP', 'KR', 'LA', 'LK', + 'MM', 'MN', 'MO', 'MY', 'NP', 'PH', 'PK', 'SG', 'TH', 'TL', 'TW', 'VN' + ] + + # Countries normally allowed on CinemaZ + set_phd = set(phd_countries) + set_europe = set(europe) + set_america = set(america) + middle_east = [ + 'AE', 'BH', 'CY', 'EG', 'IR', 'IQ', 'IL', 'JO', 'KW', 'LB', 'OM', 'PS', 'QA', 'SA', 'SY', 'TR', 'YE' + ] + + # Combine all allowed regions for CinemaZ + cz_allowed_countries = list( + (set_europe - {'GB', 'IE'}) | # Europe excluding UK and Ireland + (set_america - set_phd) | # All of America excluding the PHD countries + set(africa) | # All of Africa + set(middle_east) | # Middle East countries + {'RU'} # Russia + ) + + origin_countries_codes = meta.get('origin_country', []) + year = meta.get('year') + is_older_than_50_years = False + + if isinstance(year, int): + current_year = datetime.now().year + if (current_year - year) >= 50: + is_older_than_50_years = True + + # Case 1: The content is from a major English-speaking country + if any(code in phd_countries for code in origin_countries_codes): + if is_older_than_50_years: + # It's old, so it's ALLOWED on CinemaZ + pass + else: + # It's new, so redirect to PrivateHD + warnings.append( + 'DO NOT upload recent mainstream English content. ' + 'Upload this to our sister site PrivateHD.to instead.' 
+ ) + + # Case 2: The content is Asian, redirect to AvistaZ + elif any(code in az_countries for code in origin_countries_codes): + warnings.append( + 'DO NOT upload Asian content. ' + 'Upload this to our sister site AvistaZ.to instead.' + ) + + # Case 3: The content is from one of the normally allowed CZ regions + elif any(code in cz_allowed_countries for code in origin_countries_codes): + # It's from a valid region, so it's ALLOWED on CinemaZ + pass + + # Case 4: Fallback for any other case (e.g., country not in any list) + else: + warnings.append( + 'This content is not allowed. CinemaZ accepts content from Europe (excluding UK/IE), ' + 'Africa, the Middle East, Russia, and the Americas (excluding recent mainstream English content).' + ) + + if warnings: + all_warnings = '\n\n'.join(filter(None, warnings)) + return all_warnings + + return diff --git a/src/trackers/DC.py b/src/trackers/DC.py index c4e44187a..565d930f1 100644 --- a/src/trackers/DC.py +++ b/src/trackers/DC.py @@ -1,102 +1,126 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- +import aiofiles +import httpx import os -import re -import requests -from src.exceptions import UploadException from src.console import console +from src.get_desc import DescriptionBuilder from src.rehostimages import check_hosts -from .COMMON import COMMON +from src.trackers.COMMON import COMMON -class DC(COMMON): +class DC: def __init__(self, config): - super().__init__(config) + self.config = config + self.common = COMMON(config) self.tracker = 'DC' - self.source_flag = 'DigitalCore.club' - self.base_url = "https://digitalcore.club/" - self.torrent_url = f"{self.base_url}/torrent/" - self.api_base_url = f"{self.base_url}/api/v1" - self.banned_groups = [""] - - self.session = requests.Session() - self.session.headers.update({'User-Agent': 'Mozilla/5.0'}) - self.api_key = self.config['TRACKERS'][self.tracker].get('announce_url').replace('https://digitalcore.club/tracker.php/', '').replace('/announce', '') - self.username = self.config['TRACKERS'][self.tracker].get('username') - self.password = self.config['TRACKERS'][self.tracker].get('password') - self.auth_cookies = None - self.signature = "[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://digitalcore.club/' + self.api_base_url = f'{self.base_url}/api/v1/torrents' + self.torrent_url = f'{self.base_url}/torrent/' + self.banned_groups = [''] + self.api_key = self.config['TRACKERS'][self.tracker].get('api_key') + self.session = httpx.AsyncClient(headers={ + 'X-API-KEY': self.api_key + }, timeout=30.0) + + async def mediainfo(self, meta): + if meta.get('is_disc') == 'BDMV': + mediainfo = await self.common.get_bdmv_mediainfo(meta, remove=['File size', 'Overall bit rate']) + else: + mi_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + with open(mi_path, 'r', encoding='utf-8') as f: + mediainfo = f.read() - async def generate_description(self, meta): - base_desc = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" - dc_desc = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + return mediainfo + async def generate_description(self, meta): + builder = DescriptionBuilder(self.config) desc_parts = [] - # BDInfo - tech_info = "" - if meta.get('is_disc') == 'BDMV': - bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" - if os.path.exists(bd_summary_file): - with 
open(bd_summary_file, 'r', encoding='utf-8') as f: - tech_info = f.read() + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) - if tech_info: - desc_parts.append(f"{tech_info}\n") + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker) + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + desc_parts.append(f'[center]{episode_overview}[/center]') - if os.path.exists(base_desc): - with open(base_desc, 'r', encoding='utf-8') as f: - manual_desc = f.read() - desc_parts.append(manual_desc) + # File information + desc_parts.append(await builder.get_bdinfo_section(meta)) + + # NFO + if meta.get('description_nfo_content', ''): + desc_parts.append(f"[nfo]{meta.get('description_nfo_content')}[/nfo]") + + # User description + desc_parts.append(await builder.get_user_description(meta)) # Screenshots - if f'{self.tracker}_images_key' in meta: - images = meta[f'{self.tracker}_images_key'] + all_images = [] + + menu_images = meta.get("menu_images", []) + if menu_images: + all_images.extend(menu_images) + + if f"{self.tracker}_images_key" in meta: + images = meta.get(f"{self.tracker}_images_key") else: - images = meta['image_list'] + images = meta.get("image_list") if images: - screenshots_block = "[center][b]Screenshots[/b]\n\n" - for image in images: - img_url = image['img_url'] - web_url = image['web_url'] - screenshots_block += f"[url={web_url}][img]{img_url}[/img][/url] " - screenshots_block += "[/center]" - desc_parts.append(screenshots_block) - - if self.signature: - desc_parts.append(self.signature) - - final_description = "\n".join(filter(None, desc_parts)) + all_images.extend(images) + + if all_images: + screenshots_block = "" + for image in all_images: + web_url = image.get("web_url") + raw_url = image.get("raw_url") + if web_url and raw_url: + screenshots_block += f"[url={web_url}][img=350]{raw_url}[/img][/url] " + if screenshots_block: + desc_parts.append(f"[center]{screenshots_block}[/center]") + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Signature + desc_parts.append(f"[center][url=https://github.com/Audionut/Upload-Assistant]{meta['ua_signature']}[/url][/center]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + from src.bbcode import BBCODE bbcode = BBCODE() - desc = final_description - desc = desc.replace("[user]", "").replace("[/user]", "") - desc = desc.replace("[align=left]", "").replace("[/align]", "") - desc = desc.replace("[right]", "").replace("[/right]", "") - desc = desc.replace("[align=right]", "").replace("[/align]", "") - desc = desc.replace("[sup]", "").replace("[/sup]", "") - desc = desc.replace("[sub]", "").replace("[/sub]", "") - desc = desc.replace("[alert]", "").replace("[/alert]", "") - desc = desc.replace("[note]", "").replace("[/note]", "") - desc = desc.replace("[hr]", "").replace("[/hr]", "") - desc = desc.replace("[h1]", "[u][b]").replace("[/h1]", "[/b][/u]") - desc = desc.replace("[h2]", "[u][b]").replace("[/h2]", "[/b][/u]") - desc = desc.replace("[h3]", "[u][b]").replace("[/h3]", "[/b][/u]") - desc = desc.replace("[ul]", "").replace("[/ul]", "") - desc = desc.replace("[ol]", "").replace("[/ol]", "") - desc = re.sub(r"\[center\]\[spoiler=.*? 
NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]", r"[nfo]\1[/nfo]", desc, flags=re.DOTALL) - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) - desc = re.sub(r"(\[spoiler=[^]]+])", "[spoiler]", desc, flags=re.IGNORECASE) - desc = bbcode.convert_comparison_to_centered(desc, 1000) - - with open(dc_desc, 'w', encoding='utf-8') as f: - f.write(desc) + description = description.replace('[user]', '').replace('[/user]', '') + description = description.replace('[align=left]', '').replace('[/align]', '') + description = description.replace('[right]', '').replace('[/right]', '') + description = description.replace('[align=right]', '').replace('[/align]', '') + description = bbcode.remove_sup(description) + description = bbcode.remove_sub(description) + description = description.replace('[alert]', '').replace('[/alert]', '') + description = description.replace('[note]', '').replace('[/note]', '') + description = description.replace('[hr]', '').replace('[/hr]', '') + description = description.replace('[h1]', '[u][b]').replace('[/h1]', '[/b][/u]') + description = description.replace('[h2]', '[u][b]').replace('[/h2]', '[/b][/u]') + description = description.replace('[h3]', '[u][b]').replace('[/h3]', '[/b][/u]') + description = description.replace('[ul]', '').replace('[/ul]', '') + description = description.replace('[ol]', '').replace('[/ol]', '') + description = description.replace('[*] ', '• ').replace('[*]', '• ') + description = bbcode.convert_named_spoiler_to_normal_spoiler(description) + description = bbcode.convert_comparison_to_centered(description, 1000) + description = description.strip() + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description async def get_category_id(self, meta): - resolution = meta.get('resolution') - category = meta.get('category') - is_disc = meta.get('is_disc') - tv_pack = meta.get('tv_pack') - sd = meta.get('sd') + resolution = meta.get('resolution', '') + category = meta.get('category', '') + is_disc = meta.get('is_disc', '') + tv_pack = meta.get('tv_pack', '') + sd = meta.get('sd', '') if is_disc == 'BDMV': if resolution == '1080p' and category == 'MOVIE': @@ -125,163 +149,139 @@ async def get_category_id(self, meta): return category_map[category].get(resolution) return None - async def login(self): - if self.auth_cookies: - return True - if not all([self.username, self.password, self.api_key]): - console.print(f"[bold red]Username, password, or api_key for {self.tracker} is not configured.[/bold red]") - return False - - login_url = f"{self.api_base_url}/auth" - auth_params = {'username': self.username, 'password': self.password, 'captcha': self.api_key} - - try: - response = self.session.get(login_url, params=auth_params, timeout=10) - - if response.status_code == 200 and response.cookies: - self.auth_cookies = response.cookies - return True - else: - console.print(f"[bold red]Failed to authenticate or no cookies received. 
Status: {response.status_code}[/bold red]") - self.auth_cookies = None - return False - except requests.exceptions.RequestException as e: - console.print(f"[bold red]Error during {self.tracker} authentication: {e}[/bold red]") - self.auth_cookies = None - return False - async def search_existing(self, meta, disctype): - if not self.auth_cookies: - if not await self.login(): - console.print(f"[bold red]Search failed on {self.tracker} because login failed.[/bold red]") - return [] - imdb_id = meta.get('imdb_info', {}).get('imdbID') if not imdb_id: - console.print(f"[bold yellow]Cannot perform search on {self.tracker}: IMDb ID not found in metadata.[/bold yellow]") + console.print(f'[bold yellow]Cannot perform search on {self.tracker}: IMDb ID not found in metadata.[/bold yellow]') return [] - search_url = f"{self.api_base_url}/torrents" search_params = {'searchText': imdb_id} - + search_results = [] + dupes = [] try: - response = self.session.get(search_url, params=search_params, cookies=self.auth_cookies, timeout=15) + response = await self.session.get(self.api_base_url, params=search_params, headers=self.session.headers, timeout=15) response.raise_for_status() if response.text and response.text != '[]': - results = response.json() - if results and isinstance(results, list): - return results + search_results = response.json() + if search_results and isinstance(search_results, list): + for each in search_results: + name = each.get('name') + torrent_id = each.get('id') + size = each.get('size') + torrent_link = f'{self.torrent_url}{torrent_id}/' if torrent_id else None + dupe_entry = { + 'name': name, + 'size': size, + 'link': torrent_link + } + dupes.append(dupe_entry) + + return dupes except Exception as e: - console.print(f"[bold red]Error searching for IMDb ID '{imdb_id}' on {self.tracker}: {e}[/bold red]") + console.print(f'[bold red]Error searching for IMDb ID {imdb_id} on {self.tracker}: {e}[/bold red]') return [] - async def upload(self, meta, disctype): - await self.edit_torrent(meta, self.tracker, self.source_flag) - approved_image_hosts = ['imgbox', 'imgbb', "bhd", "imgur", "postimg", "digitalcore"] + async def edit_name(self, meta): + """ + Edits the name according to DC's naming conventions. + Scene uploads should use the scene name. + Scene uploads should also have "[UNRAR]" in the name, as the UA only uploads unzipped files, which are considered "altered". 
+ https://digitalcore.club/forum/17/topic/1051/uploading-for-beginners + """ + if meta.get("scene_name", ""): + dc_name = f"{meta.get('scene_name')} [UNRAR]" + else: + dc_name = meta["uuid"] + base, ext = os.path.splitext(dc_name) + if ext.lower() in {".mkv", ".mp4", ".avi", ".ts"}: + dc_name = base + + return dc_name + + async def check_image_hosts(self, meta): + approved_image_hosts = ['imgbox', 'imgbb', 'bhd', 'imgur', 'postimg', 'digitalcore'] url_host_mapping = { - "ibb.co": "imgbb", - "imgbox.com": "imgbox", - "beyondhd.co": "bhd", - "imgur.com": "imgur", - "postimg.cc": "postimg", - "digitalcore.club": "digitalcore" + 'ibb.co': 'imgbb', + 'imgbox.com': 'imgbox', + 'beyondhd.co': 'bhd', + 'imgur.com': 'imgur', + 'postimg.cc': 'postimg', + 'digitalcore.club': 'digitalcore' } - await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) + return - cat_id = await self.get_category_id(meta) - - await self.generate_description(meta) - - description_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - with open(description_path, 'r', encoding='utf-8') as f: - description = f.read() - - imdb = meta.get('imdb_info', {}).get('imdbID', '') - - mi_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{'BD_SUMMARY_00.txt' if meta.get('is_disc') == 'BDMV' else 'MEDIAINFO.txt'}" - with open(mi_path, 'r', encoding='utf-8') as f: - mediainfo_dump = f.read() - - is_anonymous = "1" if meta['anon'] != 0 or self.config['TRACKERS'][self.tracker].get('anon', False) else "0" + async def fetch_data(self, meta): + anon = '1' if meta['anon'] or self.config['TRACKERS'][self.tracker].get('anon', False) else '0' data = { - 'category': cat_id, - 'imdbId': imdb, - 'nfo': description, - 'mediainfo': mediainfo_dump, - 'reqid': "0", - 'section': "new", - 'frileech': "1", - 'anonymousUpload': is_anonymous, - 'p2p': "0" + 'category': await self.get_category_id(meta), + 'imdbId': meta.get('imdb_info', {}).get('imdbID', ''), + 'nfo': await self.generate_description(meta), + 'mediainfo': await self.mediainfo(meta), + 'reqid': '0', + 'section': 'new', + 'frileech': '1', + 'anonymousUpload': anon, + 'p2p': '0', + 'unrar': '1', } - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + return data - try: - is_scene = bool(meta.get('scene_name')) - base_name = meta['scene_name'] if is_scene else meta['uuid'] - - existing_torrents = await self.search_existing(meta, disctype) - needs_unrar_tag = False + async def upload(self, meta, disctype): + data = await self.fetch_data(meta) + torrent_title = await self.edit_name(meta) + status_message = '' + response = None - if existing_torrents: - current_release_identifiers = {meta['uuid']} - if is_scene: - current_release_identifiers.add(meta['scene_name']) + if not meta.get('debug', False): + try: + upload_url = f'{self.api_base_url}/upload' + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" - relevant_torrents = [ - t for t in existing_torrents - if t.get('name') in current_release_identifiers - ] + with open(torrent_path, 'rb') as torrent_file: + files = {'file': (torrent_title + '.torrent', torrent_file, 'application/x-bittorrent')} - if relevant_torrents: - unrar_version_exists = any(t.get('unrar', 0) != 0 for t in relevant_torrents) + response = await self.session.post(upload_url, data=data, files=files, headers=self.session.headers, timeout=90) + response.raise_for_status() + response_data = response.json() + + if response.status_code == 
200 and response_data.get('id'): + torrent_id = str(response_data['id']) + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + '/' + status_message = response_data.get('message') + + await self.common.add_tracker_torrent( + meta, + tracker=self.tracker, + source_flag=None, + new_tracker=None, + comment=None, + headers=self.session.headers, + downurl=f'{self.api_base_url}/download/{torrent_id}' + ) - if unrar_version_exists: - raise UploadException("An UNRAR duplicate of this specific release already exists on site.") else: - console.print(f"[bold yellow]Found a RAR version of this release on {self.tracker}. Appending [UNRAR] to filename.[/bold yellow]") - needs_unrar_tag = True - - if needs_unrar_tag: - upload_base_name = meta['scene_name'] if is_scene else meta['uuid'] - upload_filename = f"{upload_base_name} [UNRAR].torrent" - else: - upload_filename = f"{base_name}.torrent" + status_message = f"data error: {response_data.get('message', 'Unknown API error.')}" + + except httpx.HTTPStatusError as e: + status_message = f'data error: HTTP {e.response.status_code} - {e.response.text}' + except httpx.TimeoutException: + status_message = f'data error: Request timed out after {self.session.timeout.write} seconds' + except httpx.RequestError as e: + resp_text = getattr(getattr(e, 'response', None), 'text', 'No response received') + status_message = f'data error: Unable to upload. Error: {e}.\nResponse: {resp_text}' + except Exception as e: + resp_text = response.text if response is not None else 'No response received' + status_message = f'data error: It may have uploaded, go check. Error: {e}.\nResponse: {resp_text}' + return - upload_filename = upload_filename.replace('.mkv', '').replace('.mp4', '') - - with open(torrent_path, 'rb') as torrent_file: - files = {'file': (upload_filename, torrent_file, "application/x-bittorrent")} - upload_url = f"{self.api_base_url}/torrents/upload" + else: + console.print(data) + status_message = 'Debug mode enabled, not uploading' - if meta['debug'] is False: - response = self.session.post(upload_url, data=data, files=files, cookies=self.auth_cookies, timeout=90) - response.raise_for_status() - json_response = response.json() - meta['tracker_status'][self.tracker]['status_message'] = response.json() - - if response.status_code == 200 and json_response.get('id'): - torrent_id = json_response.get('id') - details_url = f"{self.base_url}/torrent/{torrent_id}/" if torrent_id else self.base_url - if torrent_id: - meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id - announce_url = self.config['TRACKERS'][self.tracker].get('announce_url') - await self.add_tracker_torrent(meta, self.tracker, self.source_flag, announce_url, details_url) - else: - raise UploadException(f"{json_response.get('message', 'Unknown API error.')}") - else: - console.print(f"[bold blue]Debug Mode: Upload to {self.tracker} was not sent.[/bold blue]") - console.print("Headers:", self.session.headers) - console.print("Payload (data):", data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
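A minimal, hypothetical illustration of the naming convention described in edit_name above (the release names are invented; only the "[UNRAR]" suffix and the extension stripping come from the method itself):

    # DC.edit_name, sketched behaviour (hypothetical inputs):
    #   meta['scene_name'] = 'Some.Movie.1998.1080p.BluRay.x264-GRP'
    #     -> 'Some.Movie.1998.1080p.BluRay.x264-GRP [UNRAR]'
    #   no scene name, meta['uuid'] = 'Some.Movie.1998.1080p.WEB-DL.x264-GRP.mkv'
    #     -> 'Some.Movie.1998.1080p.WEB-DL.x264-GRP'   # known video extensions are dropped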
- - except UploadException: - raise - except Exception as e: - raise UploadException(f"An unexpected error occurred during upload to {self.tracker}: {e}") + meta['tracker_status'][self.tracker]['status_message'] = status_message diff --git a/src/trackers/DP.py b/src/trackers/DP.py index 2b1364e2d..f031e301c 100644 --- a/src/trackers/DP.py +++ b/src/trackers/DP.py @@ -1,77 +1,68 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import httpx -import glob -import os -from src.trackers.COMMON import COMMON -from src.console import console +import cli_ui +import re from data.config import config -from src.languages import process_desc_language +from src.console import console +from src.get_desc import DescriptionBuilder +from src.trackers.UNIT3D import UNIT3D -class DP(): +class DP(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='DP') self.config = config self.tracker = 'DP' self.source_flag = 'DarkPeers' - self.upload_url = 'https://darkpeers.org/api/torrents/upload' - self.search_url = 'https://darkpeers.org/api/torrents/filter' - self.torrent_url = 'https://darkpeers.org/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://darkpeers.org/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - 'aXXo', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'DNL', 'FaNGDiNG0', 'GalaxyTV', 'HD2DVD', 'HDT', 'HDTime', - 'iHYTECH', 'ION10', 'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', 'nHD', 'nikt0', 'nSD', - 'OFT', 'PRODJi', 'RARBG', 'Rifftrax', 'SANTi', 'SasukeducK', 'SEEDSTER', 'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TAoE', - 'TGALAXY', 'TGx', 'TORRENTGALAXY', 'ToVaR', 'TSP', 'TSPxL', 'ViSION', 'VXT', 'WAF', 'WKS', 'X0r', 'YIFY', 'YTS', - ['EVO', 'web-dl Only'] + 'ARCADE', 'aXXo', 'BANDOLEROS', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'DNL', + 'eranger2', 'FaNGDiNG0', 'FiSTER', 'flower', 'GalaxyTV', 'HD2DVD', 'HDT', 'HDTime', 'iHYTECH', + 'ION10', 'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', 'nHD', + 'nikt0', 'nSD', 'OFT', 'PiTBULL', 'PRODJi', 'RARBG', 'Rifftrax', 'ROCKETRACCOON', + 'SANTi', 'SasukeducK', 'SEEDSTER', 'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TAoE', + 'TGALAXY', 'TGx', 'TORRENTGALAXY', 'ToVaR', 'TSP', 'TSPxL', 'ViSION', 'VXT', + 'WAF', 'WKS', 'X0r', 'YIFY', 'YTS', ] - pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_additional_checks(self, meta): + should_continue = True + if meta.get('keep_folder'): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]{self.tracker} does not allow single files in a folder.') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') 
- return type_id + nordic_languages = ['danish', 'swedish', 'norwegian', 'icelandic', 'finnish', 'english'] + if not await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=nordic_languages, check_audio=True, check_subtitle=True + ): + return False - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id + if meta['type'] == "ENCODE" and meta.get('tag', "") in ['FGT']: + if not meta['unattended']: + console.print(f"[bold red]{self.tracker} does not allow FGT encodes, skipping upload.") + return False - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - modq = await self.get_flag(meta, 'modq') - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + if meta['type'] not in ['WEBDL'] and meta.get('tag', "") in ['EVO']: + if not meta['unattended']: + console.print(f"[bold red]{self.tracker} does not allow EVO for non-WEBDL types, skipping upload.") + return False + + return should_continue + + async def get_description(self, meta): if meta.get('logo', "") == "": from src.tmdb import get_logo TMDB_API_KEY = config['DEFAULT'].get('tmdb_api', False) @@ -83,146 +74,22 @@ async def upload(self, meta, disctype): logo_path = await get_logo(tmdb_id, category, debug, logo_languages=logo_languages, TMDB_API_KEY=TMDB_API_KEY, TMDB_BASE_URL=TMDB_BASE_URL) if logo_path: meta['logo'] = logo_path - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'mod_queue_opt_in': modq, - 'sticky': 0, - } - # Internal - if 
self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + return {'description': await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker)} - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 - - async def search_existing(self, meta, disctype): - if not meta['is_disc'] == "BDMV": - if not meta.get('audio_languages') or not meta.get('subtitle_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) - nordic_languages = ['Danish', 'Swedish', 'Norwegian', 'Icelandic', 'Finnish', 'English'] - if not any(lang in meta.get('audio_languages', []) for lang in nordic_languages) and not any(lang in meta.get('subtitle_languages', []) for lang in nordic_languages): - if not meta['unattended']: - console.print('[bold red]DP requires at least one Nordic/English audio or subtitle track.') - meta['skipping'] = "DP" - return - - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + async def get_additional_data(self, meta): + data = { + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - if meta['type'] == "ENCODE" and meta.get('tag', "") and 'fgt' in meta['tag'].lower() and len(dupes) > 0: - if not meta['unattended']: - console.print("[bold red]DP does not allow FGT encodes, skipping upload.") - meta['skipping'] = "DP" - return [] - return dupes + return data + + async def get_name(self, meta): + dp_name = meta.get('name') + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + tag_lower = meta['tag'].lower() + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + dp_name = re.sub(f"-{invalid_tag}", "", dp_name, flags=re.IGNORECASE) + dp_name = f"{dp_name}-NOGROUP" + return {'name': dp_name} diff --git a/src/trackers/EMUW.py b/src/trackers/EMUW.py new file mode 100644 index 000000000..96fcece48 --- /dev/null +++ b/src/trackers/EMUW.py @@ -0,0 +1,514 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import re +import asyncio +import cloudscraper +from src.console import console +from src.languages import process_desc_language +from src.trackers.UNIT3D import UNIT3D +from src.tmdb import get_tmdb_translations + + +class EMUW(UNIT3D): + """ + EMUW tracker handler with Spanish naming conventions + Handles torrents with Spanish titles, audio, and subtitle requirements + """ + + def __init__(self, config): + super().__init__(config, tracker_name='EMUW') + self.source_flag = 'Emuwarez' + self.base_url = 'https://emuwarez.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] + + async def get_name(self, meta): + """ + Generate EMUW-compliant torrent name format + Format: [Spanish Title] [Season] [Year] [Resolution] [Format] [Codec] [Audio] [SUBS] - [Group] + + Examples: + - Hora punta 1998 1080p BluRay x264 ESP DD 5.1 ING DTS 5.1 SUBS-EMUWAREZ + - Sound! 
Euphonium S03 2025 1080p WEB-DL AVC JAP AAC 2.0 SUBS-Fool + """ + # Get Spanish title if available and configured + title = await self._get_title(meta) + + # Get season using season_int + season = "" + if meta['category'] == 'TV' and meta.get('season_int'): + season = f"S{meta['season_int']:02d}" + + year = meta.get('year', '') + resolution = self._map_resolution(meta.get('resolution', '')) + video_format = self._map_format(meta) + video_codec = self._map_codec(meta) + + # Process language information + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + # Build audio string + audio_str = await self._build_audio_string(meta) + + # Check for Spanish subtitles + subs_tag = " SUBS" if self._has_spanish_subs(meta) else "" + + # Get tag from meta['tag'] + tag = meta.get('tag', '').strip() + + # Remove leading dash if present + if tag.startswith('-'): + tag = tag[1:] + + # Filter out invalid tags and use default if needed + if not tag or tag.lower() in ['nogrp', 'nogroup', 'unknown', 'unk', 'hd.ma.5.1', 'untouched']: + tag = 'EMUWAREZ' + + # Build final name + name_parts = [part for part in [title, season, str(year), resolution, video_format, video_codec, audio_str] if + part] + base_name = ' '.join(name_parts) + + # Clean up spaces and build final name + base_name = re.sub(r'\s{2,}', ' ', base_name).strip() + emuwarez_name = f"{base_name}{subs_tag}-{tag}" + + return {'name': emuwarez_name} + + async def _get_title(self, meta): + """Get Spanish title if available and configured""" + spanish_title = None + + # Try to get from IMDb with priority: country match, then language match + imdb_info = meta.get('imdb_info') or {} + akas = imdb_info.get('akas', []) + + country_match = None + language_match = None + + for aka in akas: + if isinstance(aka, dict): + if aka.get("country") in ["Spain", "ES"]: + country_match = aka.get("title") + break # Country match takes priority + elif aka.get("language") in ["Spain", "Spanish", "ES"] and not language_match: + language_match = aka.get("title") + + spanish_title = country_match or language_match + + # Try TMDb if not found + if not spanish_title and meta.get('tmdb'): + spanish_title = await get_tmdb_translations( + tmdb_id=meta.get('tmdb'), + category=meta.get('category', 'MOVIE'), + target_language='es', + debug=meta.get('debug', False) + ) + + # Use Spanish title if configured + use_spanish_title = self.config['TRACKERS'][self.tracker].get('use_spanish_title', False) + if spanish_title and use_spanish_title: + return spanish_title + + return meta.get('title', '') + + def _map_resolution(self, resolution): + """Map resolution to EMUW nomenclature""" + resolution_map = { + '4320p': '4320p FUHD', + '2160p': '2160p UHD', + '1080p': '1080p', + '720p': '720p', + '576p': '576p SD', + '540p': '540p SD', + '480p': '480p SD', + } + return resolution_map.get(resolution, resolution) + + def _map_format(self, meta): + """Map source format to EMUW nomenclature""" + source = meta.get('source', '') + type_name = meta.get('type', '') + + format_map = { + 'BDMV': 'FBD', + 'DVD': 'FDVD', + 'REMUX': 'BDRemux', + } + + if meta.get('is_disc') in format_map: + return format_map[meta.get('is_disc')] + if type_name in format_map: + return format_map[type_name] + + if 'BluRay' in source or 'Blu-ray' in source: + return 'BluRay' + if 'WEB' in source: + return 'WEB-DL' if 'WEB-DL' in source else 'WEBRIP' + if 'HDTV' in source: + return 'HDTV' + if 'DVD' in source: + return 'SD' + + return '' + + def _map_codec(self, 
meta): + """Map video codec to EMUW nomenclature with HDR/DV prefix""" + codec_map = { + 'H.264': 'AVC', 'H.265': 'HEVC', 'HEVC': 'HEVC', 'AVC': 'AVC', + 'x264': 'x264', 'x265': 'x265', 'AV1': 'AV1', 'VP9': 'VP9', + 'VP8': 'VP8', 'VC-1': 'VC-1', 'MPEG-4': 'MPEG', + } + + hdr_prefix = '' + if meta.get('hdr'): + hdr = meta.get('hdr', '') + if 'DV' in hdr: + hdr_prefix = 'DV ' + if 'HDR' in hdr: + hdr_prefix += 'HDR ' + + video_codec = meta.get('video_codec', '') + video_encode = meta.get('video_encode', '') + codec = codec_map.get(video_codec) or codec_map.get(video_encode, video_codec) + + return f"{hdr_prefix}{codec}".strip() + + async def _get_original_language(self, meta): + """Get the original language from existing metadata""" + original_lang = None + + if meta.get('original_language'): + original_lang = meta['original_language'] + + if not original_lang: + imdb_info = meta.get('imdb_info') or {} + imdb_lang = imdb_info.get('language', '') + + if isinstance(imdb_lang, list) and imdb_lang: + imdb_lang = imdb_lang[0] + + if imdb_lang: + if isinstance(imdb_lang, dict): + original_lang = imdb_lang.get('text', '') + else: + original_lang = str(imdb_lang).strip() + + if original_lang: + return self._map_language(original_lang) + + return None + + async def _build_audio_string(self, meta): + """ + Build audio string in EMUW format with proper priority order + + Priority Order: + 1. DUAL: Exactly 2 audio tracks, same codec + 2. MULTI: 4+ audio tracks, same codec + 3. VOSE: Single audio (original lang) + Spanish subs + NO Spanish audio + 4. V.O.: Single audio (original lang) + NO Spanish subs + NO Spanish audio + 5. Normal: List all audio tracks + """ + audio_tracks = self._get_audio_tracks(meta) + if not audio_tracks: + return '' + + audio_langs = self._extract_audio_languages(audio_tracks, meta) + if not audio_langs: + return '' + + original_lang = await self._get_original_language(meta) + has_spanish_audio = 'ESP' in audio_langs or 'LAT' in audio_langs + has_spanish_subs = self._has_spanish_subs(meta) + num_audio_tracks = len(audio_tracks) + + # DUAL - Exactly 2 audios, same codec + if num_audio_tracks == 2: + codec1 = self._map_audio_codec(audio_tracks[0]) + codec2 = self._map_audio_codec(audio_tracks[1]) + + if codec1 == codec2: + channels = self._get_audio_channels(audio_tracks[0]) + return f"DUAL {codec1} {channels}" + + # MULTI - 4+ audios, same codec + if num_audio_tracks >= 4: + codecs = [self._map_audio_codec(t) for t in audio_tracks] + if all(c == codecs[0] for c in codecs): + channels = self._get_audio_channels(audio_tracks[0]) + return f"MULTI {codecs[0]} {channels}" + + # VOSE - Single audio (original) + Spanish subs + NO Spanish audio + if num_audio_tracks == 1 and original_lang and not has_spanish_audio and has_spanish_subs: + if audio_langs[0] == original_lang: + codec = self._map_audio_codec(audio_tracks[0]) + channels = self._get_audio_channels(audio_tracks[0]) + return f"VOSE {original_lang} {codec} {channels}" + + # V.O. - Single audio (original) + NO Spanish subs + NO Spanish audio + if num_audio_tracks == 1 and original_lang and not has_spanish_audio and not has_spanish_subs: + if audio_langs[0] == original_lang: + codec = self._map_audio_codec(audio_tracks[0]) + channels = self._get_audio_channels(audio_tracks[0]) + return f"V.O. 
{original_lang} {codec} {channels}" + + # Normal listing + audio_parts = [] + for i, track in enumerate(audio_tracks): + if i < len(audio_langs): + lang = audio_langs[i] + codec = self._map_audio_codec(track) + channels = self._get_audio_channels(track) + audio_parts.append(f"{lang} {codec} {channels}") + + return ' '.join(audio_parts) + + def _get_audio_tracks(self, meta): + """Extract audio tracks from mediainfo""" + if 'mediainfo' not in meta or 'media' not in meta['mediainfo']: + return [] + + tracks = meta['mediainfo']['media'].get('track', []) + return [t for t in tracks if t.get('@type') == 'Audio'] + + def _extract_audio_languages(self, audio_tracks, meta): + """Extract and normalize audio languages""" + audio_langs = [] + + for track in audio_tracks: + lang = track.get('Language', '') + if lang: + lang_code = self._map_language(lang) + if lang_code and lang_code not in audio_langs: + audio_langs.append(lang_code) + + if not audio_langs and meta.get('audio_languages'): + for lang in meta['audio_languages']: + lang_code = self._map_language(lang) + if lang_code and lang_code not in audio_langs: + audio_langs.append(lang_code) + + return audio_langs + + def _map_language(self, lang): + """Map language codes and names to EMUW nomenclature""" + if not lang: + return '' + + lang_map = { + 'spa': 'ESP', 'es': 'ESP', 'spanish': 'ESP', 'español': 'ESP', 'castellano': 'ESP', 'es-es': 'ESP', + 'eng': 'ING', 'en': 'ING', 'english': 'ING', 'en-us': 'ING', 'en-gb': 'ING', + 'lat': 'LAT', 'latino': 'LAT', 'latin american spanish': 'LAT', 'es-mx': 'LAT', 'es-419': 'LAT', + 'fre': 'FRA', 'fra': 'FRA', 'fr': 'FRA', 'french': 'FRA', 'français': 'FRA', + 'ger': 'ALE', 'deu': 'ALE', 'de': 'ALE', 'german': 'ALE', 'deutsch': 'ALE', + 'jpn': 'JAP', 'ja': 'JAP', 'japanese': 'JAP', '日本語': 'JAP', + 'kor': 'COR', 'ko': 'COR', 'korean': 'COR', '한국어': 'COR', + 'ita': 'ITA', 'it': 'ITA', 'italian': 'ITA', 'italiano': 'ITA', + 'por': 'POR', 'pt': 'POR', 'portuguese': 'POR', 'português': 'POR', 'pt-br': 'POR', 'pt-pt': 'POR', + 'chi': 'CHI', 'zho': 'CHI', 'zh': 'CHI', 'chinese': 'CHI', 'mandarin': 'CHI', '中文': 'CHI', 'zh-cn': 'CHI', + 'rus': 'RUS', 'ru': 'RUS', 'russian': 'RUS', 'русский': 'RUS', + 'ara': 'ARA', 'ar': 'ARA', 'arabic': 'ARA', + 'hin': 'HIN', 'hi': 'HIN', 'hindi': 'HIN', + 'tha': 'THA', 'th': 'THA', 'thai': 'THA', + 'vie': 'VIE', 'vi': 'VIE', 'vietnamese': 'VIE', + } + + lang_lower = str(lang).lower().strip() + mapped = lang_map.get(lang_lower) + + if mapped: + return mapped + + return lang.upper()[:3] if len(lang) >= 3 else lang.upper() + + def _map_audio_codec(self, audio_track): + """Map audio codec to EMUW nomenclature""" + codec = audio_track.get('Format', '').upper() + + if 'atmos' in str(audio_track.get('Format_AdditionalFeatures', '')).lower(): + return 'Atmos' + + codec_map = { + 'AAC LC': 'AAC LC', 'AAC': 'AAC', 'AC-3': 'DD', 'AC3': 'DD', + 'E-AC-3': 'DD+', 'EAC3': 'DD+', 'DTS': 'DTS', + 'DTS-HD MA': 'DTS-HD MA', 'DTS-HD HRA': 'DTS-HD HRA', + 'TRUEHD': 'TrueHD', 'MLP FBA': 'MLP', 'PCM': 'PCM', + 'FLAC': 'FLAC', 'OPUS': 'OPUS', 'MP3': 'MP3', + } + + return codec_map.get(codec, codec) + + def _get_audio_channels(self, audio_track): + """Get audio channel configuration""" + channels = audio_track.get('Channels', '') + channel_map = { + '1': 'Mono', '2': '2.0', '3': '3.0', + '4': '3.1', '5': '5.0', '6': '5.1', '8': '7.1', + } + return channel_map.get(str(channels), '5.1') + + def _has_spanish_subs(self, meta): + """Check if torrent has Spanish subtitles""" + if 'mediainfo' not in meta or 
'media' not in meta['mediainfo']: + return False + + tracks = meta['mediainfo']['media'].get('track', []) + + for track in tracks: + if track.get('@type') == 'Text': + lang = track.get('Language', '') + if isinstance(lang, str): + lang = lang.lower() + else: + lang = '' + + title = track.get('Title', '') + if isinstance(title, str): + title = title.lower() + else: + title = '' + + if lang in ['es', 'spa', 'spanish', 'es-es', 'español']: + return True + if 'spanish' in title or 'español' in title or 'castellano' in title: + return True + + return False + + async def get_cat_id(self, category_name): + """Categories: Movies(1), Series(2), Documentales(4), Musica(5), Juegos(6), Software(7)""" + category_map = { + 'MOVIE': '1', + 'TV': '2', + 'FANRES': '1' + } + return category_map.get(category_name, '1') + + async def get_type_id(self, meta): + """Types: Full Disc(1), Remux(2), Encode(3), WEB-DL(4), WEBRIP(5), HDTV(6), SD(7)""" + type_map = { + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', + 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6', 'SD': '7' + } + meta_type = meta.get('type', '') if isinstance(meta, dict) else meta + type_id = type_map.get(meta_type, '3') + return {'type_id': type_id} if isinstance(meta, dict) else type_id + + async def get_res_id(self, resolution): + """Resolutions: 4320p(1), 2160p(2), 1080p(3), 1080i(4), 720p(5), 576p(6), 540p(7), 480p(8), Otras(10)""" + resolution_map = { + '4320p': '1', '2160p': '2', '1080p': '3', '1080i': '4', + '720p': '5', '576p': '6', '540p': '7', '480p': '8', + 'SD': '10', 'OTHER': '10' + } + return resolution_map.get(resolution, '10') + + async def search_existing(self, meta, disctype): + """Search for duplicate torrents using cloudscraper for Cloudflare bypass""" + dupes = [] + + # Build search name using meta['name'] like UNIT3D + search_name = meta['name'] + + # Add season for TV shows + if meta['category'] == 'TV' and meta.get('season'): + search_name = f"{search_name} {meta['season']}" + + # Add edition if present + if meta.get('edition'): + search_name = f"{search_name} {meta['edition']}" + + params = { + 'tmdbId': meta.get('tmdb', ''), + 'categories[]': await self.get_cat_id(meta['category']), + 'name': search_name + } + + headers = { + 'Authorization': f"Bearer {self.config['TRACKERS'][self.tracker]['api_key'].strip()}", + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36', + 'Referer': self.base_url, + 'Origin': self.base_url + } + + scraper = cloudscraper.create_scraper( + browser={ + 'browser': 'chrome', + 'platform': 'windows', + 'mobile': False, + 'desktop': True + }, + delay=10 + ) + + try: + # Establish session + scraper.get(self.base_url, timeout=15.0) + + # Make API request + response = scraper.get(url=self.search_url, params=params, headers=headers, timeout=15.0) + + if response.status_code == 200: + try: + data = response.json() + if 'data' in data and isinstance(data['data'], list): + for torrent in data['data']: + if 'attributes' in torrent: + attributes = torrent['attributes'] + if 'name' in attributes: + if not meta['is_disc']: + result = { + 'name': attributes['name'], + 'size': attributes.get('size'), + 'files': [file['name'] for file in attributes.get('files', []) if + isinstance(file, dict) and 'name' in file], + 'file_count': len(attributes.get('files', [])) if isinstance( + attributes.get('files'), list) else 0, + 'trumpable': attributes.get('trumpable', False), + 'link': 
attributes.get('details_link', None) + } + else: + result = { + 'name': attributes['name'], + 'size': attributes.get('size'), + 'trumpable': attributes.get('trumpable', False), + 'link': attributes.get('details_link', None) + } + dupes.append(result) + except Exception as json_error: + console.print(f"[red]Failed to parse JSON: {json_error}") + + elif response.status_code == 403: + console.print(f"[red]Cloudflare protection blocked API access to {self.tracker}") + elif response.status_code == 429: + console.print(f"[yellow]Rate limited by {self.tracker}, waiting 60s...") + await asyncio.sleep(60) + else: + console.print(f"[yellow]Unexpected status code: {response.status_code}") + + except Exception as e: + console.print(f"[red]Search error for {self.tracker}: {type(e).__name__}: {str(e)}") + + return dupes + + def get_upload_data(self, meta): + """Get upload data with EMUW-specific options""" + upload_data = super().get_upload_data(meta) + + if meta.get('anon', False): + upload_data['anonymous'] = 1 + if meta.get('stream', False): + upload_data['stream'] = 1 + if meta.get('resolution', '') in ['576p', '540p', '480p']: + upload_data['sd'] = 1 + if meta.get('personalrelease', False): + upload_data['personal_release'] = 1 + + return upload_data diff --git a/src/trackers/FF.py b/src/trackers/FF.py new file mode 100644 index 000000000..ecb02ff90 --- /dev/null +++ b/src/trackers/FF.py @@ -0,0 +1,638 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import aiofiles +import asyncio +import glob +import httpx +import os +import platform +import re +from bs4 import BeautifulSoup +from src.bbcode import BBCODE +from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language + + +class FF: + def __init__(self, config): + self.config = config + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) + self.tracker = "FF" + self.banned_groups = [] + self.source_flag = "FunFile" + self.base_url = "https://www.funfile.org/" + self.torrent_url = f"{self.base_url}/details.php?id=" + self.requests_url = f"{self.base_url}/requests.php" + self.auth_token = None + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f"Upload Assistant/2.3 ({platform.system()} {platform.release()})" + }, timeout=30.0) + + async def validate_credentials(self, meta): + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.txt") + if not os.path.exists(cookie_file): + await self.login(meta) + + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + valid_cookies = await self.validate_cookies(meta) + if valid_cookies: + return True + else: + await self.login(meta) + return await self.validate_cookies(meta) + + async def validate_cookies(self, meta): + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + success_text='friends.php', + ) + + async def login(self, meta): + login_url = "https://www.funfile.org/takelogin.php" + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.txt") + + payload = { + "returnto": "/index.php", + "username": self.config['TRACKERS'][self.tracker]['username'], + "password": self.config['TRACKERS'][self.tracker]['password'], + "login": "Login" + } + + print(f"{self.tracker}: Trying to 
login...") + response = await self.session.post(login_url, data=payload) + + if response.status_code == 302: + print(f"{self.tracker}: Login Successful!") + + async with aiofiles.open(cookie_file, "w") as f: + await f.write("# Netscape HTTP Cookie File\n") + await f.write("# This file was generated by an automated script.\n\n") + for cookie in self.session.cookies.jar: + domain = cookie.domain + include_subdomains = "TRUE" if domain.startswith('.') else "FALSE" + path = cookie.path + secure = "TRUE" if cookie.secure else "FALSE" + expires = str(int(cookie.expires)) if cookie.expires else "0" + name = cookie.name + value = cookie.value + await f.write(f"{domain}\t{include_subdomains}\t{path}\t{secure}\t{expires}\t{name}\t{value}\n") + print(f"{self.tracker}: Saving the cookie file...") + else: + print(f"{self.tracker}: Login failed. Status code: {response.status_code}") + + async def search_existing(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + + if meta['category'] == 'MOVIE': + query = meta['title'] + if meta['category'] == 'TV': + query = f"{meta['title']} {meta.get('season', '')}{meta.get('episode', '')}" + + search_url = f"{self.base_url}/suggest.php?q={query}" + response = await self.session.get(search_url) + + if response.status_code == 200 and 'login' not in str(response.url): + items = [line.strip() for line in response.text.splitlines() if line.strip()] + return items + + return [] + + async def get_requests(self, meta): + if self.config['TRACKERS'][self.tracker].get('check_requests', False) is False: + return False + + else: + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + category = self.get_type_id(meta) + + query_1 = meta['title'] + query_2 = meta['title'].replace(' ', '.') + + search_url_1 = f"{self.requests_url}?filter=open&category={category}&search={query_1}" + + if query_1 != query_2: + search_url_2 = f"{self.base_url}/requests.php?filter=open&category={category}&search={query_2}" + responses = await asyncio.gather( + self.session.get(search_url_1), + self.session.get(search_url_2) + ) + response_results_text = responses[0].text + responses[1].text + responses[0].raise_for_status() + responses[1].raise_for_status() + else: + response = await self.session.get(search_url_1) + response.raise_for_status() + response_results_text = response.text + + soup = BeautifulSoup(response_results_text, "html.parser") + request_rows = soup.select("td.mf_content table tr") + + results = [] + for row in request_rows: + name_element = row.select_one("td.row3 nobr a b") + if not name_element: + continue + + name = name_element.text.strip() + link_element = name_element.find_parent("a") + link = link_element["href"] if link_element else None + + all_tds = row.find_all("td", class_="row3") + reward = all_tds[2].text.strip() if len(all_tds) > 2 else None + + results.append({ + "Name": name, + "Link": link, + "Reward": reward + }) + + if results: + message = f"\n{self.tracker}: [bold yellow]Your upload may fulfill the following request(s), check it out:[/bold yellow]\n\n" + for r in results: + message += f"[bold green]Name:[/bold green] {r['Name']}\n" + message += f"[bold green]Reward:[/bold green] {r['Reward']}\n" + message += f"[bold green]Link:[/bold green] {r['Link']}\n\n" + console.print(message) + + return results + + except Exception as e: + print(f"An error occurred while fetching requests: {e}") + return [] + + async def generate_description(self, meta): + 
builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/center]") + + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker) + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + desc_parts.append(f'[center][/center]') + + desc_parts.append(f'[center]{episode_overview}[/center]') + + # File information + mediainfo = await builder.get_mediainfo_section(meta, self.tracker) + if mediainfo: + desc_parts.append(f'[pre]{mediainfo}[/pre]') + + bdinfo = await builder.get_bdinfo_section(meta) + if bdinfo: + desc_parts.append(f'[pre]{bdinfo}[/pre]') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Disc menus screenshots header + desc_parts.append(await builder.menu_screenshot_header(meta, self.tracker)) + + # Disc menus screenshots + menu_images = meta.get("menu_images", []) + if menu_images: + menu_screenshots_block = "" + for image in menu_images: + menu_img_url = image.get("img_url") + menu_web_url = image.get("web_url") + if menu_img_url and menu_web_url: + menu_screenshots_block += f' ' + if menu_screenshots_block: + desc_parts.append(f"[center]{menu_screenshots_block}[/center]") + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Screenshot Header + images = meta.get("image_list", []) + if images: + desc_parts.append(await builder.screenshot_header(self.tracker)) + + # Screenshots + screenshots_block = "" + for image in images: + img_url = image.get("img_url") + web_url = image.get("web_url") + if img_url and web_url: + screenshots_block += ( + f' ' + ) + if screenshots_block: + desc_parts.append(f"[center]{screenshots_block}[/center]") + + # Signature + desc_parts.append(f"[url=https://github.com/Audionut/Upload-Assistant][center][size=1]{meta['ua_signature']}[/size][/center][/url]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = description.replace("[user]", "").replace("[/user]", "") + description = description.replace("[align=left]", "").replace("[/align]", "") + description = description.replace("[right]", "").replace("[/right]", "") + description = description.replace("[align=right]", "").replace("[/align]", "") + description = bbcode.remove_sub(description) + description = bbcode.remove_sup(description) + description = description.replace("[alert]", "").replace("[/alert]", "") + description = description.replace("[note]", "").replace("[/note]", "") + description = description.replace("[hr]", "").replace("[/hr]", "") + description = description.replace("[h1]", "[u][b]").replace("[/h1]", "[/b][/u]") + description = description.replace("[h2]", "[u][b]").replace("[/h2]", "[/b][/u]") + description = description.replace("[h3]", "[u][b]").replace("[/h3]", "[/b][/u]") + description = description.replace("[ul]", "").replace("[/ul]", "") + description = description.replace("[ol]", "").replace("[/ol]", "") + description = description.replace("[hide]", "").replace("[/hide]", "") + description = description.replace("•", "-").replace("“", '"').replace("”", '"') + description = bbcode.convert_comparison_to_centered(description, 1000) + description = bbcode.remove_spoiler(description) + + # 
[url][img=000]...[/img][/url] + description = re.sub( + r"\[url=(?P[^\]]+)\]\[img=(?P\d+)\](?P[^\[]+)\[/img\]\[/url\]", + r'', + description, + flags=re.IGNORECASE + ) + + # [url][img]...[/img][/url] + description = re.sub( + r"\[url=(?P[^\]]+)\]\[img\](?P[^\[]+)\[/img\]\[/url\]", + r'', + description, + flags=re.IGNORECASE + ) + + # [img=200]...[/img] (no [url]) + description = re.sub( + r"\[img=(?P\d+)\](?P[^\[]+)\[/img\]", + r'', + description, + flags=re.IGNORECASE + ) + + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8' + ) as description_file: + await description_file.write(description) + + return description + + def get_type_id(self, meta): + if meta.get('anime'): + return '44' + category = meta['category'] + + if category == 'MOVIE': + return '19' + + elif category == 'TV': + return '7' + + def file_information(self, meta): + vc = meta.get('video_codec', '') + if vc: + self.video_codec = vc.strip().lower() + + ve = meta.get('video_encode', '') + if ve: + self.video_encode = ve.strip().lower() + + vs = meta.get('source', '') + if vs: + self.video_source = vs.strip().lower() + + vt = meta.get('type', '') + if vt: + self.video_type = vt.strip().lower() + + def movie_type(self, meta): + # Possible values: "XviD", "DVDR", "x264", "x265", "MP4", "VCD" + if self.video_source == 'dvd': + return "DVDR" + + if self.video_codec == 'hevc': + return "x265" + else: + return "x264" + + def tv_type(self, meta): + # Possible values: "XviD", "HR-XviD", "x264-SD", "x264-HD", "x265-SD", "x265-HD", "Web-SD", "Web-HD", "DVDR", "MP4" + if self.video_source == 'dvd': + return "DVDR" + + if self.video_source == 'web': + if meta.get('sd'): + return "Web-SD" + else: + return "Web-HD" + + if self.video_codec == 'hevc': + if meta.get('sd'): + return "x265-SD" + else: + return "x265-HD" + else: + if meta.get('sd'): + return "x264-SD" + else: + return "x264-HD" + + def anime_type(self, meta): + # Possible values: "TVSeries", "TVSpecial", "Movie", "OVA", "ONA", "DVDSpecial" + if meta.get('tvmaze_episode_data', {}).get('season_number') == 0: + return "TVSpecial" + + if self.video_source == 'dvd': + return "DVDSpecial" + + category = meta['category'] + + if category == 'TV': + return "TVSeries" + + if category == 'MOVIE': + return "Movie" + + def movie_source(self, meta): + # Possible values: "DVD", "DVDSCR", "Workprint", "TeleCine", "TeleSync", "CAM", "BluRay", "HD-DVD", "HDTV", "R5", "WebRIP" + mapping = { + "dvd": "DVD", + "dvdscr": "DVDSCR", + "workprint": "Workprint", + "telecine": "TeleCine", + "telesync": "TeleSync", + "cam": "CAM", + "bluray": "BluRay", + "blu-ray": "BluRay", + "hd-dvd": "HD-DVD", + "hdtv": "HDTV", + "r5": "R5", + "web": "WebRIP", + "webrip": "WebRIP" + } + + src = (self.video_source or "").strip().lower() + return mapping.get(src, None) + + def tv_source(self, meta): + # Possible values: "HDTV", "DSR", "PDTV", "TV", "DVD", "DvdScr", "BluRay", "WebRIP" + mapping = { + "hdtv": "HDTV", + "dsr": "DSR", + "pdtv": "PDTV", + "tv": "TV", + "dvd": "DVD", + "dvdscr": "DvdScr", + "bluray": "BluRay", + "blu-ray": "BluRay", + "web": "WebRIP", + "webrip": "WebRIP" + } + + src = (self.video_source or "").strip().lower() + return mapping.get(src, None) + + def anime_source(self, meta): + # Possible values: "DVD", "BluRay", "Anime Series", "HDTV" + mapping = { + "hdtv": "HDTV", + "tv": "HDTV", + "dvd": "DVD", + "bluray": "BluRay", + "blu-ray": "BluRay", + "web": "Anime 
Series", + "webrip": "Anime Series" + } + + src = (self.video_source or "").strip().lower() + return mapping.get(src, None) + + def anime_v_dar(self, meta): + # Possible values: "16_9", "4_3" + if meta.get('is_disc') != "BDMV": + tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + for track in tracks: + if track.get('@type') == "Video": + dar_str = track.get('DisplayAspectRatio') + if dar_str: + try: + dar = float(dar_str) + return "16_9" if dar > 1.34 else "4_3" + except (ValueError, TypeError): + return "16_9" + + return "16_9" + else: + return "16_9" + + def anime_v_codec(self, meta): + # Possible values: "x264", "h264", "XviD", "DivX", "WMV", "VC1" + if self.video_codec == 'vc-1': + return "VC1" + + if self.video_encode == 'h.264': + return "h264" + else: + return 'x264' + + async def edit_name(self, meta): + if meta.get("scene", False): + if meta.get("scene_name", ""): + ff_name = meta.get("scene_name") + else: + ff_name = meta["uuid"] + base, ext = os.path.splitext(ff_name) + if ext.lower() in {".mkv", ".mp4", ".avi", ".ts"}: + ff_name = base.replace(" ", ".") + else: + ff_name = meta.get("clean_name").replace(" ", ".") + + return ff_name + + async def languages(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + lang_map = { + 'english': 'en', + 'japanese': 'jp', + 'korean': 'kr', + 'thai': 'th', + 'chinese': 'zh', + } + + anime_a_codec = [] + anime_a_ch = [] + anime_a_lang = [] + + anime_s_format = [] + anime_s_type = [] + anime_s_lang = [] + + audio_languages = meta.get('audio_languages', []) + if audio_languages: + audio_desc = meta.get('audio', '').lower() + found_codec = '0' + codec_options = { + 'aac': 'aac', 'ac3': 'ac3', 'dd': 'ac3', 'dolby digital': 'ac3', 'ogg': 'ogg', 'mp3': 'mp3', + 'dts-es': 'dtses', 'dtses': 'dtses', 'dts': 'dts', 'flac': 'flac', 'pcm': 'pcm', 'wma': 'wma' + } + for key, value in codec_options.items(): + if key in audio_desc: + found_codec = value + break + + channels_desc = meta.get('channels', '') + channel_map = { + '2.0': '2', + '5.1': '5_1', + '7.1': '7_1' + } + found_channel = channel_map.get(channels_desc, '0') + + for lang_str in audio_languages: + lang_code = lang_map.get(lang_str.lower(), '1') + + anime_a_codec.append(found_codec) + anime_a_ch.append(found_channel) + anime_a_lang.append(lang_code) + + subtitle_languages = meta.get('subtitle_languages', []) + if subtitle_languages: + subtitle_format = 'srt' + subtitle_type = 'sub' + + for lang_str in subtitle_languages: + lang_code = lang_map.get(lang_str.lower(), '1') + + anime_s_format.append(subtitle_format) + anime_s_type.append(subtitle_type) + anime_s_lang.append(lang_code) + + return { + 'anime_a_codec': anime_a_codec, + 'anime_a_ch': anime_a_ch, + 'anime_a_lang': anime_a_lang, + 'anime_s_format': anime_s_format, + 'anime_s_type': anime_s_type, + 'anime_s_lang': anime_s_lang, + } + + async def get_poster(self, meta): + poster_url = meta.get('poster') + + poster_file = None + if poster_url: + async with httpx.AsyncClient() as client: + response = await client.get(poster_url) + if response.status_code == 200: + poster_ext = os.path.splitext(poster_url)[1] or ".jpg" + poster_filename = f"{meta.get('name')}{poster_ext}" + poster_file = (poster_filename, response.content, "image/jpeg") + + return poster_file + + def get_nfo(self, meta): + nfo_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + nfo_files = glob.glob(os.path.join(nfo_dir, "*.nfo")) + + if nfo_files: + nfo_path = 
nfo_files[0] + + return { + 'nfo': ( + os.path.basename(nfo_path), + open(nfo_path, "rb"), + "application/octet-stream" + ) + } + return {} + + async def get_data(self, meta): + languages = await self.languages(meta) + self.file_information(meta) + + data = { + 'MAX_FILE_SIZE': 10000000, + 'type': self.get_type_id(meta), + 'tags': '', + 'descr': await self.generate_description(meta), + } + + if meta.get('anime'): + data.update({ + 'anime_type': self.anime_type(meta), + 'anime_source': self.anime_source(meta), + 'anime_container': 'mkv', + 'anime_v_res': meta.get('resolution'), + 'anime_v_dar': self.anime_v_dar(meta), + 'anime_v_codec': self.anime_v_codec(meta), + 'anime_a_codec[]': ['0'] + languages.get('anime_a_codec'), + 'anime_a_ch[]': ['0'] + languages.get('anime_a_ch'), + 'anime_a_lang[]': ['0'] + languages.get('anime_a_lang'), + 'anime_s_format[]': ['0'] + languages.get('anime_s_format'), + 'anime_s_type[]': ['0'] + languages.get('anime_s_type'), + 'anime_s_lang[]': ['0'] + languages.get('anime_s_lang'), + }) + + else: + if meta['category'] == 'MOVIE': + data.update({ + 'movie_type': self.movie_type(meta), + 'movie_source': self.movie_source(meta), + 'movie_imdb': str(meta.get('imdb_info', {}).get('imdb_url', '')), + 'pack': 0, + }) + + if meta['category'] == 'TV': + data.update({ + 'tv_type': self.tv_type(meta), + 'tv_source': self.tv_source(meta), + 'tv_imdb': str(meta.get('imdb_info', {}).get('imdb_url', '')), + 'pack': 1 if meta.get('tv_pack', 0) else 0, + }) + + return data + + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + torrent_name = await self.edit_name(meta) + files = {} + files['poster'] = await self.get_poster(meta) + nfo = self.get_nfo(meta) + if nfo: + files['nfo'] = nfo['nfo'] + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='file', + torrent_name=torrent_name, + upload_cookies=self.session.cookies, + upload_url=f"{self.base_url}/takeupload.php", + id_pattern=r'details\.php\?id=(\d+)', + success_status_code=302, + additional_files=files + ) + + return diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 9c2fd0cf5..32e9be9da 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import requests import asyncio import re diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 1c59db42e..3c889c8c4 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -1,55 +1,28 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx - from src.trackers.COMMON import COMMON -from src.console import console - +from src.trackers.UNIT3D import UNIT3D -class FNP(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class FNP(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='FNP') self.config = config + self.common = COMMON(config) self.tracker = 'FNP' self.source_flag = 'FnP' - self.upload_url = 'https://fearnopeer.com/api/torrents/upload' - self.search_url = 'https://fearnopeer.com/api/torrents/filter' - self.torrent_url = 
'https://fearnopeer.com/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://fearnopeer.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [ + "4K4U", "BiTOR", "d3g", "FGT", "FRDS", "FTUApps", "GalaxyRG", "LAMA", + "MeGusta", "NeoNoir", "PSA", "RARBG", "YAWNiX", "YTS", "YIFY", "x0r" + ] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { '4320p': '1', '2160p': '2', @@ -60,131 +33,12 @@ async def get_res_id(self, resolution): '576i': '15', '480p': '8', '480i': '14' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + async def get_additional_data(self, meta): data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in 
self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://fearnopeer.com/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + 'modq': await self.get_flag(meta, 'modq'), } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - return dupes + return data diff --git a/src/trackers/FRIKI.py b/src/trackers/FRIKI.py index c82e0f29b..c90904b05 100644 --- a/src/trackers/FRIKI.py +++ b/src/trackers/FRIKI.py @@ -1,180 +1,21 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import httpx from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class FRIKI(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class FRIKI(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='FRIKI') self.config = config + self.common = COMMON(config) self.tracker = 'FRIKI' self.source_flag = 'frikibar.com' - self.upload_url = 'https://frikibar.com/api/torrents/upload' - self.search_url = 'https://frikibar.com/api/torrents/filter' - self.torrent_url = 'https://frikibar.com/torrents' - self.signature = None + self.base_url = 'https://frikibar.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [""] pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 
'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/GPW.py b/src/trackers/GPW.py new file mode 100644 index 000000000..712d03113 --- /dev/null +++ b/src/trackers/GPW.py @@ -0,0 +1,831 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import aiofiles +import httpx +import json +import os +import re +import unicodedata +from bs4 import BeautifulSoup +from src.bbcode import BBCODE +from src.console import console +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language +from src.rehostimages import check_hosts +from src.tmdb import get_tmdb_localized_data +from src.trackers.COMMON import COMMON +from typing import Dict + + +class GPW(): + def __init__(self, config): + self.config = config + self.common = COMMON(config) + self.tracker = 'GPW' + self.source_flag = 'GreatPosterWall' + self.base_url = 'https://greatposterwall.com/' + self.torrent_url = f'{self.base_url}/torrents.php?torrentid=' + self.announce = self.config['TRACKERS'][self.tracker]['announce_url'] + self.api_key = self.config['TRACKERS'][self.tracker]['api_key'] + self.auth_token = None + self.banned_groups = [ + 'ALT', 'aXXo', 'BATWEB', 'BlackTV', 'BitsTV', 'BMDRu', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'CTRLHD', + 'DDHDTV', 'DNL', 'DreamHD', 'ENTHD', 'FaNGDiNG0', 'FGT', 'FRDS', 'HD2DVD', 'HDTime', + 'HDT', 'Huawei', 'GPTHD', 'ION10', 'iPlanet', 'KiNGDOM', 'Leffe', 'Mp4Ba', 'mHD', 'MiniHD', 'mSD', 'MOMOWEB', + 'nHD', 'nikt0', 'NSBC', 'nSD', 'NhaNc3', 'NukeHD', 'OFT', 'PRODJi', 'RARBG', 'RDN', 'SANTi', 'SeeHD', 'SeeWEB', + 'SM737', 'SonyHD', 'STUTTERSHIT', 'TAGWEB', 'ViSION', 'VXT', 'WAF', 'x0r', 'Xiaomi', 'YIFY', + ['EVO', 'web-dl Only'] + ] + self.approved_image_hosts = ['kshare', 'pixhost', 'ptpimg', 'pterclub', 'ilikeshots', 'imgbox'] + self.url_host_mapping = { + 'kshare.club': 'kshare', + 'pixhost.to': 'pixhost', + 'imgbox.com': 'imgbox', + 'ptpimg.me': 'ptpimg', + 'img.pterclub.com': 'pterclub', + 'yes.ilikeshots.club': 'ilikeshots', + } + + async def load_cookies(self, meta): + cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/{self.tracker}.txt") + if not os.path.exists(cookie_file): + return False + + return await self.common.parseCookieFile(cookie_file) + + async def load_localized_data(self, meta): + localized_data_file = f'{meta["base_dir"]}/tmp/{meta["uuid"]}/tmdb_localized_data.json' + main_ch_data = {} + data = {} + + if os.path.isfile(localized_data_file): + try: + async with aiofiles.open(localized_data_file, 'r', encoding='utf-8') as f: + content = await f.read() + data = json.loads(content) + except json.JSONDecodeError: + print(f'Warning: Could not decode JSON from {localized_data_file}') + data = {} + except Exception as e: + print(f'Error reading file {localized_data_file}: {e}') + data = {} + + main_ch_data = data.get('zh-cn', {}).get('main') + + if not main_ch_data: + main_ch_data = await get_tmdb_localized_data( + meta, + data_type='main', + language='zh-cn', + append_to_response='credits' + ) + + self.tmdb_data = main_ch_data + + return + + async def get_container(self, meta): + container = meta.get('container', '') + if container == 'm2ts': + return container + 
elif container == 'vob': + return 'VOB IFO' + elif container in ['avi', 'mpg', 'mp4', 'mkv']: + return container.upper() + + return 'Other' + + async def get_subtitle(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + found_language_strings = meta.get('subtitle_languages', []) + + if found_language_strings: + return [lang.lower() for lang in found_language_strings] + else: + return [] + + async def get_ch_dubs(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + found_language_strings = meta.get('audio_languages', []) + + chinese_languages = {'mandarin', 'chinese', 'zh', 'zh-cn', 'zh-hans', 'zh-hant', 'putonghua', '国语', '普通话'} + for lang in found_language_strings: + if lang.strip().lower() in chinese_languages: + return True + return False + + async def get_codec(self, meta): + video_encode = meta.get('video_encode', '').strip().lower() + codec_final = meta.get('video_codec', '').strip().lower() + + codec_map = { + 'divx': 'DivX', + 'xvid': 'XviD', + 'x264': 'x264', + 'h.264': 'H.264', + 'x265': 'x265', + 'h.265': 'H.265', + 'hevc': 'H.265', + } + + for key, value in codec_map.items(): + if key in video_encode or key in codec_final: + return value + + return 'Other' + + async def get_audio_codec(self, meta): + priority_order = [ + 'DTS-X', 'E-AC-3 JOC', 'TrueHD', 'DTS-HD', 'PCM', 'FLAC', 'DTS-ES', + 'DTS', 'E-AC-3', 'AC3', 'AAC', 'Opus', 'Vorbis', 'MP3', 'MP2' + ] + + codec_map = { + 'DTS-X': ['DTS:X'], + 'E-AC-3 JOC': ['DD+ 5.1 Atmos', 'DD+ 7.1 Atmos'], + 'TrueHD': ['TrueHD'], + 'DTS-HD': ['DTS-HD'], + 'PCM': ['LPCM'], + 'FLAC': ['FLAC'], + 'DTS-ES': ['DTS-ES'], + 'DTS': ['DTS'], + 'E-AC-3': ['DD+'], + 'AC3': ['DD'], + 'AAC': ['AAC'], + 'Opus': ['Opus'], + 'Vorbis': ['VORBIS'], + 'MP2': ['MP2'], + 'MP3': ['MP3'] + } + + audio_description = meta.get('audio') + + if not audio_description or not isinstance(audio_description, str): + return 'Other' + + for codec_name in priority_order: + search_terms = codec_map.get(codec_name, []) + + for term in search_terms: + if term in audio_description: + return codec_name + + return 'Other' + + async def get_title(self, meta): + title = self.tmdb_data.get('name') or self.tmdb_data.get('title') or '' + + return title if title and title != meta.get('title') else '' + + async def check_image_hosts(self, meta): + # Rule: 2.2.1. 
Screenshots: They have to be saved at kshare.club, pixhost.to, ptpimg.me, img.pterclub.com, yes.ilikeshots.club, imgbox.com, s3.pterclub.com + await check_hosts(meta, self.tracker, url_host_mapping=self.url_host_mapping, img_host_index=1, approved_image_hosts=self.approved_image_hosts) + return + + async def get_release_desc(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo, logo_size = await builder.get_logo_section(meta, self.tracker) + if logo and logo_size: + desc_parts.append(f'[center][img={logo_size}]{logo}[/img][/center]') + + # NFO + if meta.get('description_nfo_content', ''): + desc_parts.append(f"[pre]{meta.get('description_nfo_content')}[/pre]") + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Disc menus screenshots header + desc_parts.append(await builder.menu_screenshot_header(meta, self.tracker)) + + # Disc menus screenshots + if f'{self.tracker}_menu_images_key' in meta: + menu_images = meta.get(f'{self.tracker}_menu_images_key', []) + else: + menu_images = meta.get('menu_images', []) + if menu_images: + menu_screenshots_block = '' + for image in menu_images: + menu_screenshots_block += f"[img]{image['raw_url']}[/img]\n" + desc_parts.append('[center]\n' + menu_screenshots_block + '[/center]') + + # Screenshot Header + desc_parts.append(await builder.screenshot_header(self.tracker)) + + # Screenshots + if f'{self.tracker}_images_key' in meta: + images = meta[f'{self.tracker}_images_key'] + else: + images = meta['image_list'] + if images: + screenshots_block = '' + for image in images: + screenshots_block += f"[img]{image['raw_url']}[/img]\n" + desc_parts.append('[center]\n' + screenshots_block + '[/center]') + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Signature + desc_parts.append(f"[align=right][url=https://github.com/Audionut/Upload-Assistant][size=1]{meta['ua_signature']}[/size][/url][/align]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.remove_sup(description) + description = bbcode.remove_sub(description) + description = bbcode.convert_to_align(description) + description = bbcode.remove_list(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + async def get_trailer(self, meta): + video_results = self.tmdb_data.get('videos', {}).get('results', []) + + youtube = '' + + if video_results: + youtube = video_results[-1].get('key', '') + + if not youtube: + meta_trailer = meta.get('youtube', '') + if meta_trailer: + youtube = meta_trailer.replace('https://www.youtube.com/watch?v=', '').replace('/', '') + + return youtube + + async def get_tags(self, meta): + tags = '' + + genres = meta.get('genres', '') + if genres and isinstance(genres, str): + genre_names = [g.strip() for g in genres.split(',') if g.strip()] + if genre_names: + tags = ', '.join( + unicodedata.normalize('NFKD', name) + .encode('ASCII', 'ignore') + .decode('utf-8') + .replace(' ', '.') + .lower() + for name in genre_names + ) + + if not tags: + tags = await self.common.async_input(prompt=f'Enter the genres (in {self.tracker} format): ') + + return tags + + async def 
search_existing(self, meta, disctype): + if meta['category'] != 'MOVIE': + console.print(f'{self.tracker}: Only feature films, short films, and live performances are permitted on {self.tracker}') + meta['skipping'] = f'{self.tracker}' + return + + group_id = await self.get_groupid(meta) + if not group_id: + return [] + + imdb = meta.get("imdb_info", {}).get("imdbID") + + cookies = await self.load_cookies(meta) + if not cookies: + search_url = f'{self.base_url}/api.php?api_key={self.api_key}&action=torrent&imdbID={imdb}' + try: + async with httpx.AsyncClient(timeout=30) as client: + response = await client.get(search_url) + response.raise_for_status() + data = response.json() + + if data.get('status') == 200 and 'response' in data: + results = [] + for item in data['response']: + name = item.get('Name', '') + year = item.get('Year', '') + resolution = item.get('Resolution', '') + source = item.get('Source', '') + processing = item.get('Processing', '') + remaster = item.get('RemasterTitle', '') + codec = item.get('Codec', '') + + formatted = f'{name} {year} {resolution} {source} {processing} {remaster} {codec}'.strip() + formatted = re.sub(r'\s{2,}', ' ', formatted) + results.append(formatted) + return results + else: + return [] + except Exception as e: + print(f'An unexpected error occurred while processing the search: {e}') + return [] + + else: + search_url = f'{self.base_url}/torrents.php?groupname={imdb.upper()}' # using TT in imdb returns the search page instead of redirecting to the group page + found_items = [] + + try: + async with httpx.AsyncClient(cookies=cookies, timeout=30, headers={'User-Agent': 'Upload Assistant/2.3'}) as client: + response = await client.get(search_url) + response.raise_for_status() + soup = BeautifulSoup(response.text, 'html.parser') + + torrent_table = soup.find('table', id='torrent_table') + if not torrent_table: + return [] + + for torrent_row in torrent_table.find_all('tr', class_='TableTorrent-rowTitle'): + title_link = torrent_row.find('a', href=re.compile(r'torrentid=\d+')) + if not title_link or not title_link.get('data-tooltip'): + continue + + name = title_link['data-tooltip'] + + size_cell = torrent_row.find('td', class_='TableTorrent-cellStatSize') + size = size_cell.get_text(strip=True) if size_cell else None + + match = re.search(r'torrentid=(\d+)', title_link['href']) + torrent_link = f'{self.torrent_url}{match.group(1)}' if match else None + + dupe_entry = { + 'name': name, + 'size': size, + 'link': torrent_link + } + + found_items.append(dupe_entry) + + if found_items: + await self.get_slots(meta, client, group_id) + + return found_items + + except httpx.HTTPError as e: + print(f'An HTTP error occurred: {e}') + return [] + except Exception as e: + print(f'An unexpected error occurred while processing the search: {e}') + return [] + + async def get_slots(self, meta, client, group_id): + url = f'{self.base_url}/torrents.php?id={group_id}' + + try: + response = await client.get(url) + response.raise_for_status() + except httpx.HTTPStatusError as e: + print(f'Error on request: {e.response.status_code} - {e.response.reason_phrase}') + return + + soup = BeautifulSoup(response.text, 'html.parser') + + empty_slot_rows = soup.find_all('tr', class_='TableTorrent-rowEmptySlotNote') + + for row in empty_slot_rows: + edition_id = row.get('edition-id') + resolution = '' + + if edition_id == '1': + resolution = 'SD' + elif edition_id == '3': + resolution = '2160p' + + if not resolution: + slot_type_tag = row.find('td', 
class_='TableTorrent-cellEmptySlotNote').find('i') + if slot_type_tag: + resolution = slot_type_tag.get_text(strip=True).replace('empty slots:', '').strip() + + slot_names = [] + + i_tags = row.find_all('i') + for tag in i_tags: + text = tag.get_text(strip=True) + if 'empty slots:' not in text: + slot_names.append(text) + + span_tags = row.find_all('span', class_='tooltipstered') + for tag in span_tags: + slot_names.append(tag.find('i').get_text(strip=True)) + + final_slots_list = sorted(list(set(slot_names))) + formatted_slots = [f'- {slot}' for slot in final_slots_list] + final_slots = '\n'.join(formatted_slots) + + if final_slots: + final_slots = final_slots.replace('Slot', '').replace('Empty slots:', '').strip() + if resolution == meta.get('resolution'): + console.print(f'\n[green]Available Slots for[/green] {resolution}:') + console.print(f'{final_slots}\n') + + async def get_media_info(self, meta): + info_file_path = '' + if meta.get('is_disc') == 'BDMV': + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/BD_SUMMARY_00.txt" + else: + info_file_path = f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO_CLEANPATH.txt" + + if os.path.exists(info_file_path): + try: + with open(info_file_path, 'r', encoding='utf-8') as f: + return f.read() + except Exception as e: + console.print(f'[bold red]Error reading info file at {info_file_path}: {e}[/bold red]') + return '' + else: + console.print(f'[bold red]Info file not found: {info_file_path}[/bold red]') + return '' + + async def get_edition(self, meta): + edition_str = meta.get('edition', '').lower() + if not edition_str: + return '' + + edition_map = { + "director's cut": "Director's Cut", + 'theatrical': 'Theatrical Cut', + 'extended': 'Extended', + 'uncut': 'Uncut', + 'unrated': 'Unrated', + 'imax': 'IMAX', + 'noir': 'Noir', + 'remastered': 'Remastered', + } + + for keyword, label in edition_map.items(): + if keyword in edition_str: + return label + + return '' + + async def get_processing_other(self, meta): + if meta.get('type') == 'DISC': + is_disc_type = meta.get('is_disc') + + if is_disc_type == 'BDMV': + disctype = meta.get('disctype') + if disctype in ['BD100', 'BD66', 'BD50', 'BD25']: + return disctype + + try: + size_in_gb = meta['bdinfo']['size'] + except (KeyError, IndexError, TypeError): + size_in_gb = 0 + + if size_in_gb > 66: + return 'BD100' + elif size_in_gb > 50: + return 'BD66' + elif size_in_gb > 25: + return 'BD50' + else: + return 'BD25' + + elif is_disc_type == 'DVD': + dvd_size = meta.get('dvd_size') + if dvd_size in ['DVD9', 'DVD5']: + return dvd_size + return 'DVD9' + + async def get_screens(self, meta): + screenshot_urls = [ + image.get('raw_url') + for image in meta.get('image_list', []) + if image.get('raw_url') + ] + + return screenshot_urls + + async def get_credits(self, meta): + director = (meta.get('imdb_info', {}).get('directors') or []) + (meta.get('tmdb_directors') or []) + if director: + unique_names = list(dict.fromkeys(director))[:5] + return ', '.join(unique_names) + else: + return 'N/A' + + async def get_remaster_title(self, meta): + found_tags = [] + + def add_tag(tag_id): + if tag_id and tag_id not in found_tags: + found_tags.append(tag_id) + + # Collections + distributor = meta.get('distributor', '').upper() + if distributor in ('WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC'): + add_tag('warner_archive_collection') + elif distributor in ('CRITERION', 'CRITERION COLLECTION', 'CC'): + add_tag('the_criterion_collection') + elif distributor in ('MASTERS OF CINEMA', 'MOC'): 
+ add_tag('masters_of_cinema') + + # Editions + edition = meta.get('edition', '').lower() + if "director's cut" in edition: + add_tag('director_s_cut') + elif 'extended' in edition: + add_tag('extended_edition') + elif 'theatrical' in edition: + add_tag('theatrical_cut') + elif 'rifftrax' in edition: + add_tag('rifftrax') + elif 'uncut' in edition: + add_tag('uncut') + elif 'unrated' in edition: + add_tag('unrated') + + # Audio + if meta.get('dual_audio', False): + add_tag('dual_audio') + + if meta.get('extras'): + add_tag('extras') + + # Commentary + has_commentary = meta.get('has_commentary', False) or meta.get('manual_commentary', False) + + # Ensure 'with_commentary' is last if it exists + if has_commentary: + add_tag('with_commentary') + if 'with_commentary' in found_tags: + found_tags.remove('with_commentary') + found_tags.append('with_commentary') + + if not found_tags: + return '' + + remaster_title_show = ' / '.join(found_tags) + + return remaster_title_show + + async def get_groupid(self, meta): + search_url = f"{self.base_url}/api.php?api_key={self.api_key}&action=torrent&req=group&imdbID={meta.get('imdb_info', {}).get('imdbID')}" + + try: + async with httpx.AsyncClient(timeout=30) as client: + response = await client.get(search_url) + response.raise_for_status() + + except httpx.RequestError as e: + console.print(f'[bold red]Network error fetching groupid: {e}[/bold red]') + return None + except httpx.HTTPStatusError as e: + console.print(f'[bold red]HTTP error when fetching groupid: Status {e.response.status_code}[/bold red]') + return None + + try: + data = response.json() + except Exception as e: + console.print(f'[bold red]Error decoding JSON from groupid response: {e}[/bold red]') + return None + + if data.get('status') == 200 and 'response' in data and 'ID' in data['response']: + return str(data['response']['ID']) + return None + + async def get_additional_data(self, meta): + poster_url = '' + while True: + poster_url = await self.common.async_input(prompt=f"{self.tracker}: Enter the poster image URL (must be from one of {', '.join(self.approved_image_hosts)}): \n") + if any(host in poster_url for host in self.approved_image_hosts): + break + else: + console.print('[red]Invalid host. 
Please use a URL from the allowed hosts.[/red]') + + data = { + 'desc': self.tmdb_data.get('overview', ''), + 'image': poster_url, + 'imdb': meta.get('imdb_info', {}).get('imdbID'), + 'maindesc': meta.get('overview', ''), + 'name': meta.get('title'), + 'releasetype': self._get_movie_type(meta), + 'subname': await self.get_title(meta), + 'tags': await self.get_tags(meta), + 'year': meta.get('year'), + } + data.update(await self._get_artist_data(meta)) + + return data + + async def _get_artist_data(self, meta) -> Dict[str, str]: + directors = meta.get('imdb_info', {}).get('directors', []) + directors_id = meta.get('imdb_info', {}).get('directors_id', []) + + if directors and directors_id: + imdb_id = directors_id[0] + english_name = directors[0] + chinese_name = '' + else: + console.print(f'{self.tracker}: This movie is not registered in the {self.tracker} database, please enter the details of 1 director') + imdb_id = await self.common.async_input(prompt='Enter Director IMDb ID (e.g., nm0000138): ') + english_name = await self.common.async_input(prompt='Enter Director English name: ') + chinese_name = await self.common.async_input(prompt='Enter Director Chinese name (optional, press Enter to skip): ') + + post_data = { + 'artist_ids[]': imdb_id, + 'artists[]': english_name, + 'artists_sub[]': chinese_name, + 'importance[]': '1' + } + + return post_data + + def _get_movie_type(self, meta): + movie_type = '' + imdb_info = meta.get('imdb_info', {}) + if imdb_info: + imdbType = imdb_info.get('type', 'movie').lower() + if imdbType in ("movie", "tv movie", 'tvmovie', 'video'): + if int(imdb_info.get('runtime', '60')) >= 45 or int(imdb_info.get('runtime', '60')) == 0: + movie_type = '1' # Feature Film + else: + movie_type = '2' # Short Film + + return movie_type + + async def get_source(self, meta): + source_type = meta.get('type', '').lower() + + if source_type == 'disc': + is_disc = meta.get('is_disc', '').upper() + if is_disc == 'BDMV': + return 'Blu-ray' + elif is_disc in ('HDDVD', 'DVD'): + return 'DVD' + else: + return 'Other' + + keyword_map = { + 'webdl': 'WEB', + 'webrip': 'WEB', + 'web': 'WEB', + 'remux': 'Blu-ray', + 'encode': 'Blu-ray', + 'bdrip': 'Blu-ray', + 'brrip': 'Blu-ray', + 'hdtv': 'HDTV', + 'sdtv': 'TV', + 'dvdrip': 'DVD', + 'hd-dvd': 'HD-DVD', + 'dvdscr': 'DVD', + 'pdtv': 'TV', + 'uhdtv': 'HDTV', + 'vhs': 'VHS', + 'tvrip': 'TVRip', + } + + return keyword_map.get(source_type, 'Other') + + async def get_processing(self, meta): + type_map = { + 'ENCODE': 'Encode', + 'REMUX': 'Remux', + 'DIY': 'DIY', + 'UNTOUCHED': 'Untouched' + } + release_type = meta.get('type', '').strip().upper() + return type_map.get(release_type, 'Untouched') + + def get_media_flags(self, meta): + audio = meta.get('audio', '').lower() + hdr = meta.get('hdr', '') + bit_depth = meta.get('bit_depth', '') + channels = meta.get('channels', '') + + flags = {} + + # audio flags + if 'atmos' in audio: + flags['dolby_atmos'] = 'on' + + if 'dts:x' in audio: + flags['dts_x'] = 'on' + + if channels == '5.1': + flags['audio_51'] = 'on' + + if channels == '7.1': + flags['audio_71'] = 'on' + + # video flags + if not hdr.strip() and bit_depth == '10': + flags['10_bit'] = 'on' + + if 'DV' in hdr: + flags['dolby_vision'] = 'on' + + if 'HDR' in hdr: + flags['hdr10plus' if 'HDR10+' in hdr else 'hdr10'] = 'on' + + return flags + + async def fetch_data(self, meta, disctype): + await self.load_localized_data(meta) + remaster_title = await self.get_remaster_title(meta) + codec = await self.get_codec(meta) + container = await 
self.get_container(meta) + groupid = await self.get_groupid(meta) + + data = {} + + if not groupid: + console.print(f'{self.tracker}: This movie is not registered in the database, please enter additional information.') + data.update(await self.get_additional_data(meta)) + + data.update({ + 'codec_other': meta.get('video_codec', '') if codec == 'Other' else '', + 'codec': codec, + 'container_other': meta.get('container', '') if container == 'Other' else '', + 'container': container, + 'groupid': groupid if groupid else '', + 'mediainfo[]': await self.get_media_info(meta), + 'movie_edition_information': 'on' if remaster_title else '', + 'processing_other': await self.get_processing_other(meta) if meta.get('type') == 'DISC' else '', + 'processing': await self.get_processing(meta), + 'release_desc': await self.get_release_desc(meta), + 'remaster_custom_title': '', + 'remaster_title': remaster_title, + 'remaster_year': '', + 'resolution_height': '', + 'resolution_width': '', + 'resolution': meta.get('resolution'), + 'source_other': '', + 'source': await self.get_source(meta), + 'submit': 'true', + 'subtitle_type': ('2' if meta.get('hardcoded-subs', False) else '1' if meta.get('subtitle_languages', []) else '3'), + 'subtitles[]': await self.get_subtitle(meta), + }) + + if await self.get_ch_dubs(meta): + data.update({ + 'chinese_dubbed': 'on' + }) + + if meta.get('sfx_subtitles', False): + data.update({ + 'special_effects_subtitles': 'on' + }) + + if meta.get('scene', False): + data.update({ + 'scene': 'on' + }) + + if meta.get('personalrelease', False): + data.update({ + 'self_rip': 'on' + }) + + data.update(self.get_media_flags(meta)) + + return data + + async def upload(self, meta, disctype): + await self.common.edit_torrent(meta, self.tracker, self.source_flag) + data = await self.fetch_data(meta, disctype) + status_message = '' + + if not meta.get('debug', False): + response_data = '' + torrent_id = '' + upload_url = f'{self.base_url}/api.php?api_key={self.api_key}&action=upload' + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + + with open(torrent_path, 'rb') as torrent_file: + files = {'file_input': (f'{self.tracker}.placeholder.torrent', torrent_file, 'application/x-bittorrent')} + + try: + async with httpx.AsyncClient(timeout=30) as client: + response = await client.post(url=upload_url, files=files, data=data) + response_data = response.json() + + torrent_id = str(response_data['response']['torrent_id']) + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + status_message = 'Torrent uploaded successfully.' + + except httpx.TimeoutException: + meta['tracker_status'][self.tracker]['status_message'] = 'data error: Request timed out after 10 seconds' + except httpx.RequestError as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: Unable to upload. Error: {e}.\nResponse: {response_data}' + except Exception as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: It may have uploaded, go check. Error: {e}.\nResponse: {response_data}' + return + + await self.common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.announce, self.torrent_url + torrent_id) + + else: + console.print(data) + status_message = 'Debug mode enabled, not uploading.' 
+ + meta['tracker_status'][self.tracker]['status_message'] = status_message diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 25f8a0c47..5b9fdf702 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -1,19 +1,22 @@ -import requests +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles import asyncio -import re -import os -from pathlib import Path -import json import glob import httpx +import json +import os +import re +import requests + +from torf import Torrent from unidecode import unidecode from urllib.parse import urlparse, quote -from src.trackers.COMMON import COMMON -from src.exceptions import * # noqa F403 + from src.console import console -from datetime import datetime -from torf import Torrent -from src.torrentcreate import CustomTorrent, torf_cb, create_torrent +from data.config import config +from src.exceptions import * # noqa F403 +from src.torrentcreate import create_torrent +from src.trackers.COMMON import COMMON class HDB(): @@ -44,6 +47,9 @@ async def get_type_category_id(self, meta): # 3 = Documentary if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower(): cat_id = 3 + if meta.get('imdb_info').get('type') is not None and meta.get('imdb_info').get('genres') is not None: + if 'concert' in meta.get('imdb_info').get('type').lower() or ('video' in meta.get('imdb_info').get('type').lower() and 'music' in meta.get('imdb_info').get('genres').lower()): + cat_id = 4 return cat_id async def get_type_codec_id(self, meta): @@ -180,9 +186,9 @@ async def edit_name(self, meta): if 'HDR10+' not in meta['hdr']: hdb_name = hdb_name.replace('HDR', 'HDR10') if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE'): - hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1).replace('Atmos', '')) + hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1).replace(' Atmos', '')) else: - hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace('Atmos', '')) + hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace(' Atmos', '')) hdb_name = hdb_name.replace(meta.get('aka', ''), '') if meta.get('imdb_info'): hdb_name = hdb_name.replace(meta['title'], meta['imdb_info']['aka']) @@ -192,6 +198,8 @@ async def edit_name(self, meta): hdb_name = hdb_name.replace('PQ10', 'HDR') hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') hdb_name = hdb_name.replace('REMUX', 'Remux') + hdb_name = hdb_name.replace('BluRay Remux', 'Remux') + hdb_name = hdb_name.replace('UHD Remux', 'Remux') hdb_name = ' '.join(hdb_name.split()) hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
:&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') @@ -217,61 +225,26 @@ async def upload(self, meta, disctype): console.print("[bold red]Dual-Audio Encodes are not allowed for non-anime and non-disc content") return - # Download new .torrent from site hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - torrent = Torrent.read(torrent_path) + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + if not await aiofiles.os.path.exists(torrent_file_path): + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename="BASE") + + loop = asyncio.get_running_loop() + torrent = await loop.run_in_executor(None, Torrent.read, torrent_file_path) # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed if torrent.piece_size > 16777216: # 16 MiB in bytes console.print("[red]Piece size is OVER 16M and does not work on HDB. Generating a new .torrent") - if meta.get('mkbrr', False): - from data.config import config - tracker_url = config['TRACKERS']['HDB'].get('announce_url', "https://fake.tracker/").strip() + tracker_url = config['TRACKERS']['HDB'].get('announce_url', "https://fake.tracker/").strip() + meta['max_piece_size'] = '16' + torrent_create = f"[{self.tracker}]" - # Create the torrent with the tracker URL - torrent_create = f"[{self.tracker}]" - create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) - torrent_filename = "[HDB]" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - else: - if meta['is_disc']: - include = [] - exclude = [] - else: - include = ["*.mkv", "*.mp4", "*.ts"] - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - - # Create a new torrent with piece size explicitly set to 16 MiB - new_torrent = CustomTorrent( - meta=meta, - path=Path(meta['path']), - trackers=["https://fake.tracker/"], - source="Audionut", - private=True, - exclude_globs=exclude, # Ensure this is always a list - include_globs=include, # Ensure this is always a list - creation_date=datetime.now(), - comment="Created by Audionut's Upload Assistant", - created_by="Audionut's Upload Assistant" - ) - - # Explicitly set the piece size and update metainfo - new_torrent.piece_size = 16777216 # 16 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set - - # Validate and write the new torrent - new_torrent.validate_piece_size() - new_torrent.generate(callback=torf_cb, interval=5) - new_torrent.write(torrent_path, overwrite=True) - torrent_filename = "[HDB]" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - else: - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename="BASE") + create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_create) # Proceed with the upload process - with open(torrent_path, 'rb') as torrentFile: + with open(torrent_file_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) else: @@ -301,8 +274,7 @@ async def upload(self, meta, disctype): if meta.get('tvdb_id', 0) != 0: data['tvdb'] = meta['tvdb_id'] if meta.get('imdb_id') != 
0: - imdbID = f"tt{meta.get('imdb_id'):07d}" - data['imdb'] = f"https://www.imdb.com/title/%7BimdbID%7D/", + data['imdb'] = str(meta.get('imdb_info', {}).get('imdb_url', '')) + '/', else: data['imdb'] = 0 if meta.get('category') == 'TV': @@ -328,7 +300,7 @@ async def upload(self, meta, disctype): if match: meta['tracker_status'][self.tracker]['status_message'] = match.group(0) id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) - await self.download_new_torrent(id, torrent_path) + await self.download_new_torrent(id, torrent_file_path) else: console.print(data) console.print("\n\n") @@ -353,45 +325,78 @@ async def search_existing(self, meta, disctype): if int(meta.get('tvdb_id')) != 0: data['tvdb'] = {'id': meta['tvdb_id']} + # Build search_terms list search_terms = [] - has_valid_ids = ((meta.get('category') == 'TV' and meta.get('tvdb_id') == 0 and meta.get('imdb_id') == 0) or - (meta.get('category') == 'MOVIE' and meta.get('imdb_id') == 0)) + has_valid_ids = ((meta.get('category') == 'TV' and meta.get('tvdb_id', 0) == 0 and meta.get('imdb_id', 0) == 0) or + (meta.get('category') == 'MOVIE' and meta.get('imdb_id', 0) == 0)) if has_valid_ids: console.print("[yellow]No IMDb or TVDB ID found, trying other options...") console.print("[yellow]Double check that the upload does not already exist...") - search_terms.append(meta['filename']) - if meta.get('aka') and meta['aka'] != "": + if meta.get('filename'): + search_terms.append(meta['filename']) + if meta.get('aka'): aka_clean = meta['aka'].replace('AKA ', '').strip() if aka_clean: search_terms.append(aka_clean) if meta.get('uuid'): search_terms.append(meta['uuid']) - else: - search_terms.append(meta['resolution']) + # We have ids + if not search_terms: + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.post(url, json=data) + if response.status_code == 200: + response_data = response.json() + results = response_data.get('data', []) + if results: + for each in results: + result = { + 'name': each['name'], + 'size': each['size'], + 'files': each['filename'][:-8] if each['filename'].endswith('.torrent') else each['filename'], + 'filecount': each['numfiles'], + 'link': f"https://hdbits.org/details.php?id={each['id']}", + 'download': f"https://hdbits.org/download.php/%7Bquote(each['filename'])}?id={each['id']}&passkey={self.passkey}" + } + dupes.append(result) + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print("[bold red]Unexpected error occurred while searching torrents.") + console.print(str(e)) + await asyncio.sleep(5) + return dupes + + # Otherwise, search for each term for search_term in search_terms: console.print(f"[yellow]Searching HDB for: {search_term}") data['search'] = search_term try: - # Send POST request with JSON body async with httpx.AsyncClient(timeout=5.0) as client: response = await client.post(url, json=data) - if response.status_code == 200: response_data = response.json() results = response_data.get('data', []) - if results: for each in results: - result = each['name'] + result = { + 'name': each['name'], + 'size': each['size'], + 'files': each['filename'][:-8] if each['filename'].endswith('.torrent') else each['filename'], + 'filecount': each['numfiles'], + 'link': f"https://hdbits.org/details.php?id={each['id']}", + 'download': f"https://hdbits.org/download.php/%7Bquote(each['filename'])}?id={each['id']}&passkey={self.passkey}" + } dupes.append(result) - console.print(f"[green]Found {len(results)} results using search term: {search_term}") - break # We found results, no need to try other search terms else: console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") - except httpx.TimeoutException: console.print("[bold red]Request timed out while searching for existing torrents.") except httpx.RequestError as e: @@ -404,30 +409,12 @@ async def search_existing(self, meta, disctype): return dupes async def validate_credentials(self, meta): - vapi = await self.validate_api() vcookie = await self.validate_cookies(meta) - if vapi is not True: - console.print('[red]Failed to validate API. Please confirm that the site is up and your passkey is valid.') - return False if vcookie is not True: console.print('[red]Failed to validate cookies. 
Please confirm that the site is up and your passkey is valid.') return False return True - async def validate_api(self): - url = "https://hdbits.org/api/test" - data = { - 'username': self.username, - 'passkey': self.passkey - } - try: - r = requests.post(url, data=json.dumps(data)).json() - if r.get('status', 5) == 0: - return True - return False - except Exception: - return False - async def validate_cookies(self, meta): common = COMMON(config=self.config) url = "https://hdbits.org/" @@ -515,6 +502,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[/size\]|\[size=\d+\]", "", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images is True: console.print("[green]Rehosting Images...") @@ -541,7 +529,7 @@ async def edit_desc(self, meta): descfile.write(f"{hdbimg_bbcode}") descfile.write("[/center]") else: - descfile.write(f"{hdbimg_bbcode}") + descfile.write(f"[center]{hdbimg_bbcode}[/center]") else: images = meta['image_list'] if len(images) > 0: @@ -600,8 +588,14 @@ async def hdbimg_upload(self, meta): # Interleave images for correct ordering all_image_files = [] sorted_group_indices = sorted(group_images.keys(), key=lambda x: int(x)) - if len(sorted_group_indices) < 4: - thumb_size = 'w250' + if len(sorted_group_indices) < 3: + thumb_size = 'w350' + elif len(sorted_group_indices) == 3: + thumb_size = 'w300' + elif len(sorted_group_indices) == 4: + thumb_size = 'w200' + elif len(sorted_group_indices) == 5: + thumb_size = 'w150' else: thumb_size = 'w100' @@ -670,11 +664,64 @@ async def hdbimg_upload(self, meta): if meta['debug']: console.print(f"[green]Uploading {len(files)} images to HDB...") - response = requests.post(url, data=data, files=files) - if response.status_code == 200: - console.print("[green]Upload successful!") - bbcode = response.text + uploadSuccess = True + if meta.get('comparison', False): + num_groups = len(sorted_group_indices) if sorted_group_indices else 3 + max_chunk_size = 100 * 1024 * 1024 # 100 MiB in bytes + bbcode = "" + + chunks = [] + current_chunk = [] + current_chunk_size = 0 + + files_list = list(files.items()) + for i in range(0, len(files_list), num_groups): + row_items = files_list[i:i+num_groups] + row_size = sum(os.path.getsize(all_image_files[i+j]) for j in range(len(row_items))) + + # If adding this row would exceed chunk size and we already have items, start new chunk + if current_chunk and current_chunk_size + row_size > max_chunk_size: + chunks.append(current_chunk) + current_chunk = [] + current_chunk_size = 0 + + current_chunk.extend(row_items) + current_chunk_size += row_size + + if current_chunk: + chunks.append(current_chunk) + + if meta['debug']: + console.print(f"[cyan]Split into {len(chunks)} chunks based on 100 MiB limit") + + # Upload each chunk + for chunk_idx, chunk in enumerate(chunks): + fileList = {} + for j, (key, value) in enumerate(chunk): + fileList[f'images_files[{j}]'] = value + + if meta['debug']: + chunk_size_mb = sum(os.path.getsize(all_image_files[int(key.split('[')[1].split(']')[0])]) for key, _ in chunk) / (1024 * 1024) + console.print(f"[cyan]Uploading chunk {chunk_idx + 1}/{len(chunks)} ({len(fileList)} images, {chunk_size_mb:.2f} MiB)") + + response = requests.post(url, data=data, files=fileList) + if response.status_code == 200: + console.print(f"[green]Chunk {chunk_idx + 1}/{len(chunks)} upload successful!") + 
bbcode += response.text + else: + console.print(f"[red]Chunk {chunk_idx + 1}/{len(chunks)} upload failed with status code {response.status_code}") + uploadSuccess = False + break + else: + response = requests.post(url, data=data, files=files) + if response.status_code == 200: + console.print("[green]Upload successful!") + bbcode = response.text + else: + uploadSuccess = False + + if uploadSuccess is True: if meta.get('comparison', False): matches = re.findall(r'\[url=.*?\]\[img\].*?\[/img\]\[/url\]', bbcode) formatted_bbcode = "" @@ -757,17 +804,19 @@ async def search_filename(self, search_term, search_file_folder, meta): bd_summary = line.split("Disc Title:")[1].strip() break + if not bd_summary: + bd_summary = meta.get('uuid', '') + if bd_summary: data = { "username": self.username, "passkey": self.passkey, "limit": 100, - "search": bd_summary # Using the Disc Title for search + "search": bd_summary # Using the Disc Title for search with uuid fallback } - console.print(f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]") + console.print(f"[green]Searching HDB for title: [bold yellow]{bd_summary}[/bold yellow]") # console.print(f"[yellow]Using this data: {data}") else: - console.print(f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_description, hdb_id except FileNotFoundError: diff --git a/src/trackers/HDS.py b/src/trackers/HDS.py index 2b40b9733..e372befed 100644 --- a/src/trackers/HDS.py +++ b/src/trackers/HDS.py @@ -1,172 +1,209 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- +import aiofiles +import glob +import httpx import os -import re -import requests -import cli_ui -from src.exceptions import UploadException +import platform from bs4 import BeautifulSoup +from src.bbcode import BBCODE from src.console import console -from .COMMON import COMMON -from pymediainfo import MediaInfo +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder -class HDS(COMMON): +class HDS: def __init__(self, config): - super().__init__(config) + self.config = config + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) self.tracker = 'HDS' self.source_flag = 'HD-Space' - self.banned_groups = [""] - self.base_url = "https://hd-space.org/" - self.session = requests.Session() - self.session.headers.update({ - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' - }) - self.signature = "[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [''] + self.base_url = 'https://hd-space.org/' + self.torrent_url = f'{self.base_url}/index.php?page=torrent-details&id=' + self.requests_url = f'{self.base_url}/index.php?page=viewrequests' + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f"Upload Assistant/2.3 ({platform.system()} {platform.release()})" + }, timeout=30) + + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/index.php?page=upload', + error_text='Recover password', + ) async def generate_description(self, meta): - base_desc_path = 
f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" - final_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - - description_parts = [] - - # MediaInfo/BDInfo - tech_info = "" - if meta.get('is_disc') != 'BDMV': - video_file = meta['filelist'][0] - mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") - if os.path.exists(mi_template): - try: - media_info = MediaInfo.parse(video_file, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) - tech_info = str(media_info) - except Exception: - console.print("[bold red]Couldn't find the MediaInfo template[/bold red]") - mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" - if os.path.exists(mi_file_path): - with open(mi_file_path, 'r', encoding='utf-8') as f: - tech_info = f.read() - else: - console.print("[bold yellow]Using normal MediaInfo for the description.[/bold yellow]") - mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" - if os.path.exists(mi_file_path): - with open(mi_file_path, 'r', encoding='utf-8') as f: - tech_info = f.read() - else: - bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" - if os.path.exists(bd_summary_file): - with open(bd_summary_file, 'r', encoding='utf-8') as f: - tech_info = f.read() - - if tech_info: - description_parts.append(tech_info) - - if os.path.exists(base_desc_path): - with open(base_desc_path, 'r', encoding='utf-8') as f: - manual_desc = f.read() - description_parts.append(manual_desc) - - # Screenshots - images = meta.get('image_list', []) - if not images or len(images) < 3: - raise UploadException("[red]HDS requires at least 3 screenshots.[/red]") - - screenshots_block = "[center][b]Screenshots[/b]\n\n" - for image in images: - img_url = image['img_url'] - web_url = image['web_url'] - screenshots_block += f"[url={web_url}][img]{img_url}[/img][/url] " - screenshots_block += "[/center]" - - description_parts.append(screenshots_block) - - if self.signature: - description_parts.append(self.signature) - - final_description = "\n\n".join(filter(None, description_parts)) - from src.bbcode import BBCODE + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/center]") + + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker, resize=True) + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + desc_parts.append(f"[center][img]{episode_image}[/img][/center]") + + desc_parts.append(f'[center]{episode_overview}[/center]') + + # File information + mediainfo = await builder.get_mediainfo_section(meta, self.tracker) + if mediainfo: + desc_parts.append(f'[pre]{mediainfo}[/pre]') + + bdinfo = await builder.get_bdinfo_section(meta) + if bdinfo: + desc_parts.append(f'[pre]{bdinfo}[/pre]') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Disc menus screenshots header + menu_images = meta.get("menu_images", []) + if menu_images: + desc_parts.append(await builder.menu_screenshot_header(meta, self.tracker)) + + # Disc menus screenshots + menu_screenshots_block = "" + for image in menu_images: + menu_web_url = image.get("web_url") + menu_img_url = 
image.get("img_url") + if menu_web_url and menu_img_url: + menu_screenshots_block += f"[url={menu_web_url}][img]{menu_img_url}[/img][/url]" + # HDS cannot resize images. If the image host does not provide small thumbnails(<400px), place only one image per line + if "imgbox" not in menu_web_url: + menu_screenshots_block += "\n" + if menu_screenshots_block: + desc_parts.append(f"[center]\n{menu_screenshots_block}\n[/center]") + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Screenshot Header + images = meta.get("image_list", []) + if images: + desc_parts.append(await builder.screenshot_header(self.tracker)) + + # Screenshots + if images: + screenshots_block = "" + for image in images: + web_url = image.get("web_url") + img_url = image.get("img_url") + if web_url and img_url: + screenshots_block += f"[url={web_url}][img]{img_url}[/img][/url]" + # HDS cannot resize images. If the image host does not provide small thumbnails(<400px), place only one image per line + if "imgbox" not in web_url: + screenshots_block += "\n" + if screenshots_block: + desc_parts.append(f"[center]\n{screenshots_block}\n[/center]") + + # Signature + desc_parts.append(f"[center][url=https://github.com/Audionut/Upload-Assistant][size=2]{meta['ua_signature']}[/size][/url][/center]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + bbcode = BBCODE() - desc = final_description - desc = desc.replace("[user]", "").replace("[/user]", "") - desc = desc.replace("[align=left]", "").replace("[/align]", "") - desc = desc.replace("[right]", "").replace("[/right]", "") - desc = desc.replace("[align=right]", "").replace("[/align]", "") - desc = desc.replace("[sup]", "").replace("[/sup]", "") - desc = desc.replace("[sub]", "").replace("[/sub]", "") - desc = desc.replace("[alert]", "").replace("[/alert]", "") - desc = desc.replace("[note]", "").replace("[/note]", "") - desc = desc.replace("[hr]", "").replace("[/hr]", "") - desc = desc.replace("[h1]", "[u][b]").replace("[/h1]", "[/b][/u]") - desc = desc.replace("[h2]", "[u][b]").replace("[/h2]", "[/b][/u]") - desc = desc.replace("[h3]", "[u][b]").replace("[/h3]", "[/b][/u]") - desc = desc.replace("[ul]", "").replace("[/ul]", "") - desc = desc.replace("[ol]", "").replace("[/ol]", "") - desc = desc.replace("[hide]", "").replace("[/hide]", "") - desc = re.sub(r"\[center\]\[spoiler=.*? 
NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]", r"NFO:[code][pre]\1[/pre][/code]", desc, flags=re.DOTALL) - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) - desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = bbcode.remove_spoiler(desc) - - with open(final_desc_path, 'w', encoding='utf-8') as f: - f.write(desc) + description = description.replace('[user]', '').replace('[/user]', '') + description = description.replace('[align=left]', '').replace('[/align]', '') + description = description.replace('[right]', '').replace('[/right]', '') + description = description.replace('[align=right]', '').replace('[/align]', '') + description = bbcode.remove_sub(description) + description = bbcode.remove_sup(description) + description = description.replace('[alert]', '').replace('[/alert]', '') + description = description.replace('[note]', '').replace('[/note]', '') + description = description.replace('[hr]', '').replace('[/hr]', '') + description = description.replace('[h1]', '[u][b]').replace('[/h1]', '[/b][/u]') + description = description.replace('[h2]', '[u][b]').replace('[/h2]', '[/b][/u]') + description = description.replace('[h3]', '[u][b]').replace('[/h3]', '[/b][/u]') + description = description.replace('[ul]', '').replace('[/ul]', '') + description = description.replace('[ol]', '').replace('[/ol]', '') + description = bbcode.remove_hide(description) + description = bbcode.remove_img_resize(description) + description = bbcode.convert_comparison_to_centered(description, 1000) + description = bbcode.remove_spoiler(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description async def search_existing(self, meta, disctype): - dupes = [] - if not await self.validate_credentials(meta): - cli_ui.fatal(f"Failed to validate {self.tracker} credentials, skipping duplicate check.") - return dupes + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + dupes = [] imdb_id = meta.get('imdb', '') if imdb_id == '0': - cli_ui.info(f"IMDb ID not found, cannot search for duplicates on {self.tracker}.") + console.print(f'IMDb ID not found, cannot search for duplicates on {self.tracker}.') return dupes - search_url = f"{self.base_url}/index.php?page=torrents&search={imdb_id}&active=0&options=2" + search_url = f'{self.base_url}/index.php?' 
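# Note (illustrative, not project code): the chain of replace() calls in generate_description
# above rewrites BBCode tags that HD-Space does not render before the description is written
# to disk. Assuming only plain tag-for-tag substitutions, the same pass can be expressed as a
# table-driven helper; the name and mapping below are examples, not part of this diff.
HDS_TAG_MAP = {
    '[user]': '', '[/user]': '',
    '[alert]': '', '[/alert]': '',
    '[h1]': '[u][b]', '[/h1]': '[/b][/u]',
}

def rewrite_bbcode_tags(text: str, tag_map: dict = HDS_TAG_MAP) -> str:
    # Apply each substitution in order; order only matters for overlapping tags.
    for tag, replacement in tag_map.items():
        text = text.replace(tag, replacement)
    return text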
+ + params = { + 'page': 'torrents', + 'search': imdb_id, + 'active': '0', + 'options': '2' + } try: - response = self.session.get(search_url, timeout=20) + response = await self.session.get(search_url, params=params) response.raise_for_status() - soup = BeautifulSoup(response.text, 'html.parser') - torrent_links = soup.find_all('a', href=lambda href: href and 'page=torrent-details&id=' in href) - if torrent_links: - for link in torrent_links: - dupes.append(link.get_text(strip=True)) + all_tables = soup.find_all('table', class_='lista') - except Exception as e: - console.print(f"[bold red]Error searching for duplicates on {self.tracker}: {e}[/bold red]") + torrent_rows = [] - return dupes + for table in all_tables: + recommend_header = table.find('td', class_='block', string='Our Team Recommend') + if recommend_header: + continue - async def validate_credentials(self, meta): - cookie_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDS.txt") - if not os.path.exists(cookie_file): - console.print(f"[bold red]Cookie file for {self.tracker} not found: {cookie_file}[/bold red]") - return False + rows_in_table = table.select('tr:has(td.lista)') + torrent_rows.extend(rows_in_table) - common = COMMON(config=self.config) - self.session.cookies.update(await common.parseCookieFile(cookie_file)) + for row in torrent_rows: + name_tag = row.select_one('td:nth-child(2) > a[href*="page=torrent-details&id="]') + name = name_tag.get_text(strip=True) if name_tag else 'Unknown Name' - try: - test_url = f"{self.base_url}/index.php?page=upload" + link_tag = name_tag + torrent_link = None + if link_tag and 'href' in link_tag.attrs: + torrent_link = f'{self.base_url}/{link_tag["href"]}' - response = self.session.get(test_url, timeout=10, allow_redirects=False) + duplicate_entry = { + 'name': name, + 'size': None, + 'link': torrent_link + } + dupes.append(duplicate_entry) - if response.status_code == 200 and 'index.php?page=upload' in response.url: - return True - else: - console.print(f"[bold red]Failed to validate {self.tracker} credentials. 
The cookie may be expired.[/bold red]") - return False except Exception as e: - console.print(f"[bold red]Error validating {self.tracker} credentials: {e}[/bold red]") - return False + console.print(f'[bold red]Error searching for duplicates on {self.tracker}: {e}[/bold red]') + + return dupes async def get_category_id(self, meta): resolution = meta.get('resolution') category = meta.get('category') type_ = meta.get('type') is_disc = meta.get('is_disc') - genres = meta.get("genres", "").lower() - keywords = meta.get("keywords", "").lower() + genres = meta.get('genres', '').lower() + keywords = meta.get('keywords', '').lower() is_anime = meta.get('anime') if is_disc == 'BDMV': @@ -207,83 +244,114 @@ async def get_category_id(self, meta): return 38 - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - - if not await self.validate_credentials(meta): - cli_ui.fatal(f"Failed to validate {self.tracker} credentials, aborting.") - return - - cat_id = await self.get_category_id(meta) - - await self.generate_description(meta) - description_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - with open(description_path, 'r', encoding='utf-8') as f: - description = f.read() - - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - - tracker_anon_setting = self.config['TRACKERS'][self.tracker].get('anon', False) - is_anonymous = meta['anon'] != 0 or tracker_anon_setting is True - + async def get_requests(self, meta): + if not self.config['DEFAULT'].get('search_requests', False) and not meta.get('search_requests', False): + return False + else: + try: + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + query = meta['title'] + search_url = f'{self.base_url}/index.php?' 
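# Note (illustrative sketch): the duplicate search above and the request search being assembled
# here share the same fetch-parse-collect shape: GET with query params, parse the HTML with
# BeautifulSoup, and build a list of name/link dicts from the matching rows. The URL, client
# setup, and selectors below are placeholders, not HD-Space's real markup.
import httpx
from bs4 import BeautifulSoup

async def collect_rows(url: str, params: dict) -> list:
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.get(url, params=params)
        response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    results = []
    for link in soup.select('table.lista a[href*="torrent-details"]'):
        results.append({'name': link.get_text(strip=True), 'link': link.get('href')})
    return results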
+ + params = { + 'page': 'viewrequests', + 'search': query, + 'filter': 'true' + } + + response = await self.session.get(search_url, params=params, cookies=self.session.cookies) + response.raise_for_status() + response_results_text = response.text + + soup = BeautifulSoup(response_results_text, 'html.parser') + request_rows = soup.select('form[action="https://github.com/index.php?page=takedelreq"] table.lista tr') + + results = [] + for row in request_rows: + if row.find('td', class_='header'): + continue + + name_element = row.select_one('td.lista a b') + if not name_element: + continue + + name = name_element.text.strip() + link_element = name_element.find_parent('a') + link = link_element['href'] if link_element else None + + results.append({ + 'Name': name, + 'Link': link, + }) + + if results: + message = f"\n{self.tracker}: [bold yellow]Your upload may fulfill the following request(s), check it out:[/bold yellow]\n\n" + for r in results: + message += f"[bold green]Name:[/bold green] {r['Name']}\n" + message += f"[bold green]Link:[/bold green] {self.base_url}/{r['Link']}\n\n" + console.print(message) + + return results + + except Exception as e: + print(f'An error occurred while fetching requests: {e}') + return [] + + async def get_data(self, meta): data = { - 'user_id': '', - 'category': cat_id, + 'category': await self.get_category_id(meta), 'filename': meta['name'], + 'genre': meta.get('genres', ''), 'imdb': meta.get('imdb', ''), - 'youtube_video': meta.get('youtube', ''), - 'info': description, - 'anonymous': 'true' if is_anonymous else 'false', - 't3d': 'true' if '3D' in meta.get('3d', '') else 'false', - 'req': 'false', - 'nuk': 'false', + 'info': await self.generate_description(meta), 'nuk_rea': '', + 'nuk': 'false', + 'req': 'false', 'submit': 'Send', + 't3d': 'true' if '3D' in meta.get('3d', '') else 'false', + 'user_id': '', + 'youtube_video': meta.get('youtube', ''), } - if meta.get('genre'): - data['genre'] = meta.get('genre') - - with open(torrent_path, 'rb') as torrent_file: - files = {'torrent': (os.path.basename(torrent_path), torrent_file, 'application/x-bittorrent')} - self.session.headers.update({'Referer': f'{self.base_url}/index.php?page=upload'}) - - if meta['debug'] is False: - upload_url = f"{self.base_url}/index.php?page=upload" - response = self.session.post(upload_url, data=data, files=files, timeout=60) - - if "This torrent may already exist in our database." in response.text: - console.print(f"[bold red]Upload to {self.tracker} failed: The torrent already exists on the site.[/bold red]") - raise UploadException(f"Upload to {self.tracker} failed: Duplicate detected.", "red") - - elif "Upload successful!" 
in response.text and "download.php?id=" in response.text: - soup = BeautifulSoup(response.text, 'html.parser') - download_link_tag = soup.find('a', href=lambda href: href and "download.php?id=" in href) - - if download_link_tag: - href = download_link_tag['href'] - id_match = re.search(r'id=([a-f0-9]+)', href) - - if id_match: - torrent_id = id_match.group(1) - details_url = f"{self.base_url}/index.php?page=torrent-details&id={torrent_id}" - meta['tracker_status'][self.tracker]['status_message'] = details_url - - announce_url = self.config['TRACKERS'][self.tracker].get('announce_url') - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, announce_url, details_url) - else: - console.print("[bold red]Critical Error: Could not extract torrent ID from the download link.[/bold red]") - else: - console.print("[bold yellow]Warning: Upload was successful, but the torrent link could not be found on the response page.[/bold yellow]") - - else: - console.print(f"[bold red]Upload to {self.tracker} failed.[/bold red]") - console.print(f"Status: {response.status_code}") - console.print(f"Response: {response.text[:800]}") - raise UploadException(f"Upload to {self.tracker} failed, check the response.", "red") - else: - console.print(f"[bold blue]Debug Mode: Upload to {self.tracker} was not sent.[/bold blue]") - console.print("Headers:", self.session.headers) - console.print("Payload (data):", data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." + # Anon + anon = not (meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False)) + if anon: + data.update({ + 'anonymous': 'true' + }) + else: + data.update({ + 'anonymous': 'false' + }) + + return data + + async def get_nfo(self, meta): + nfo_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + nfo_files = glob.glob(os.path.join(nfo_dir, "*.nfo")) + + if nfo_files: + nfo_path = nfo_files[0] + return {'nfo': (os.path.basename(nfo_path), open(nfo_path, "rb"), "application/octet-stream")} + return {} + + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + files = await self.get_nfo(meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='torrent', + upload_cookies=self.session.cookies, + upload_url="https://hd-space.org/index.php?page=upload", + hash_is_id=True, + success_text="download.php?id=", + additional_files=files, + ) + + return diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 98395e78b..970842905 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -1,27 +1,48 @@ -import requests -import asyncio -import re +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import aiofiles +import glob +import httpx import os -import cli_ui +import platform +import re from bs4 import BeautifulSoup -from unidecode import unidecode -from pymediainfo import MediaInfo -from src.trackers.COMMON import COMMON -from src.exceptions import * # noqa F403 +from src.bbcode import BBCODE from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder +from urllib.parse import urlparse -class HDT(): - +class HDT: def __init__(self, config): self.config = config + self.cookie_validator 
= CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) self.tracker = 'HDT' self.source_flag = 'hd-torrents.org' - self.username = config['TRACKERS'][self.tracker].get('username', '').strip() - self.password = config['TRACKERS'][self.tracker].get('password', '').strip() - self.signature = None - self.base_url = "https://hd-torrents.net/" - self.banned_groups = [""] + + url_from_config = self.config['TRACKERS'][self.tracker].get('url') + parsed_url = urlparse(url_from_config) + self.config_url = parsed_url.netloc + self.base_url = f'https://{self.config_url}/' + + self.torrent_url = f'{self.base_url}/details.php?id=' + self.announce_url = self.config['TRACKERS'][self.tracker]['announce_url'] + self.banned_groups = [] + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f'Upload Assistant ({platform.system()} {platform.release()})' + }, timeout=60.0) + + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + success_text='usercp.php', + token_pattern=r'name="csrfToken" value="([^"]+)"' + ) async def get_category_id(self, meta): if meta['category'] == 'MOVIE': @@ -102,237 +123,229 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await self.edit_desc(meta) - hdt_name = await self.edit_name(meta) - cat_id = await self.get_category_id(meta) - - # Confirm the correct naming order for HDT - cli_ui.info(f"HDT name: {hdt_name}") - if meta.get('unattended', False) is False: - hdt_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if hdt_confirm is not True: - hdt_name_manually = cli_ui.ask_string("Please enter a proper name", default="") - if hdt_name_manually == "": - console.print('No proper name given') - console.print("Aborting...") - return - else: - hdt_name = hdt_name_manually + async def edit_desc(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/center]") + + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker, resize=True) + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + desc_parts.append(f"[center][img]{episode_image}[/img][/center]") + + desc_parts.append(f'[center]{episode_overview}[/center]') + + # File information + mediainfo = await builder.get_mediainfo_section(meta, self.tracker) + if mediainfo: + desc_parts.append(f'[left][font=consolas]{mediainfo}[/font][/left]') + + bdinfo = await builder.get_bdinfo_section(meta) + if bdinfo: + desc_parts.append(f'[left][font=consolas]{bdinfo}[/font][/left]') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Screenshot Header + desc_parts.append(await builder.screenshot_header(self.tracker)) + + # Screenshots + images = meta.get('image_list', []) + 
if images: + screenshots_block = '' + for image in images: + screenshots_block += f" " + desc_parts.append('[center]\n' + screenshots_block + '[/center]') + + # Signature + desc_parts.append(f"[right][url=https://github.com/Audionut/Upload-Assistant][size=1]{meta['ua_signature']}[/size][/url][/right]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = description.replace('[user]', '').replace('[/user]', '') + description = description.replace('[align=left]', '').replace('[/align]', '') + description = description.replace('[align=right]', '').replace('[/align]', '') + description = bbcode.remove_sub(description) + description = bbcode.remove_sup(description) + description = description.replace('[alert]', '').replace('[/alert]', '') + description = description.replace('[note]', '').replace('[/note]', '') + description = description.replace('[hr]', '').replace('[/hr]', '') + description = description.replace('[h1]', '[u][b]').replace('[/h1]', '[/b][/u]') + description = description.replace('[h2]', '[u][b]').replace('[/h2]', '[/b][/u]') + description = description.replace('[h3]', '[u][b]').replace('[/h3]', '[/b][/u]') + description = description.replace('[ul]', '').replace('[/ul]', '') + description = description.replace('[ol]', '').replace('[/ol]', '') + description = bbcode.convert_spoiler_to_hide(description) + description = bbcode.remove_img_resize(description) + description = bbcode.convert_comparison_to_centered(description, 1000) + description = bbcode.remove_spoiler(description) + description = bbcode.remove_list(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) - # Upload - hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + return description - with open(torrent_path, 'rb') as torrentFile: - torrentFileName = unidecode(hdt_name) - files = { - 'torrent': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + async def search_existing(self, meta, disctype): + if meta['resolution'] not in ['2160p', '1080p', '1080i', '720p']: + console.print('[bold red]Resolution must be at least 720p resolution for HDT.') + meta['skipping'] = f'{self.tracker}' + return [] + + # Ensure we have valid credentials and auth_token before searching + if not hasattr(self, 'auth_token') or not self.auth_token: + credentials_valid = await self.validate_credentials(meta) + if not credentials_valid: + console.print(f'[bold red]{self.tracker}: Failed to validate credentials for search.') + return [] + + search_url = f'{self.base_url}/torrents.php?' 
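# Note (illustrative): validate_credentials above passes token_pattern so the cookie validator
# can pull the csrfToken out of the upload page; that value is presumably what is exposed as
# HDT.secret_token and reused in the search params below. Assuming the token sits in a hidden
# input exactly as the pattern implies, the extraction step amounts to:
import re

def extract_csrf_token(html: str):
    # Returns the token string, or None when the field is missing (e.g. logged out).
    match = re.search(r'name="csrfToken" value="([^"]+)"', html)
    return match.group(1) if match else None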
+ if int(meta.get('imdb_id', 0)) != 0: + imdbID = f"tt{meta['imdb']}" + params = { + 'csrfToken': HDT.secret_token, + 'search': imdbID, + 'active': '0', + 'options': '2', + 'category[]': await self.get_category_id(meta) } - data = { - 'filename': hdt_name, - 'category': cat_id, - 'info': hdt_desc.strip() + else: + params = { + 'csrfToken': HDT.secret_token, + 'search': meta['title'], + 'category[]': await self.get_category_id(meta), + 'options': '3' } - # 3D - if "3D" in meta.get('3d', ''): - data['3d'] = 'true' + results = [] + + try: + response = await self.session.get(search_url, params=params) + soup = BeautifulSoup(response.text, 'html.parser') + rows = soup.find_all('tr') + + for row in rows: + if row.find('td', class_='mainblockcontent', string='Filename') is not None: + continue + + name_tag = row.find('a', href=lambda href: href and href.startswith('details.php?id=')) + + name = name_tag.text.strip() if name_tag else None + link = f'{self.base_url}/{name_tag["href"]}' if name_tag else None + size = None + + cells = row.find_all('td', class_='mainblockcontent') + for cell in cells: + cell_text = cell.text.strip() + if 'GiB' in cell_text or 'MiB' in cell_text: + size = cell_text + break + + if name: + results.append({ + 'name': name, + 'size': size, + 'link': link + }) + + except httpx.TimeoutException: + console.print(f'{self.tracker}: Timeout while searching for existing torrents.') + return [] + except httpx.HTTPStatusError as e: + console.print(f'{self.tracker}: HTTP error while searching: Status {e.response.status_code}.') + return [] + except httpx.RequestError as e: + console.print(f'{self.tracker}: Network error while searching: {e.__class__.__name__}.') + return [] + except Exception as e: + console.print(f'{self.tracker}: Unexpected error while searching: {e}') + return [] + + return results + + async def get_data(self, meta): + data = { + 'filename': await self.edit_name(meta), + 'category': await self.get_category_id(meta), + 'info': await self.edit_desc(meta), + 'csrfToken': HDT.secret_token, + } + + # 3D + if "3D" in meta.get('3d', ''): + data['3d'] = 'true' + + # HDR + if "HDR" in meta.get('hdr', ''): + if "HDR10+" in meta['hdr']: + data['HDR10'] = 'true' + data['HDR10Plus'] = 'true' + else: + data['HDR10'] = 'true' + if "DV" in meta.get('hdr', ''): + data['DolbyVision'] = 'true' - # HDR - if "HDR" in meta.get('hdr', ''): - if "HDR10+" in meta['hdr']: - data['HDR10'] = 'true' - data['HDR10Plus'] = 'true' - else: - data['HDR10'] = 'true' - if "DV" in meta.get('hdr', ''): - data['DolbyVision'] = 'true' + # IMDB + if int(meta.get('imdb_id')) != 0: + data['infosite'] = str(meta.get('imdb_info', {}).get('imdb_url', '') + '/') - # IMDB - if int(meta.get('imdb_id')) != 0: - data['infosite'] = f"https://www.imdb.com/title/tt%7Bmeta['imdb']}/" + # Full Season Pack + if int(meta.get('tv_pack', '0')) != 0: + data['season'] = 'true' + else: + data['season'] = 'false' - # Full Season Pack - if int(meta.get('tv_pack', '0')) != 0: - data['season'] = 'true' - else: - data['season'] = 'false' + # Anonymous check + if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): + data['anonymous'] = 'false' + else: + data['anonymous'] = 'true' - # Anonymous check - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - data['anonymous'] = 'false' - else: - data['anonymous'] = 'true' - - # Send - url = f"{self.base_url}/upload.php" - if meta['debug']: - console.print(url) - console.print(data) - 
meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - else: - with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - - session.cookies.update(await common.parseCookieFile(cookiefile)) - up = session.post(url=url, data=data, files=files) - torrentFile.close() - - # Match url to verify successful upload - try: - search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text).group(1) - except Exception as e: - if meta['debug']: - console.print(f"[red]Error occurred while searching for download link: {e}") - search = None - if search: - id = search - # modding existing torrent for adding to client instead of downloading torrent from site. - meta['tracker_status'][self.tracker]['status_message'] = f"{self.base_url}/details.php?id=" + id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), f"{self.base_url}/details.php?id=" + id) - else: - if meta['debug']: - console.print("[cyan]Request Data:") - console.print("\n\n") - console.print(f'[red]{up.text}') - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 - return + return data - async def search_existing(self, meta, disctype): - if meta['resolution'] not in ['2160p', '1080p', '1080i', '720p']: - console.print('[bold red]Resolution must be at least 720p resolution for HDT.') - meta['skipping'] = "HDT" - return - dupes = [] - with requests.Session() as session: - common = COMMON(config=self.config) - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - session.cookies.update(await common.parseCookieFile(cookiefile)) - - search_url = f"{self.base_url}/torrents.php?" - csrfToken = await self.get_csrfToken(session, search_url) - if int(meta['imdb_id']) != 0: - imdbID = f"tt{meta['imdb']}" - params = { - 'csrfToken': csrfToken, - 'search': imdbID, - 'active': '0', - 'options': '2', - 'category[]': await self.get_category_id(meta) - } - else: - params = { - 'csrfToken': csrfToken, - 'search': meta['title'], - 'category[]': await self.get_category_id(meta), - 'options': '3' - } - if meta['debug']: - console.print(f"[cyan]Searching for existing torrents on {search_url} with params: {params}") - r = session.get(search_url, params=params) - await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) - if meta['debug']: - console.print(f"[cyan]Found {len(find)} links in the search results.") - console.print(f"[cyan]first 30 links: {[each['href'] for each in find[:30]]}") - for each in find: - if each['href'].startswith('details.php?id='): - if meta['debug']: - console.print(f"[cyan]Found wanted links: {each['href']}") - dupes.append(each.text) - - return dupes + async def get_nfo(self, meta): + nfo_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + nfo_files = glob.glob(os.path.join(nfo_dir, "*.nfo")) - async def validate_credentials(self, meta): - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie is not True: - console.print('[red]Failed to validate cookies. 
Please confirm that the site is up or export a fresh cookie file from the site') - return False - return True - - async def validate_cookies(self, meta, cookiefile): - common = COMMON(config=self.config) - url = f"{self.base_url}/index.php" - cookiefile = f"{meta['base_dir']}/data/cookies/HDT.txt" - if os.path.exists(cookiefile): - with requests.Session() as session: - session.cookies.update(await common.parseCookieFile(cookiefile)) - res = session.get(url=url) - if meta['debug']: - console.print(res.url) - if res.text.find("Logout") != -1: - return True - else: - return False - else: - return False - - async def get_csrfToken(self, session, url): - r = session.get(url) - await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - csrfToken = soup.find('input', {'name': 'csrfToken'}).get('value') - return csrfToken - - def get_links(self, movie, subheading, heading_end): - description = "" - description += "\n" + subheading + "Links" + heading_end + "\n" - if 'IMAGES' in self.config: - if movie['tmdb'] != 0: - description += f" [URL=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.config['IMAGES']['tmdb_75']}[/img][/URL]" - if movie['tvdb_id'] != 0: - description += f" [URL=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.config['IMAGES']['tvdb_75']}[/img][/URL]" - if movie['tvmaze_id'] != 0: - description += f" [URL=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.config['IMAGES']['tvmaze_75']}[/img][/URL]" - if movie['mal_id'] != 0: - description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.config['IMAGES']['mal_75']}[/img][/URL]" - else: - if movie['tmdb'] != 0: - description += f"\nhttps://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}" - if movie['tvdb_id'] != 0: - description += f"\nhttps://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series" - if movie['tvmaze_id'] != 0: - description += f"\nhttps://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}" - if movie['mal_id'] != 0: - description += f"\nhttps://myanimelist.net/anime/{str(movie['mal_id'])}" - - description += "\n\n" - return description + if nfo_files: + nfo_path = nfo_files[0] + return {'nfos': (os.path.basename(nfo_path), open(nfo_path, "rb"), "application/octet-stream")} + return {} - async def edit_desc(self, meta): - subheading = "[COLOR=RED][size=4]" - heading_end = "[/size][/COLOR]" - # base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: - if meta['is_disc'] != 'BDMV': - # Beautify MediaInfo for HDT using custom template - video = meta['filelist'][0] - mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") - if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) - descfile.write(f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""") - else: - console.print("[bold red]Couldn't find the MediaInfo template") - console.print("[green]Using normal MediaInfo for the description.") + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + files = await self.get_nfo(meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + 
source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='torrent', + upload_cookies=self.session.cookies, + upload_url=f"{self.base_url}/upload.php", + hash_is_id=True, + success_text="Upload successful!", + default_announce='https://hdts-announce.ru/announce.php', + additional_files=files, + ) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: - descfile.write(f"""[left][font=consolas]\n{MI.read()}\n[/font][/left]\n\n""") - else: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as BD_SUMMARY: - descfile.write(f"""[left][font=consolas]\n{BD_SUMMARY.read()}\n[/font][/left]\n\n""") - - descfile.write(self.get_links(meta, subheading, heading_end)) - # Add Screenshots - images = meta['image_list'] - if len(images) > 0: - for image in images: - img_url = image['img_url'] - raw_url = image['raw_url'] - descfile.write(f' ') - - descfile.close() + return diff --git a/src/trackers/HHD.py b/src/trackers/HHD.py index f6ad1e6ed..e8bbf7c13 100644 --- a/src/trackers/HHD.py +++ b/src/trackers/HHD.py @@ -1,50 +1,43 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import os -import glob -import httpx - -from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class HHD(): +class HHD(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='HHD') self.config = config + self.common = COMMON(config) self.tracker = 'HHD' self.source_flag = 'HHD' - self.upload_url = 'https://homiehelpdesk.net/api/torrents/upload' - self.search_url = 'https://homiehelpdesk.net/api/torrents/filter' - self.torrent_url = 'https://homiehelpdesk.net/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://homiehelpdesk.net/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - 'aXXo', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'd3g', 'DNL', 'FaNGDiNG0', 'GalaxyTV', 'HD2DVD', 'HDTime', 'iHYTECH', 'ION10', - 'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'OFT', 'PRODJi', 'RARBG', 'Rifftrax', 'SANTi', 'SasukeducK', - 'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TGALAXY', 'TORRENTGALAXY', 'TSP', 'TSPxL', 'ViSION', 'VXT', 'WAF', 'WKS', 'x0r', 'YAWNiX', 'YIFY', 'YTS', 'PSA'] + 'aXXo', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'd3g', + 'DNL', 'FaNGDiNG0', 'GalaxyTV', 'HD2DVD', 'HDTime', 'iHYTECH', 'ION10', + 'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', + 'nHD', 'nikt0', 'nSD', 'OFT', 'PRODJi', 'RARBG', 'Rifftrax', 'SANTi', + 'SasukeducK', 'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TGALAXY', 'TORRENTGALAXY', + 'TSP', 'TSPxL', 'ViSION', 'VXT', 'WAF', 'WKS', 'x0r', 'YAWNiX', 'YIFY', 'YTS', 'PSA', ['EVO', 'WEB-DL only'] + ] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def 
get_additional_checks(self, meta): + should_continue = True + if meta['type'] == "DVDRIP": + console.print("[bold red]DVDRIP uploads are not allowed on HHD.[/bold red]") + return False - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - }.get(type, '0') - return type_id + return should_continue - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta, mapping_only=False, reverse=False, resolution=None): resolution_id = { '4320p': '1', '2160p': '2', @@ -57,131 +50,14 @@ async def get_res_id(self, resolution): '480p': '8', '480i': '9', 'Other': '10' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, 
headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://homiehelpdesk.net/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return + if mapping_only: + return resolution_id + elif reverse: + return {v: k for k, v in resolution_id.items()} + elif resolution is not None: + return {'resolution_id': resolution_id.get(resolution, '10')} else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + meta_resolution = meta.get('resolution', '') + resolved_id = resolution_id.get(meta_resolution, '10') + return {'resolution_id': resolved_id} diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index a9e3da078..03e7252fb 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -1,47 +1,94 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests +import aiofiles import os import re -import platform -import cli_ui -import httpx -from src.trackers.COMMON import COMMON from src.console import console +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language from src.rehostimages import check_hosts -from src.languages import parsed_mediainfo, process_desc_language +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class HUNO(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class HUNO(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='HUNO') self.config = config + self.common = COMMON(config) self.tracker = 'HUNO' self.source_flag = 'HUNO' - self.search_url = 'https://hawke.uno/api/torrents/filter' - self.upload_url = 'https://hawke.uno/api/torrents/upload' - self.torrent_url = 'https://hawke.uno/torrents/' - self.id_url = 'https://hawke.uno/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] + self.base_url = 'https://hawke.uno/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [ + '4K4U', 'Bearfish', 'BiTOR', 'BONE', 'D3FiL3R', 'd3g', 'DTR', 'ELiTE', + 'EVO', 'eztv', 'EzzRips', 'FGT', 'HashMiner', 'HETeam', 'HEVCBay', 'HiQVE', + 'HR-DR', 'iFT', 'ION265', 'iVy', 'JATT', 'Joy', 'LAMA', 'm3th', 'MeGusta', + 'MRN', 'Musafirboy', 'OEPlus', 'Pahe.in', 'PHOCiS', 'PSA', 'RARBG', 'RMTeam', + 'ShieldBearer', 'SiQ', 'TBD', 'Telly', 'TSP', 'VXT', 'WKS', 'YAWNiX', 'YIFY', 'YTS' + ] + self.approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost', 'bam'] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - huno_name, region_id, distributor_id = await self.get_name(meta, region_id=region_id, distributor_id=distributor_id) - if (huno_name or region_id) == "SKIPPED": - meta['tracker_status'][self.tracker]['status_message'] = "data error: 
huno_missing_data" - return + async def get_additional_checks(self, meta): + should_continue = True + + if await self.get_audio(meta) == "SKIPPED": + console.print(f'{self.tracker}: No audio languages were found, the upload cannot continue.') + return False + + if meta['video_codec'] != "HEVC" and meta['type'] in {"ENCODE", "WEBRIP", "DVDRIP", "HDTV"}: + if not meta['unattended']: + console.print('[bold red]Only x265/HEVC encodes are allowed at HUNO') + return False + + if not meta['valid_mi_settings']: + console.print(f"[bold red]No encoding settings in mediainfo, skipping {self.tracker} upload.[/bold red]") + return False + if not meta['is_disc'] and meta['type'] in ['ENCODE', 'WEBRIP', 'DVDRIP', 'HDTV']: + tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + for track in tracks: + if track.get('@type') == "Video": + encoding_settings = track.get('Encoded_Library_Settings', {}) + + if encoding_settings: + crf_match = re.search(r'crf[ =:]+([\d.]+)', encoding_settings, re.IGNORECASE) + if crf_match: + if meta.get('debug', False): + console.print(f"Found CRF value: {crf_match.group(1)}") + crf_value = float(crf_match.group(1)) + if crf_value > 22: + if not meta['unattended']: + console.print(f"CRF value too high: {crf_value} for HUNO") + return False + else: + if meta.get('debug', False): + console.print("No CRF value found in encoding settings.") + bit_rate = track.get('BitRate') + if bit_rate and "Animation" not in meta.get('genre', ""): + try: + bit_rate_num = int(bit_rate) + except (ValueError, TypeError): + bit_rate_num = None + + if bit_rate_num is not None: + bit_rate_kbps = bit_rate_num / 1000 + + if bit_rate_kbps < 3000: + if not meta.get('unattended', False): + console.print(f"Video bitrate too low: {bit_rate_kbps:.0f} kbps for HUNO") + return False + + return should_continue + + async def get_stream(self, meta): + return {'stream': await self.is_plex_friendly(meta)} + + async def check_image_hosts(self, meta): url_host_mapping = { "ibb.co": "imgbb", "ptpimg.me": "ptpimg", @@ -49,117 +96,84 @@ async def upload(self, meta, disctype): "imgbox.com": "imgbox", "imagebam.com": "bam", } - approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost', 'bam'] - await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) + await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=self.approved_image_hosts) + + async def get_description(self, meta): if 'HUNO_images_key' in meta: image_list = meta['HUNO_images_key'] else: image_list = meta['image_list'] - await common.unit3d_edit_desc(meta, self.tracker, self.signature, image_list=image_list) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta) - resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + return {'description': await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker, image_list=image_list, approved_image_hosts=self.approved_image_hosts)} + + async def get_mediainfo(self, meta): if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + mediainfo = await self.common.get_bdmv_mediainfo(meta, remove=['File size', 'Overall bit rate']) else: - mi_dump 
= open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': huno_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': await self.is_plex_friendly(meta), - 'sd': meta['sd'], - 'keywords': meta['keywords'], - # 'season_pack': meta.get('tv_pack', 0), - # 'featured' : 0, - # 'free' : 0, - # 'double_up' : 0, - # 'sticky' : 0, - } + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as f: + mediainfo = await f.read() - tracker_config = self.config['TRACKERS'][self.tracker] + return {'mediainfo': mediainfo} - if 'internal' in tracker_config: - if tracker_config['internal'] and meta['tag'] and meta['tag'][1:] in tracker_config.get('internal_groups', []): - data['internal'] = 1 - else: - data['internal'] = 0 + async def get_featured(self, meta): + return {} + + async def get_free(self, meta): if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) + free = meta.get('freeleech', 0) + return {'free': free} + return {} + + async def get_doubleup(self, meta): + return {} + + async def get_sticky(self, meta): + return {} + + async def get_season_number(self, meta): if meta.get('category') == 'TV' and meta.get('tv_pack') == 1: - data['season_pack'] = 1 + return {'season_pack': 1} + return {} - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': tracker_config['api_key'].strip() - } + async def get_episode_number(self, meta): + return {} - if meta['debug'] is False: - try: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - meta['tracker_status'][self.tracker]['status_message'] = response.json() - except Exception as e: - meta['tracker_status'][self.tracker]['status_message'] = f" data error - Error uploading torrent: {e}" - return - try: - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hawke.uno/torrents/" + t_id) - except Exception: - console.print("Error getting torrent ID from response.") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
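# Note (illustrative, an assumption about the UNIT3D base class): each hook above
# (get_featured, get_free, get_season_number, ...) returns a partial payload dict, or {} when
# the field does not apply. The base uploader presumably merges these into the final request
# body; a simplified sketch of that composition, with a hypothetical merge loop:
async def build_payload(tracker, meta) -> dict:
    data = {}
    for hook in (tracker.get_free, tracker.get_internal, tracker.get_season_number):
        data.update(await hook(meta))  # later hooks can override earlier keys
    return data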
- open_torrent.close() + async def get_personal_release(self, meta): + return {} + + async def get_internal(self, meta): + internal = 0 + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != '' and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + internal = 1 + + return {'internal': internal} + + async def get_additional_files(self, meta): + return {} async def get_audio(self, meta): channels = meta.get('channels', "") - codec = meta.get('audio', "").replace("DD+", "DDP").replace("EX", "").replace("Dual-Audio", "").replace(channels, "") - dual = "Dual-Audio" in meta.get('audio', "") + codec = meta.get('audio', "").replace("DD+", "DDP").replace("EX", "").replace("Dual-Audio", "").replace("Dubbed", "").replace(channels, "") languages = "" - if dual: - languages = "Dual" + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + if meta.get('audio_languages'): + languages = meta['audio_languages'] + languages = set(languages) + if len(languages) > 2: + languages = "Multi" + elif len(languages) > 1: + languages = "Dual" + else: + languages = list(languages)[0] + + if "zxx" in languages: + languages = "NONE" + elif not languages: + languages = "SKIPPED" else: - if not meta.get('audio_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) - if meta.get('audio_languages'): - languages = meta['audio_languages'] - languages = set(languages) - if len(languages) > 1: - languages = "Dual" - else: - languages = next(iter(languages), "SKIPPED") - - if "zxx" in languages: - languages = "NONE" - elif not languages: languages = "SKIPPED" return f'{codec} {channels} {languages}' @@ -168,25 +182,9 @@ def get_basename(self, meta): path = next(iter(meta['filelist']), meta['path']) return os.path.basename(path) - async def get_name(self, meta, region_id=None, distributor_id=None): - # Copied from Prep.get_name() then modified to match HUNO's naming convention. - # It was much easier to build the name from scratch than to alter the existing name. - - region_name = None - distributor_name = None - - if meta.get('is_disc') == "BDMV": - common = COMMON(config=self.config) - if not region_id: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - region_name = cli_ui.ask_string("ULCX: Region code not found for disc. Please enter it manually (UPPERCASE): ") - region_id = await common.unit3d_region_ids(region_name) - else: - region_id = "SKIPPED" - if not distributor_id: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - distributor_name = cli_ui.ask_string("ULCX: Distributor code not found for disc. 
Please enter it manually (UPPERCASE): ") - distributor_id = await common.unit3d_distributor_ids(distributor_name) + async def get_name(self, meta): + distributor_name = meta.get('distributor', "") + region = meta.get('region', '') basename = self.get_basename(meta) if meta.get('hardcoded-subs'): @@ -198,8 +196,6 @@ async def get_name(self, meta, region_id=None, distributor_id=None): year = meta.get('year', "") resolution = meta.get('resolution', "") audio = await self.get_audio(meta) - if "SKIPPED" in audio: - return "SKIPPED", "SKIPPED", "SKIPPED" service = meta.get('service', "") season = meta.get('season', "") if meta.get('tvdb_season_number', ""): @@ -214,10 +210,15 @@ async def get_name(self, meta, region_id=None, distributor_id=None): repack = f"[{repack}]" three_d = meta.get('3D', "") tag = meta.get('tag', "").replace("-", "- ") - if tag == "": + tag_lower = tag.lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + tag = re.sub(f"- {invalid_tag}", "", tag, flags=re.IGNORECASE) tag = "- NOGRP" source = meta.get('source', "").replace("Blu-ray", "BluRay") - console.print(f"[bold cyan]Source: {source}") + if source == "BluRay" and "2160" in resolution: + source = "UHD BluRay" if any(x in source.lower() for x in ["pal", "ntsc"]) and type == "ENCODE": source = "DVD" hdr = meta.get('hdr', "") @@ -230,10 +231,6 @@ async def get_name(self, meta, region_id=None, distributor_id=None): distributor = meta.get('distributor').title() else: distributor = "" - if region_name: - region = region_name - else: - region = meta.get('region', "") video_codec = meta.get('video_codec', "") video_encode = meta.get('video_encode', "").replace(".", "") if 'x265' in basename and not meta.get('type') == "WEBDL": @@ -248,19 +245,19 @@ async def get_name(self, meta, region_id=None, distributor_id=None): if meta['category'] == "MOVIE": # MOVIE SPECIFIC if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': - name = f"{title} ({year}) {distributor} {edition} {hc} ({resolution} {region} {three_d} {source} {hybrid} {video_codec} {hfr} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {distributor} {edition} {hc} ({resolution} {region} {three_d} {source} {hybrid} {video_codec} {hdr} {hfr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'DVD': name = f"{title} ({year}) {distributor} {edition} {hc} ({resolution} {source} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({year}) {distributor} {edition} {hc} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": # BluRay Remux - name = f"{title} ({year}) {edition} ({resolution} {three_d} {source} {hybrid} REMUX {video_codec} {hfr} {hdr} {audio} {tag}) {repack}" + elif type == "REMUX" and source.endswith("BluRay"): # BluRay Remux + name = f"{title} ({year}) {edition} ({resolution} {three_d} {source} {hybrid} REMUX {video_codec} {hdr} {hfr} {audio} {tag}) {repack}" elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux name = f"{title} ({year}) {edition} {hc} ({resolution} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "ENCODE": # Encode - name = f"{title} ({year}) {edition} {hc} ({resolution} {scale} {source} {hybrid} {video_encode} {hfr} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {edition} {hc} ({resolution} {scale} 
{source} {hybrid} {video_encode} {hdr} {hfr} {audio} {tag}) {repack}" elif type in ("WEBDL", "WEBRIP"): # WEB - name = f"{title} ({year}) {edition} {hc} ({resolution} {scale} {service} WEB-DL {hybrid} {video_encode} {hfr} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {edition} {hc} ({resolution} {scale} {service} WEB-DL {hybrid} {video_encode} {hdr} {hfr} {audio} {tag}) {repack}" elif type == "HDTV": # HDTV name = f"{title} ({year}) {edition} {hc} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" elif type == "DVDRIP": @@ -268,47 +265,44 @@ async def get_name(self, meta, region_id=None, distributor_id=None): elif meta['category'] == "TV": # TV SPECIFIC if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': - name = f"{title} ({year}) {season}{episode} {distributor} {edition} {hc} ({resolution} {region} {three_d} {source} {hybrid} {video_codec} {hfr} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {season}{episode} {distributor} {edition} {hc} ({resolution} {region} {three_d} {source} {hybrid} {video_codec} {hdr} {hfr} {audio} {tag}) {repack}" if meta['is_disc'] == 'DVD': name = f"{title} ({year}) {season}{episode} {distributor} {edition} {hc} ({resolution} {source} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source == "BluRay": # BluRay Remux - name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {three_d} {source} {hybrid} REMUX {video_codec} {hfr} {hdr} {audio} {tag}) {repack}" # SOURCE + name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {three_d} {source} {hybrid} REMUX {video_codec} {hdr} {hfr} {audio} {tag}) {repack}" # SOURCE elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE elif type == "ENCODE": # Encode - name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {scale} {source} {hybrid} {video_encode} {hfr} {hdr} {audio} {tag}) {repack}" # SOURCE + name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {scale} {source} {hybrid} {video_encode} {hdr} {hfr} {audio} {tag}) {repack}" # SOURCE elif type in ("WEBDL", "WEBRIP"): # WEB - name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {scale} {service} WEB-DL {hybrid} {video_encode} {hfr} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {season}{episode} {edition} ({resolution} {scale} {service} WEB-DL {hybrid} {video_encode} {hdr} {hfr} {audio} {tag}) {repack}" elif type == "HDTV": # HDTV name = f"{title} ({year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" - return ' '.join(name.split()).replace(": ", " - "), region_id, distributor_id - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + name = ' '.join(name.split()).replace(": ", " - ") + name = re.sub(r'\s{2,}', ' ', name) + return {'name': name} async def get_type_id(self, meta): - type = meta.get('type').upper() - video_encode = meta.get('video_encode') - - if type == 'REMUX': - return '2' - elif type in ('WEBDL', 'WEBRIP'): - return '15' if 'x265' in video_encode else '3' - elif type in ('ENCODE', 'HDTV'): - return '15' - elif type == 
'DISC': - return '1' + type_value = (meta.get('type') or '').lower() + video_encode = (meta.get('video_encode') or '').lower() + + if type_value == 'remux': + type_id = '2' + elif type_value in ('webdl', 'webrip'): + type_id = '15' if 'x265' in video_encode else '3' + elif type_value in ('encode', 'hdtv'): + type_id = '15' + elif type_value == 'disc': + type_id = '1' else: - return '0' + type_id = '0' - async def get_res_id(self, resolution): + return {'type_id': type_id} + + async def get_resolution_id(self, meta): resolution_id = { 'Other': '10', '4320p': '1', @@ -323,97 +317,13 @@ async def get_res_id(self, resolution): '540i': '11', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} async def is_plex_friendly(self, meta): lossy_audio_codecs = ["AAC", "DD", "DD+", "OPUS"] - if any(l in meta["audio"] for l in lossy_audio_codecs): # noqa E741 + if any(codec in meta["audio"] for codec in lossy_audio_codecs): return 1 return 0 - - async def search_existing(self, meta, disctype): - if meta['video_codec'] != "HEVC" and meta['type'] in {"ENCODE", "WEBRIP", "DVDRIP", "HDTV"}: - if not meta['unattended']: - console.print('[bold red]Only x265/HEVC encodes are allowed at HUNO') - meta['skipping'] = "HUNO" - return - - if not meta['is_disc'] and meta['type'] in ['ENCODE', 'WEBRIP', 'DVDRIP', 'HDTV']: - parsed_info = await parsed_mediainfo(meta) - for video_track in parsed_info.get('video', []): - encoding_settings = video_track.get('encoding_settings') - if not encoding_settings: - if not meta['unattended']: - console.print("No encoding settings found in MEDIAINFO for HUNO") - meta['skipping'] = "HUNO" - return [] - if encoding_settings: - crf_match = re.search(r'crf[ =:]+([\d.]+)', encoding_settings, re.IGNORECASE) - if crf_match: - crf_value = float(crf_match.group(1)) - if crf_value > 22: - if not meta['unattended']: - console.print(f"CRF value too high: {crf_value} for HUNO") - meta['skipping'] = "HUNO" - return [] - else: - bit_rate = video_track.get('bit_rate') - if bit_rate and "Animation" not in meta.get('genre', ""): - bit_rate_num = None - # Match number and unit (e.g., 42.4 Mb/s, 42400 kb/s, etc.) 
- match = re.search(r'([\d.]+)\s*([kM]?b/s)', bit_rate.replace(',', ''), re.IGNORECASE) - if match: - value = float(match.group(1)) - unit = match.group(2).lower() - if unit == 'mb/s': - bit_rate_num = int(value * 1000) - elif unit == 'kb/s': - bit_rate_num = int(value) - else: - bit_rate_num = int(value) - if bit_rate_num is not None and bit_rate_num < 3000: - if not meta['unattended']: - console.print(f"Video bitrate too low: {bit_rate_num} kbps for HUNO") - meta['skipping'] = "HUNO" - return [] - - dupes = [] - - params = { - 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - attributes = each['attributes'] - result = { - 'name': attributes['name'], - 'size': attributes['size'] - } - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/IHD.py b/src/trackers/IHD.py new file mode 100644 index 000000000..9fcb59210 --- /dev/null +++ b/src/trackers/IHD.py @@ -0,0 +1,187 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import cli_ui +import pycountry +import re + +from src.console import console +from src.languages import process_desc_language, has_english_language +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D + + +class IHD(UNIT3D): + def __init__(self, config): + super().__init__(config, tracker_name='IHD') + self.config = config + self.common = COMMON(config) + self.tracker = 'IHD' + self.source_flag = 'InfinityHD' + self.base_url = 'https://infinityhd.net/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] + pass + + async def get_category_id(self, meta, category=None, reverse=False, mapping_only=False): + category_name = meta['category'] + anime = meta.get('anime', False) + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'ANIME': '3', + 'ANIME MOVIE': '4', + } + + is_anime_movie = False + is_anime = False + + if category_name == 'MOVIE' and anime is True: + is_anime_movie = True + + if category_name == 'TV' and anime is True: + is_anime = True + + if is_anime: + return {'category_id': '3'} + if is_anime_movie: + return {'category_id': '4'} + + if mapping_only: + return category_id + elif reverse: + return {v: k for k, v in category_id.items()} + elif category is not None: + return {'category_id': category_id.get(category, '0')} + else: + meta_category = 
meta.get('category', '') + resolved_id = category_id.get(meta_category, '0') + return {'category_id': resolved_id} + + async def get_resolution_id(self, meta, resolution=None, reverse=False, mapping_only=False): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4' + } + if mapping_only: + return resolution_id + elif reverse: + return {v: k for k, v in resolution_id.items()} + elif resolution is not None: + return {'resolution_id': resolution_id.get(resolution, '10')} + else: + meta_resolution = meta.get('resolution', '') + resolved_id = resolution_id.get(meta_resolution, '10') + return {'resolution_id': resolved_id} + + def _get_language_code(self, track_or_string): + """Extract and normalize language to ISO alpha-2 code""" + if isinstance(track_or_string, dict): + lang = track_or_string.get("Language", "") + if isinstance(lang, dict): + lang = lang.get("String", "") + else: + lang = track_or_string + if not lang: + return "" + lang_str = str(lang).lower() + + # Strip country code if present (e.g., "en-US" → "en") + if "-" in lang_str: + lang_str = lang_str.split("-")[0] + + if len(lang_str) == 2: + return lang_str + try: + lang_obj = ( + pycountry.languages.get(name=lang_str.title()) + or pycountry.languages.get(alpha_2=lang_str) + or pycountry.languages.get(alpha_3=lang_str) + ) + return lang_obj.alpha_2.lower() if lang_obj else lang_str + except (AttributeError, KeyError, LookupError): + return lang_str + + def original_language_check(self, meta): + if "mediainfo" not in meta: + return False + + original_languages = { + lang.lower() + for lang in meta.get("original_language", []) + if isinstance(lang, str) and lang.strip() + } + if not original_languages: + return False + + tracks = meta["mediainfo"].get("media", {}).get("track", []) + for track in tracks: + if track.get("@type") != "Audio": + continue + if "commentary" in str(track.get("Title", "")).lower(): + continue + lang_code = self._get_language_code(track) + if lang_code and lang_code.lower() in original_languages: + return True + return False + + async def get_name(self, meta): + ihd_name = meta['name'] + resolution = meta.get('resolution') + + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + audio_languages = meta['audio_languages'] + if audio_languages and not await has_english_language(audio_languages): + foreign_lang = meta['audio_languages'][0].upper() + ihd_name = ihd_name.replace(resolution, f"{foreign_lang} {resolution}", 1) + + return {'name': ihd_name} + + async def get_additional_checks(self, meta): + should_continue = True + + if meta['resolution'] not in ['4320p', '2160p', '1440p', '1080p', '1080i']: + if not meta['unattended'] or meta['debug']: + console.print(f'[bold red]Uploads must be at least 1080 resolution for {self.tracker}.[/bold red]') + should_continue = False + + if not meta['valid_mi_settings']: + if not meta['unattended'] or meta['debug']: + console.print(f"[bold red]No encoding settings in mediainfo, skipping {self.tracker} upload.[/bold red]") + should_continue = False + + if not meta['is_disc'] == "BDMV": + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + original_language = self.original_language_check(meta) + has_eng_audio = await has_english_language(meta.get('audio_languages')) + has_eng_subs = await has_english_language(meta.get('subtitle_languages')) + # Require at least one English audio/subtitle track or an original 
language audio track + if not (original_language or has_eng_audio or has_eng_subs): + if not meta['unattended'] or meta['debug']: + console.print(f'[bold red]{self.tracker} requires at least one English audio or subtitle track or an original language audio track.') + should_continue = False + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if (not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False))): + console.print(f'[bold red]Pornographic content is not allowed at {self.tracker}, unless it follows strict rules.') + yes = cli_ui.ask_yes_no(f'Do you have permission to upload this torrent to {self.tracker}?', default=False) + if yes: + should_continue = True + else: + should_continue = False + else: + if not meta['unattended'] or meta['debug']: + console.print('[bold red]Pornographic content is not allowed at IHD, unless it follows strict rules.') + should_continue = False + + return should_continue diff --git a/src/trackers/IS.py b/src/trackers/IS.py new file mode 100644 index 000000000..4bfcba354 --- /dev/null +++ b/src/trackers/IS.py @@ -0,0 +1,299 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import aiofiles +import glob +import httpx +import os +import platform +import re +from bs4 import BeautifulSoup +from src.bbcode import BBCODE +from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.get_desc import DescriptionBuilder + + +class IS: + def __init__(self, config): + self.config = config + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) + self.tracker = 'IS' + self.source_flag = 'https://immortalseed.me/' + self.banned_groups = [''] + self.base_url = 'https://immortalseed.me/' + self.torrent_url = 'https://immortalseed.me/details.php?hash=' + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f"Upload Assistant/2.3 ({platform.system()} {platform.release()})" + }, timeout=30) + + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + error_text='Forget your password', + ) + + async def generate_description(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker, resize=True) + if episode_overview: + desc_parts.append(f'Title: {title}') + desc_parts.append(f'Overview: {episode_overview}') + + # File information + mediainfo = await builder.get_mediainfo_section(meta, self.tracker) + if mediainfo: + desc_parts.append(f'{mediainfo}') + + bdinfo = await builder.get_bdinfo_section(meta) + if bdinfo: + desc_parts.append(f'{bdinfo}') + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Screenshots + images = meta.get('image_list', []) + if images: + screenshots_block = '' + for image in images: + screenshots_block += f"{image['raw_url']}\n" + desc_parts.append('Screenshots:\n' + screenshots_block) + + # 
Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + async def search_existing(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + dupes = [] + + if meta['category'] == "MOVIE": + search_type = 't_genre' + search_query = meta.get('imdb_info', {}).get('imdbID', '') + + elif meta['category'] == "TV": + search_type = 't_name' + search_query = meta.get('title') + f" {meta.get('season', '')}{meta.get('episode', '')}" + + search_url = f'{self.base_url}/browse.php?do=search&keywords={search_query}&search_type={search_type}' + + try: + response = await self.session.get(search_url) + response.raise_for_status() + soup = BeautifulSoup(response.text, 'html.parser') + + torrent_table = soup.find('table', id='sortabletable') + + if not torrent_table: + return dupes + + torrent_rows = torrent_table.select('tbody > tr')[1:] + + for row in torrent_rows: + name_tag = row.select_one('a[href*="details.php?id="]') + if not name_tag: + continue + + name = name_tag.get_text(strip=True) + torrent_link = name_tag.get('href') + + size_tag = row.select_one('td:nth-of-type(5)') + size = size_tag.get_text(strip=True) if size_tag else None + + duplicate_entry = { + 'name': name, + 'size': size, + 'link': torrent_link + } + dupes.append(duplicate_entry) + + except Exception as e: + console.print(f'[bold red]Error searching for duplicates on {self.tracker}: {e}[/bold red]') + + return dupes + + async def get_category_id(self, meta): + resolution = meta.get('resolution') + category = meta.get('category') + genres = meta.get('genres', '').lower() + keywords = meta.get('keywords', '').lower() + is_anime = meta.get('anime') + non_eng = False + sd = meta.get('sd', False) + if meta.get('original_language') != "en": + non_eng = True + + anime = 32 + childrens_cartoons = 31 + documentary_hd = 54 + documentary_sd = 53 + + movies_4k = 59 + movies_4k_non_english = 60 + + movies_hd = 16 + movies_hd_non_english = 18 + + movies_low_def = 17 + movies_low_def_non_english = 34 + + movies_sd = 14 + movies_sd_non_english = 33 + + tv_480p = 47 + tv_4k = 64 + tv_hd = 8 + tv_sd_x264 = 48 + tv_sd_xvid = 9 + + tv_season_packs_4k = 63 + tv_season_packs_hd = 4 + tv_season_packs_sd = 6 + + if category == "MOVIE": + if "documentary" in genres or "documentary" in keywords: + if sd: + return documentary_sd + else: + return documentary_hd + elif is_anime: + return anime + elif resolution == "2160p": + if non_eng: + return movies_4k_non_english + else: + return movies_4k + elif not sd: + if non_eng: + return movies_hd_non_english + else: + return movies_hd + elif sd: + if non_eng: + return movies_sd_non_english + else: + return movies_sd + else: + if non_eng: + return movies_low_def_non_english + else: + return movies_low_def + + elif category == "TV": + if "documentary" in genres or "documentary" in keywords: + if sd: + return documentary_sd + else: + return documentary_hd + elif is_anime: + return anime + elif "children" in genres or "cartoons" in genres or "children" in keywords or "cartoons" in keywords: + return childrens_cartoons + elif meta.get('tv_pack'): + if resolution == 
"2160p": + return tv_season_packs_4k + elif sd: + return tv_season_packs_sd + else: + return tv_season_packs_hd + elif resolution == "2160p": + return tv_4k + elif resolution in ["1080p", "1080i", "720p"]: + return tv_hd + elif sd: + if "xvid" in meta.get("video_encode", '').lower(): + return tv_sd_xvid + else: + return tv_sd_x264 + else: + return tv_480p + + async def get_nfo(self, meta): + nfo_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + nfo_files = glob.glob(os.path.join(nfo_dir, "*.nfo")) + + if nfo_files: + nfo_path = nfo_files[0] + return {'nfofile': (os.path.basename(nfo_path), open(nfo_path, "rb"), "application/octet-stream")} + else: + nfo_content = await self.generate_description(meta) + nfo_bytes = nfo_content.encode('utf-8') + nfo_filename = f"{meta.get('scene_name', meta['uuid'])}.nfo" + return {'nfofile': (nfo_filename, nfo_bytes, "application/octet-stream")} + + def get_name(self, meta): + if meta.get('scene_name'): + return meta.get('scene_name') + else: + is_name = meta.get('name').replace(meta['aka'], '').replace('Dubbed', '').replace('Dual-Audio', '') + is_name = re.sub(r"\s{2,}", " ", is_name) + is_name = is_name.replace(' ', '.') + return is_name + + async def get_data(self, meta): + data = { + 'UseNFOasDescr': 'no', + 'message': f"{meta.get('overview', '')}\n\n[youtube]{meta.get('youtube', '')}[/youtube]", + 'category': await self.get_category_id(meta), + 'subject': self.get_name(meta), + 'nothingtopost': "1", + 't_image_url': meta.get('poster'), + 'submit': 'Upload Torrent', + } + + if meta['category'] == "MOVIE": + data['t_link'] = meta['imdb_info']['imdb_url'] + + # Anon + anon = not (meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False)) + if anon: + data.update({ + 'anonymous': 'yes' + }) + else: + data.update({ + 'anonymous': 'no' + }) + + return data + + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + files = await self.get_nfo(meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + hash_is_id=True, + torrent_field_name='torrentfile', + torrent_name=f"{meta.get('clean_name', 'placeholder')}", + upload_cookies=self.session.cookies, + upload_url="https://immortalseed.me/upload.php", + additional_files=files, + success_text="Thank you", + ) + + return diff --git a/src/trackers/ITT.py b/src/trackers/ITT.py index d2539568a..a9ddc5420 100644 --- a/src/trackers/ITT.py +++ b/src/trackers/ITT.py @@ -1,45 +1,53 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx - -from src.trackers.COMMON import COMMON +import re from src.console import console +from src.languages import process_desc_language +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class ITT(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class ITT(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='ITT') self.config = config + self.common = COMMON(config) self.tracker = 'ITT' self.source_flag = 'ItaTorrents' - self.upload_url = 'https://itatorrents.xyz/api/torrents/upload' - self.search_url = 
'https://itatorrents.xyz/api/torrents/filter' - self.torrent_url = 'https://itatorrents.xyz/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://itatorrents.xyz/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_type_name(self, meta): + type_name = None + + uuid_string = meta.get('uuid', '') + if uuid_string: + lower_uuid = uuid_string.lower() + + if 'dlmux' in lower_uuid: + type_name = 'DLMux' + elif 'bdmux' in lower_uuid: + type_name = 'BDMux' + elif 'webmux' in lower_uuid: + type_name = 'WEBMux' + elif 'dvdmux' in lower_uuid: + type_name = 'DVDMux' + elif 'bdrip' in lower_uuid: + type_name = 'BDRip' + + if type_name is None: + type_name = meta.get('type') - async def get_type_id(self, type): - type_id = { + return type_name + + async def get_type_id(self, meta, mapping_only=False): + type_id_map = { 'DISC': '1', 'REMUX': '2', 'WEBDL': '4', @@ -51,149 +59,117 @@ async def get_type_id(self, type): 'WEBMux': '26', 'DVDMux': '39', 'BDRip': '25', - 'DVDRip': '24', + 'DVDRIP': '24', 'Cinema-MD': '14', - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', "False"): - anon = 0 - else: - anon = 1 + } + if mapping_only: + return type_id_map + type_name = await self.get_type_name(meta) + type_id = type_id_map.get(type_name, '0') - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + return {'type_id': type_id} + + async def get_name(self, meta): + type = await self.get_type_name(meta) + title = meta.get('title', "") + year = meta.get('year', "") + if int(meta.get('manual_year')) > 0: + year = meta.get('manual_year') + resolution = meta.get('resolution', "") + if resolution == "OTHER": + resolution = "" + audio = meta.get('audio', "") + season = meta.get('season') or "" + episode = meta.get('episode') or "" + repack = meta.get('repack', "") + three_d = meta.get('3D', "") + tag = meta.get('tag', "") + source = meta.get('source', "") + hdr = meta.get('hdr', "") + if meta.get('is_disc', "") == "BDMV": + video_codec = meta.get('video_codec', "") + region = 
meta.get('region', "") if meta.get('region', "") is not None else "" + elif meta.get('is_disc', "") == "DVD": + region = meta.get('region', "") if meta.get('region', "") is not None else "" else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + video_codec = meta.get('video_codec', "") + edition = meta.get('edition', "") + if 'hybrid' in edition.upper(): + edition = edition.replace('Hybrid', '').strip() + + if meta['category'] == "TV": + if meta['search_year'] != "": + year = meta['year'] + else: + year = "" + if meta.get('manual_date'): + season = '' + episode = '' + if meta.get('no_season', False) is True: + season = '' + if meta.get('no_year', False) is True: + year = '' + + dubs = await self.get_dubs(meta) + + """ + From https://itatorrents.xyz/wikis/20 + + Struttura Titolo per: Full Disc, Remux + Name Year S##E## Cut REPACK Resolution Edition Region 3D SOURCE TYPE Hi10P HDR VCodec Dub ACodec Channels Object-Tag + + Struttura Titolo per: Encode, WEB-DL, WEBRip, HDTV, DLMux, BDMux, WEBMux, DVDMux, BDRip, DVDRip + Name Year S##E## Cut REPACK Resolution Edition 3D SOURCE TYPE Dub ACodec Channels Object Hi10P HDR VCodec-Tag + """ + + if type == 'DISC' or type == "REMUX": + itt_name = f"{title} {year} {season}{episode} {repack} {resolution} {edition} {region} {three_d} {source} {'REMUX' if type == 'REMUX' else ''} {hdr} {video_codec} {dubs} {audio}" - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = 
response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://itatorrents.xyz/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + type = ( + type + .replace('WEBDL', 'WEB-DL') + .replace('WEBRIP', 'WEBRip') + .replace('DVDRIP', 'DVDRip') + .replace('ENCODE', 'BluRay') + ) + itt_name = f"{title} {year} {season}{episode} {repack} {resolution} {edition} {three_d} {type} {dubs} {audio} {hdr} {video_codec}" + try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + itt_name = ' '.join(itt_name.split()) + except Exception: + console.print("[bold red]Unable to generate name. Please re-run and correct any of the following args if needed.") + console.print(f"--category [yellow]{meta['category']}") + console.print(f"--type [yellow]{meta['type']}") + console.print(f"--source [yellow]{meta['source']}") + console.print("[bold green]If you specified type, try also specifying source") + + exit() + name_notag = itt_name + itt_name = name_notag + tag + itt_name = itt_name.replace('Dubbed', '').replace('Dual-Audio', '') + + return {"name": re.sub(r"\s{2,}", " ", itt_name)} + + async def get_dubs(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + dubs = '' + audio_languages = set(meta.get('audio_languages', [])) + if audio_languages: + dubs = " ".join([lang[:3].upper() for lang in audio_languages]) + return dubs + + async def get_additional_checks(self, meta): + # From rules: + # "Non sono ammessi film e serie tv che non comprendono il doppiaggio in italiano." + # Translates to "Films and TV series that do not include Italian dubbing are not permitted." 
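+        # Illustrative outcomes (hypothetical values, assuming check_language_requirements
+        # inspects the detected audio languages):
+        #   audio languages ['English']            -> requirement fails, upload is skipped
+        #   audio languages ['Italian', 'English'] -> requirement passes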
+ italian_languages = ["italian", "italiano"] + if not await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=italian_languages, check_audio=True + ): + console.print( + "Upload Rules: https://itatorrents.xyz/wikis/5" + ) + return False + return True diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 1c576715b..5950e5d9e 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -1,138 +1,105 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import os -import glob -import httpx +import aiofiles +import re from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class LCD(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class LCD(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='LCD') self.config = config + self.common = COMMON(config) self.tracker = 'LCD' self.source_flag = 'LOCADORA' - self.search_url = 'https://locadora.cc/api/torrents/filter' - self.torrent_url = 'https://locadora.cc/torrents/' - self.upload_url = 'https://locadora.cc/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://locadora.cc/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', ''), meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + async def get_name(self, meta): + if meta.get('is_disc', '') == 'BDMV': + name = meta.get('name') - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD].torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 
'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + name = meta['uuid'] + + replacements = { + '.mkv': '', + '.mp4': '', + '.': ' ', + 'DDP2 0': 'DDP2.0', + 'DDP5 1': 'DDP5.1', + 'H 264': 'H.264', + 'H 265': 'H.265', + 'DD+7 1': 'DDP7.1', + 'AAC2 0': 'AAC2.0', + 'DD5 1': 'DD5.1', + 'DD2 0': 'DD2.0', + 'TrueHD 7 1': 'TrueHD 7.1', + 'TrueHD 5 1': 'TrueHD 5.1', + 'DTS-HD MA 7 1': 'DTS-HD MA 7.1', + 'DTS-HD MA 5 1': 'DTS-HD MA 5.1', + 'DTS-X 7 1': 'DTS-X 7.1', + 'DTS-X 5 1': 'DTS-X 5.1', + 'FLAC 2 0': 'FLAC 2.0', + 'FLAC 5 1': 'FLAC 5.1', + 'DD1 0': 'DD1.0', + 'DTS ES 5 1': 'DTS ES 5.1', + 'DTS5 1': 'DTS 5.1', + 'AAC1 0': 'AAC1.0', + 'DD+5 1': 'DDP5.1', + 'DD+2 0': 'DDP2.0', + 'DD+1 0': 'DDP1.0', } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + for old, new in replacements.items(): + name = name.replace(old, new) + + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + name = re.sub(f"-{invalid_tag}", "", name, flags=re.IGNORECASE) + name = f'{name}-NoGroup' + + return {'name': name} + + async def get_region_id(self, meta): + if meta.get('region') == 'EUR': + return {} + + region_id = await self.common.unit3d_region_ids(meta.get('region')) if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + return {'region_id': region_id} + + return {} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://locadora.cc/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return + async def get_mediainfo(self, meta): + if meta['bdinfo'] is not None: + mediainfo = await self.common.get_bdmv_mediainfo(meta, remove=['File size', 'Overall bit rate']) else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as f: + mediainfo = await f.read() + + return {'mediainfo': mediainfo} - async def get_cat_id(self, category_name, edition, meta): + async def get_category_id(self, meta): category_id = { 'MOVIE': '1', 'TV': '2', 'ANIMES': '6' - }.get(category_name, '0') + }.get(meta['category'], '0') if meta['anime'] is True and category_id == '2': category_id = '6' - return category_id + return {'category_id': category_id} - async def get_type_id(self, type): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -140,10 +107,10 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') - return type_id + }.get(meta['type'], '0') + return {'type_id': type_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { # '8640p':'10', '4320p': '1', @@ -157,46 +124,5 @@ async def get_res_id(self, resolution): '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') - return resolution_id - - async def search_existing(self, meta, disctype): - dupes = [] - console.print("[yellow]Buscando por duplicatas no tracker...") - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes - - async def edit_name(self, meta): - - name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") - - return name + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} diff --git a/src/trackers/LDU.py b/src/trackers/LDU.py index 50c49e795..eae5bd164 100644 --- a/src/trackers/LDU.py +++ b/src/trackers/LDU.py @@ -1,31 +1,32 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import httpx -import glob -import os import langcodes +import re +from src.languages import has_english_language from src.trackers.COMMON import COMMON from src.console import console -from src.languages import has_english_language +from src.trackers.UNIT3D import UNIT3D -class LDU(): +class LDU(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='LDU') self.config = config + self.common = COMMON(config) self.tracker = 'LDU' self.source_flag = 'LDU' - self.upload_url = 'https://theldu.to/api/torrents/upload' - self.search_url = 'https://theldu.to/api/torrents/filter' - self.torrent_url = 'https://theldu.to/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://theldu.to/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [] pass - async def get_cat_id(self, meta): - genres = f"{meta.get('keywords', '')} {meta.get('genres', '')}" + async def get_category_id(self, meta): + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] sound_mixes = meta.get('imdb_info', {}).get('sound_mixes', []) category_id = { @@ -38,7 +39,7 @@ async def get_cat_id(self, meta): if 'hentai' in genres.lower(): category_id = '10' - elif any(x in genres.lower() for x in ['xxx', 'erotic', 'porn', 'adult', 'orgy']): + elif any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): if not await has_english_language(meta.get('subtitle_languages', [])): category_id = '45' else: @@ -82,9 +83,9 @@ async def get_cat_id(self, meta): else: category_id = '41' - 
return category_id + return {'category_id': category_id} - async def get_type_id(self, type, meta): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -92,129 +93,14 @@ async def get_type_id(self, type, meta): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(meta.get('type'), '0') if any(x in meta.get('edition', '').lower() for x in ["fanedit", "fanres"]): type_id = '16' - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta) - name = await self.edit_name(meta, cat_id) - type_id = await self.get_type_id(meta['type'], meta) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + return {'type_id': type_id} - if meta['debug'] is False: - response = 
requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - if response.status_code == 500: - meta['tracker_status'][self.tracker]['status_message'] = "500 Internal Server Error. It probably uploaded through" - else: - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - try: - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception as e: - console.print(f"[bold red]Error extracting torrent ID: {e}[/bold red]") - except Exception: - console.print("It may have uploaded, go check") - open_torrent.close() - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def edit_name(self, meta, cat_id): + async def get_name(self, meta): ldu_name = meta['name'] + cat_id = (await self.get_category_id(meta))['category_id'] non_eng = False non_eng_audio = False iso_audio = None @@ -257,37 +143,4 @@ async def edit_name(self, meta, cat_id): if language_parts: ldu_name = f"{ldu_name} {' '.join(language_parts)}" - return ldu_name - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb_id'], - 'types[]': await self.get_type_id(meta['type'], meta), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'name': ldu_name} diff --git a/src/trackers/LST.py b/src/trackers/LST.py index a7b1e3d57..01fc53f20 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -1,44 +1,35 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx -from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class LST(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class LST(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='LST') self.config = config + self.common = COMMON(config) self.tracker = 'LST' self.source_flag = 'LST.GG' - self.upload_url = 'https://lst.gg/api/torrents/upload' - self.search_url = 'https://lst.gg/api/torrents/filter' - self.torrent_url = 'https://lst.gg/torrents/' - self.id_url = 'https://lst.gg/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://lst.gg/' + self.banned_url = f'{self.base_url}/api/bannedReleaseGroups' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_additional_checks(self, meta): + should_continue = True + if not meta['valid_mi_settings']: + console.print(f"[bold red]No encoding settings in mediainfo, skipping {self.tracker} upload.[/bold red]") + return False - async def get_type_id(self, type): + return should_continue + + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -47,130 +38,45 @@ async def get_type_id(self, type): 'HDTV': '6', 'ENCODE': '3', 'DVDRIP': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - draft = await self.get_flag(meta, 'draft') - name = await self.edit_name(meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await 
common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None + }.get(meta['type'], '0') + return {'type_id': type_id} - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - if meta.get('service') == "hentai": - desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc - - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - open_torrent = open(torrent_file_path, 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + async def get_additional_data(self, meta): data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - 'draft_queue_opt_in': draft, + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), + 'draft_queue_opt_in': await self.get_flag(meta, 'draft'), } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + # Only add edition_id if we have a valid edition + edition_id = await self.get_edition(meta) + if edition_id is not None: + data['edition_id'] = edition_id + + return data + + async def get_edition(self, meta): + edition_mapping = { + 'Alternative Cut': 12, + 'Collector\'s Edition': 1, + 'Director\'s Cut': 2, + 'Extended Cut': 3, + 'Extended Uncut': 4, + 'Extended Unrated': 5, + 'Limited Edition': 6, + 'Special Edition': 7, + 'Theatrical Cut': 8, + 'Uncut': 9, + 'Unrated': 10, + 'X Cut': 11, + 'Other': 0 # Default value for "Other" } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, 
params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://lst.gg/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return + edition = meta.get('edition', '') + if edition in edition_mapping: + return edition_mapping[edition] else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() + return None - async def edit_name(self, meta): + async def get_name(self, meta): lst_name = meta['name'] resolution = meta.get('resolution') video_encode = meta.get('video_encode') @@ -184,45 +90,4 @@ async def edit_name(self, meta): lst_name = lst_name.replace(f"{meta['source']}", f"{resolution}", 1) lst_name = lst_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) - return lst_name - - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'name': lst_name} diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 98a513c50..9c8b31193 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -1,233 +1,138 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx +import re from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class LT(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class LT(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='LT') self.config = config + self.common = COMMON(config) self.tracker = 'LT' self.source_flag = 'Lat-Team "Poder Latino"' - self.upload_url = 'https://lat-team.com/api/torrents/upload' - self.search_url = 'https://lat-team.com/api/torrents/filter' - self.torrent_url = 'https://lat-team.com/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://lat-team.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = ["EVO"] pass - async def get_cat_id(self, category_name, meta): + async def get_category_id(self, meta): category_id = { 'MOVIE': '1', 'TV': '2', - 'ANIME': '5', - 'TELENOVELAS': '8', - 'Asiáticas & Turcas': '20', - }.get(category_name, '0') - # if is anime - if meta['anime'] is True and category_id == '2': - category_id = '5' - # elif is telenovela - elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']): - category_id = '8' - # if is TURCAS o Asiáticas - elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres']: - category_id = '20' - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def edit_name(self, meta): - lt_name = meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(meta['aka'], '').replace(' ', ' ').strip() + }.get(meta['category'], '0') + + keywords = meta.get('keywords', '').lower() + overview = meta.get('overview', '').lower() + genres = meta.get('genres', '').lower() + soap_keywords = ['telenovela', 'novela', 'soap', 'culebrón', 'culebron'] + origin_countries = meta.get('origin_country', []) + + if meta['category'] == 'TV': + # Anime + if meta.get('anime', False): + 
category_id = '5' + # Telenovela / Soap + elif any(kw in keywords for kw in soap_keywords) or any(kw in overview for kw in soap_keywords): + category_id = '8' + # Turkish & Asian + elif 'drama' in genres and any(c in [ + 'AE', 'AF', 'AM', 'AZ', 'BD', 'BH', 'BN', 'BT', 'CN', 'CY', 'GE', 'HK', 'ID', 'IL', 'IN', + 'IQ', 'IR', 'JO', 'JP', 'KG', 'KH', 'KP', 'KR', 'KW', 'KZ', 'LA', 'LB', 'LK', 'MM', 'MN', + 'MO', 'MV', 'MY', 'NP', 'OM', 'PH', 'PK', 'PS', 'QA', 'SA', 'SG', 'SY', 'TH', 'TJ', 'TL', + 'TM', 'TR', 'TW', 'UZ', 'VN', 'YE' + ] for c in origin_countries): + category_id = '20' + + return {'category_id': category_id} + + async def get_name(self, meta): + lt_name = ( + meta['name'] + .replace('Dual-Audio', '') + .replace('Dubbed', '') + .replace(meta['aka'], '') + ) + if meta['type'] != 'DISC': # DISC don't have mediainfo - # Check if is HYBRID (Copied from BLU.py) - if 'hybrid' in meta.get('uuid').lower(): - if "repack" in meta.get('uuid').lower(): - lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') - else: - lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") # Check if original language is "es" if true replace title for AKA if available if meta.get('original_language') == 'es' and meta.get('aka') != "": lt_name = lt_name.replace(meta.get('title'), meta.get('aka').replace('AKA', '')).strip() # Check if audio Spanish exists - audios = [ - audio for audio in meta['mediainfo']['media']['track'][2:] - if audio.get('@type') == 'Audio' - and isinstance(audio.get('Language'), str) - and audio.get('Language').lower() in {'es-419', 'es', 'es-mx', 'es-ar', 'es-cl', 'es-ve', 'es-bo', 'es-co', - 'es-cr', 'es-do', 'es-ec', 'es-sv', 'es-gt', 'es-hn', 'es-ni', 'es-pa', - 'es-py', 'es-pe', 'es-pr', 'es-uy'} - and "commentary" not in str(audio.get('Title', '')).lower() - ] + + audio_latino_check = { + "es-419", "es-mx", "es-ar", "es-cl", "es-ve", + "es-bo", "es-co", "es-cr", "es-do", "es-ec", + "es-sv", "es-gt", "es-hn", "es-ni", "es-pa", + "es-py", "es-pe", "es-pr", "es-uy"} + + audio_castilian_check = ["es", "es-es"] + # Use keywords instead of massive exact-match lists + # "latino" matches: "latino", "latinoamérica", "latinoamericano", etc. + latino_keywords = ["latino", "latin america"] + # "castellano" matches any title explicitly labeled as such. + castilian_keywords = ["castellano"] + + audios = [] + has_latino = False + has_castilian = False + + for audio in meta['mediainfo']['media']['track'][2:]: + if audio.get("@type") != "Audio": + continue + lang = audio.get("Language", "").lower() + title = str(audio.get("Title", "")).lower() + + if "commentary" in title: + continue + + # Check if title contains keywords + is_latino_title = any(kw in title for kw in latino_keywords) + is_castilian_title = any(kw in title for kw in castilian_keywords) + + # 1. Check strict Latino language codes or Edge Case: Language is 'es' but Title contains Latino keywords + if lang in audio_latino_check or (lang == 'es' and is_latino_title): + has_latino = True + audios.append(audio) + + # 2. 
Edge Case: Language is 'es' and Title contains Castilian keywords or Fallback: Check strict Castilian codes (includes 'es' as default) + elif (lang == 'es' and is_castilian_title) or lang in audio_castilian_check: + has_castilian = True + audios.append(audio) + if len(audios) > 0: # If there is at least 1 audio spanish - lt_name = lt_name + if not has_latino and has_castilian: + lt_name = lt_name.replace(meta['tag'], f" [CAST]{meta['tag']}") + # else: no special tag needed for Latino-only or mixed audio # if not audio Spanish exists, add "[SUBS]" elif not meta.get('tag'): lt_name = lt_name + " [SUBS]" else: lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") - return lt_name - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category'], meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - # region_id = await common.unit3d_region_ids(meta.get('region')) - # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + return {"name": re.sub(r"\s{2,}", " ", lt_name)} + + async def get_additional_checks(self, meta): + spanish_languages = ["spanish", "spanish (latin america)"] + if not await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=spanish_languages, check_audio=True, check_subtitle=True + ): + return False + return True + + async def get_additional_data(self, meta): data = { - 'name': lt_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - # if distributor_id != 0: - # data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = int(meta.get('season_int', '0')) 
- data['episode_number'] = int(meta.get('episode_int', '0')) - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://lat-team.com/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return data + + async def get_distributor_ids(self, meta): + return {} + + async def get_region_id(self, meta): + return {} diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index e1325058d..637d48bcd 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -1,19 +1,23 @@ -import requests +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import aiofiles.os import asyncio -from src.console import console -import traceback -from torf import Torrent +import cli_ui import httpx -import xml.etree.ElementTree as ET import os -import cli_ui import pickle +import pyotp import re -from pathlib import Path -from src.trackers.COMMON import COMMON -from datetime import datetime -from src.torrentcreate import CustomTorrent, torf_cb, create_torrent +import traceback +import xml.etree.ElementTree as ET + +from torf import Torrent + +from data.config import config +from src.console import console from src.rehostimages import check_hosts +from src.torrentcreate import create_torrent +from src.trackers.COMMON import COMMON class MTV(): @@ -33,79 +37,63 @@ def __init__(self, config): self.forum_link = 'https://www.morethantv.me/wiki.php?action=article&id=73' self.search_url = 'https://www.morethantv.me/api/torznab' self.banned_groups = [ - 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'iPlanet', - 'KiNGDOM', 'Leffe', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', - 'STUTTERSHIT', 'TERMiNAL', 'ViSION', 'WAF', 'x0r', 'YIFY', ['EVO', 'WEB-DL Only'] + '3LTON', '[Oj]', 'aXXo', 'BDP', 'BRrip', 'CM8', 'CrEwSaDe', 'CMCT', + 'DeadFish', 'DNL', 'ELiTE', 'AFG', 'ZMNT', + 'FaNGDiNG0', 'FRDS', 'FUM', 'h65', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'JIVE', + 'KiNGDOM', 'LAMA', 'Leffe', 'LOAD', 'mHD', 'mRS', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'nSD', + 'PandaRG', 'PRODJi', 'QxR', 'RARBG', 'RDN', 'SANTi', 'STUTTERSHIT', + 'TERMiNAL', # TERMiNAL: low bitrate UHD + 'TM', 'ViSiON', # ViSiON: Xvid releases -- re-encoded + 'WAF', 'x0r', 'XS', 'YIFY', 'ZKBL', 'ZmN' ] pass + # For loading + async def async_pickle_loads(self, data): + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, pickle.loads, data) + + # For dumping + async def async_pickle_dumps(self, obj): + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, pickle.dumps, obj) + + async def check_image_hosts(self, meta): + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "imgbox.com": "imgbox", + } + + await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) + return + async def upload(self, meta, disctype): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename="BASE") torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - if not os.path.exists(torrent_file_path): - torrent_filename = "BASE" - torrent_file_path = 
f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + if not await aiofiles.os.path.exists(torrent_file_path): + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename="BASE") - torrent = Torrent.read(torrent_file_path) + loop = asyncio.get_running_loop() + torrent = await loop.run_in_executor(None, Torrent.read, torrent_file_path) if torrent.piece_size > 8388608: tracker_config = self.config['TRACKERS'].get(self.tracker, {}) if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "false": console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") - if meta.get('mkbrr', False): - from data.config import config - tracker_url = config['TRACKERS']['MTV'].get('announce_url', "https://fake.tracker/").strip() + meta['max_piece_size'] = '8' + tracker_url = config['TRACKERS']['MTV'].get('announce_url', "https://fake.tracker/").strip() + torrent_create = f"[{self.tracker}]" - # Create the torrent with the tracker URL - torrent_create = f"[{self.tracker}]" - create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) - torrent_filename = "[MTV]" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - else: - meta['max_piece_size'] = '8' - if meta['is_disc']: - include = [] - exclude = [] - else: - include = ["*.mkv", "*.mp4", "*.ts"] - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - - new_torrent = CustomTorrent( - meta=meta, - path=Path(meta['path']), - trackers=["https://fake.tracker/"], - source="Audionut", - private=True, - exclude_globs=exclude, # Ensure this is always a list - include_globs=include, # Ensure this is always a list - creation_date=datetime.now(), - comment="Created by Audionut's Upload Assistant", - created_by="Audionut's Upload Assistant" - ) - - new_torrent.piece_size = 8 * 1024 * 1024 - new_torrent.validate_piece_size() - new_torrent.generate(callback=torf_cb, interval=5) - new_torrent.write(torrent_file_path, overwrite=True) - - torrent_filename = "[MTV]" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_create) else: console.print("[red]Piece size is OVER 8M and skip_if_rehash enabled. 
Skipping upload.") return - approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] - url_host_mapping = { - "ibb.co": "imgbb", - "ptpimg.me": "ptpimg", - "imgbox.com": "imgbox", - } - - await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) cat_id = await self.get_cat_id(meta) resolution_id = await self.get_res_id(meta['resolution']) source_id = await self.get_source_id(meta) @@ -121,11 +109,11 @@ async def upload(self, meta, disctype): anon = 1 desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - desc = open(desc_path, 'r', encoding='utf-8').read() + async with aiofiles.open(desc_path, 'r', encoding='utf-8') as f: + desc = await f.read() - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - with open(torrent_file_path, 'rb') as f: - tfile = f.read() + async with aiofiles.open(torrent_file_path, 'rb') as f: + tfile = await f.read() files = { 'file_input': (f"[{self.tracker}].torrent", tfile) @@ -152,55 +140,78 @@ async def upload(self, meta, disctype): } if not meta['debug']: - with requests.Session() as session: - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) - response = session.post(url=self.upload_url, data=data, files=files, allow_redirects=True) - try: - if "torrents.php" in str(response.url): - meta['tracker_status'][self.tracker]['status_message'] = response.url - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), str(response.url)) - elif 'https://www.morethantv.me/upload.php' in str(response.url): - meta['tracker_status'][self.tracker]['status_message'] = "data error - Still on upload page - upload may have failed" - if "error" in response.text.lower() or "failed" in response.text.lower(): - meta['tracker_status'][self.tracker]['status_message'] = "data error - Upload failed - check form data" - elif str(response.url) == "https://www.morethantv.me/" or str(response.url) == "https://www.morethantv.me/index.php": - if "Project Luminance" in response.text: - meta['tracker_status'][self.tracker]['status_message'] = "data error - Not logged in - session may have expired" - if "'GroupID' cannot be null" in response.text: - meta['tracker_status'][self.tracker]['status_message'] = "data error - You are hitting this site bug: https://www.morethantv.me/forum/thread/3338?" - elif "Integrity constraint violation" in response.text: - meta['tracker_status'][self.tracker]['status_message'] = "data error - Proper site bug" - else: - if "authkey.php" in str(response.url): - meta['tracker_status'][self.tracker]['status_message'] = "data error - No DL link in response, It may have uploaded, check manually." 
+ try: + async with aiofiles.open(cookiefile, 'rb') as cf: + cookie_data = await cf.read() + cookies = await self.async_pickle_loads(cookie_data) + + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' + } + + async with httpx.AsyncClient( + cookies=cookies, + timeout=10.0, + follow_redirects=True, + headers=headers + ) as client: + + response = await client.post(url=self.upload_url, data=data, files=files) + + try: + if "torrents.php" in str(response.url): + meta['tracker_status'][self.tracker]['status_message'] = response.url + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), str(response.url)) + elif 'https://www.morethantv.me/upload.php' in str(response.url): + meta['tracker_status'][self.tracker]['status_message'] = "data error - Still on upload page - upload may have failed" + if "error" in response.text.lower() or "failed" in response.text.lower(): + meta['tracker_status'][self.tracker]['status_message'] = "data error - Upload failed - check form data" + elif str(response.url) == "https://www.morethantv.me/" or str(response.url) == "https://www.morethantv.me/index.php": + if "Project Luminance" in response.text: + meta['tracker_status'][self.tracker]['status_message'] = "data error - Not logged in - session may have expired" + if "'GroupID' cannot be null" in response.text: + meta['tracker_status'][self.tracker]['status_message'] = "data error - You are hitting this site bug: https://www.morethantv.me/forum/thread/3338?" + elif "Integrity constraint violation" in response.text: + meta['tracker_status'][self.tracker]['status_message'] = "data error - Proper site bug" else: - console.print(f"response URL: {response.url}") - console.print(f"response status: {response.status_code}") - except Exception: - meta['tracker_status'][self.tracker]['status_message'] = "data error -It may have uploaded, check manually." - print(traceback.print_exc()) + if "authkey.php" in str(response.url): + meta['tracker_status'][self.tracker]['status_message'] = "data error - No DL link in response, It may have uploaded, check manually." + else: + console.print(f"response URL: {response.url}") + console.print(f"response status: {response.status_code}") + except Exception: + meta['tracker_status'][self.tracker]['status_message'] = "data error -It may have uploaded, check manually." + print(traceback.print_exc()) + except (httpx.RequestError, Exception) as e: + meta['tracker_status'][self.tracker]['status_message'] = f"data error: {e}" + return else: - console.print("[cyan]Request Data:") - console.print(data) + console.print("[cyan]MTV Request Data:") + debug_data = data.copy() + if 'auth' in debug_data: + debug_data['auth'] = debug_data['auth'][:3] + '...' if len(debug_data['auth']) > 3 else '***' + console.print(debug_data) meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8') as f: + base = await f.read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta['bdinfo'] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as f: + bd_dump = await f.read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read().strip() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + mi_dump = (await f.read()).strip() bd_dump = None if bd_dump: - desc.write("[mediainfo]" + bd_dump + "[/mediainfo]\n\n") + await desc.write("[mediainfo]" + bd_dump + "[/mediainfo]\n\n") elif mi_dump: - desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") + await desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") if ( meta.get('is_disc') == "DVD" and @@ -208,8 +219,15 @@ async def edit_desc(self, meta): len(meta['discs']) > 0 and 'vob_mi' in meta['discs'][0] ): - desc.write("[mediainfo]" + meta['discs'][0]['vob_mi'] + "[/mediainfo]\n\n") - + await desc.write("[mediainfo]" + meta['discs'][0]['vob_mi'] + "[/mediainfo]\n\n") + try: + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + console.print("[green]Adding tonemapped header to description") + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + await desc.write(tonemapped_header) + await desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") if f'{self.tracker}_images_key' in meta: images = meta[f'{self.tracker}_images_key'] else: @@ -218,18 +236,18 @@ async def edit_desc(self, meta): for image in images: raw_url = image['raw_url'] img_url = image['img_url'] - desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") + await desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") base = re.sub(r'\[/?quote\]', '', base, flags=re.IGNORECASE).strip() if base != "": - desc.write(f"\n\n[spoiler=Notes]{base}[/spoiler]") - desc.close() + await desc.write(f"\n\n[spoiler=Notes]{base}[/spoiler]") + return async def edit_group_desc(self, meta): description = "" if meta['imdb_id'] != 0: - description += f"https://www.imdb.com/title/tt%7Bmeta['imdb']}" + description += str(meta.get('imdb_info', {}).get('imdb_url', '')) if meta['tmdb'] != 0: description += f"\nhttps://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}" if meta['tvdb_id'] != 0: @@ -285,9 +303,14 @@ async def edit_name(self, meta): # Check if there is a valid file extension, otherwise, skip the split if '.' 
in mtv_name and mtv_name.split('.')[-1].isalpha() and len(mtv_name.split('.')[-1]) <= 4: mtv_name = os.path.splitext(mtv_name)[0] - # Add -NoGrp if missing tag - if meta['tag'] == "": - mtv_name = f"{mtv_name}-NoGrp" + + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + mtv_name = re.sub(f"-{invalid_tag}", "", mtv_name, flags=re.IGNORECASE) + mtv_name = f"{mtv_name}-NOGRP" + mtv_name = ' '.join(mtv_name.split()) mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') @@ -363,7 +386,7 @@ async def get_tags(self, meta): tags = [] # Genres # MTV takes issue with some of the pulled TMDB tags, and I'm not hand checking and attempting - # to regex however many tags need changing, so they're just geting skipped + # to regex however many tags need changing, so they're just getting skipped # tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')]) # Resolution tags.append(meta['resolution'].lower()) @@ -443,68 +466,57 @@ async def get_tags(self, meta): async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") - if not os.path.exists(cookiefile): + if not await aiofiles.os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your username and password is valid.') - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if 'mtv_timeout' in meta and meta['mtv_timeout']: + meta['skipping'] = "MTV" + return False + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): recreate = cli_ui.ask_yes_no("Log in again and create new session?") else: recreate = True if recreate is True: - if os.path.exists(cookiefile): - os.remove(cookiefile) + if await aiofiles.os.path.exists(cookiefile): + await aiofiles.os.remove(cookiefile) # Using async file removal await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) return vcookie else: return False - vapi = await self.validate_api() - if vapi is not True: - console.print('[red]Failed to validate API. 
Please confirm that the site is up and your API key is valid.') - return True - async def validate_api(self): - url = self.search_url - params = { - 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - } - try: - r = requests.get(url, params=params) - if not r.ok: - if "unauthorized api key" in r.text.lower(): - console.print("[red]Invalid API Key") - return False - return True - except Exception: - return False + return True async def validate_cookies(self, meta, cookiefile): url = "https://www.morethantv.me/index.php" - if os.path.exists(cookiefile): + if await aiofiles.os.path.exists(cookiefile): try: - with requests.Session() as session: - # Add a timeout to prevent hanging indefinitely - session.timeout = 10 # 10 seconds timeout - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) + async with aiofiles.open(cookiefile, 'rb') as cf: + data = await cf.read() + cookies_dict = await self.async_pickle_loads(data) - # Add error handling for the request + async with httpx.AsyncClient(cookies=cookies_dict, timeout=10) as client: try: - resp = session.get(url=url, timeout=10) - if resp.text.find("Logout") != -1: + resp = await client.get(url=url) + if meta['debug']: + console.print('[cyan]Validating MTV Cookies:') + + if "Logout" in resp.text: return True else: console.print("[yellow]Valid session not found in cookies") return False - except requests.exceptions.Timeout: + except httpx.TimeoutException: console.print(f"[red]Connection to {url} timed out. The site may be down or unreachable.") + meta['mtv_timeout'] = True return False - except requests.exceptions.ConnectionError: + except httpx.ConnectError: console.print(f"[red]Failed to connect to {url}. The site may be down or your connection is blocked.") + meta['mtv_timeout'] = True return False except Exception as e: console.print(f"[red]Error connecting to MTV: {str(e)}") @@ -519,19 +531,21 @@ async def validate_cookies(self, meta, cookiefile): async def get_auth(self, cookiefile): url = "https://www.morethantv.me/index.php" try: - if os.path.exists(cookiefile): - with requests.Session() as session: - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) + if await aiofiles.os.path.exists(cookiefile): + async with aiofiles.open(cookiefile, 'rb') as cf: + data = await cf.read() + cookies = await self.async_pickle_loads(data) + + async with httpx.AsyncClient(cookies=cookies, timeout=10) as client: try: - resp = session.get(url=url, timeout=10) + resp = await client.get(url=url) if "authkey=" in resp.text: auth = resp.text.rsplit('authkey=', 1)[1][:32] return auth else: console.print("[yellow]Auth key not found in response") return "" - except requests.exceptions.RequestException as e: + except httpx.RequestError as e: console.print(f"[red]Error getting auth key: {str(e)}") return "" else: @@ -543,10 +557,7 @@ async def get_auth(self, cookiefile): async def login(self, cookiefile): try: - with requests.Session() as session: - # Add a timeout to all requests - session.timeout = 15 - + async with httpx.AsyncClient(timeout=25, follow_redirects=True) as client: url = 'https://www.morethantv.me/login' payload = { 'username': self.config['TRACKERS'][self.tracker].get('username'), @@ -558,50 +569,115 @@ async def login(self, cookiefile): } try: - res = session.get(url="https://www.morethantv.me/login", timeout=15) + res = await client.get(url="https://www.morethantv.me/login") + + if 'name="token" value="' not in res.text: + 
console.print("[red]Unable to find token in login page") + return False + token = res.text.rsplit('name="token" value="', 1)[1][:48] - # token and CID from cookie needed for post to login + payload["token"] = token - resp = session.post(url=url, data=payload, timeout=10) + resp = await client.post(url=url, data=payload) + + if str(resp.url).endswith('twofactor/login'): - # handle 2fa - if resp.url.endswith('twofactor/login'): otp_uri = self.config['TRACKERS'][self.tracker].get('otp_uri') if otp_uri: - import pyotp mfa_code = pyotp.parse_uri(otp_uri).now() else: mfa_code = console.input('[yellow]MTV 2FA Code: ') + two_factor_token = resp.text.rsplit('name="token" value="', 1)[1][:48] two_factor_payload = { - 'token': resp.text.rsplit('name="token" value="', 1)[1][:48], + 'token': two_factor_token, 'code': mfa_code, 'submit': 'login' } - resp = session.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) - # checking if logged in + resp = await client.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) + + await asyncio.sleep(1) if 'authkey=' in resp.text: console.print('[green]Successfully logged in to MTV') - with open(cookiefile, 'wb') as cf: - pickle.dump(session.cookies, cf) + cookies_dict = dict(client.cookies) + cookies_data = await self.async_pickle_dumps(cookies_dict) + async with aiofiles.open(cookiefile, 'wb') as cf: + await cf.write(cookies_data) + console.print(f"[green]Cookies saved to {cookiefile}") + return True else: console.print('[bold red]Something went wrong while trying to log into MTV') - await asyncio.sleep(1) - console.print(resp.url) - except requests.exceptions.Timeout: + console.print(f"[red]Final URL: {resp.url}") + return False + + except httpx.TimeoutException: console.print("[red]Connection to MTV timed out. The site may be down or unreachable.") return False - except requests.exceptions.ConnectionError: + except httpx.ConnectError: console.print("[red]Failed to connect to MTV. 
The site may be down or your connection is blocked.") return False except Exception as e: console.print(f"[red]Error during MTV login: {str(e)}") + console.print(f"[dim red]{traceback.format_exc()}[/dim red]") return False except Exception as e: console.print(f"[red]Unexpected error during login: {str(e)}") + console.print(f"[dim red]{traceback.format_exc()}[/dim red]") return False async def search_existing(self, meta, disctype): + if meta.get('bloated', False): + console.print(f'[bold red]Bloated releases are not allowed at {self.tracker}[/bold red]') + meta['skipping'] = "MTV" + return [] + if meta['type'] not in ['WEBDL']: + if meta.get('tag', "") and any(x in meta['tag'] for x in ['EVO']): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Group {meta["tag"]} is only allowed for raw type content at {self.tracker}[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "MTV" + return [] + else: + meta['skipping'] = "MTV" + return [] + + allowed_anime = ['Thighs', 'sam', 'Vanilla', 'OZR', 'Netaro', 'Datte13', 'UDF', 'Baws', 'ARC', 'Dae', 'MTBB', + 'Okay-Subs', 'hchcsen', 'Noyr', 'TTGA', 'GJM', 'Kaleido-Subs', 'GJM-Kaleido', 'LostYears', + 'Reza', 'Aergia', 'Drag', 'Crow', 'Arid', 'JySzE', 'iKaos', 'Spirale', 'CsS', 'FLE', 'WSE', + 'Legion', 'AC', 'UQW', 'Commie', 'Chihiro'] + if meta['resolution'] not in ['2160p'] and meta['video_codec'] in ['HEVC']: + if meta['anime'] and meta.get('tag', "") and not any(x in meta['tag'] for x in allowed_anime): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Only 4K HEVC anime releases from {meta["tag"]} are allowed at {self.tracker}[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "MTV" + return [] + else: + console.print(f'[bold red]Only 4K HEVC releases are allowed at {self.tracker}[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "MTV" + return [] + + disallowed_keywords = {'XXX', 'Erotic', 'Porn'} + disallowed_genres = {'Adult', 'Erotica'} + if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])) or any(genre.lower() in disallowed_genres for genre in map(str.lower, meta.get('combined_genres', []))): + if (not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False))): + console.print(f'[bold red]Porn/xxx is not allowed at {self.tracker}.[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + meta['skipping'] = "MTV" + return [] + else: + meta['skipping'] = "MTV" + return [] + dupes = [] # Build request parameters @@ -628,9 +704,17 @@ async def search_existing(self, meta, disctype): if response.status_code == 200 and response.text: # Parse XML response try: - response_xml = ET.fromstring(response.text) + loop = asyncio.get_running_loop() + response_xml = await loop.run_in_executor(None, ET.fromstring, response.text) for each in response_xml.find('channel').findall('item'): - result = each.find('title').text + result = { + 'name': each.find('title').text, + 'files': each.find('title').text, + 'file_count': int(each.find('files').text), + 'size': int(each.find('size').text), + 'link': each.find('guid').text, + 'download': each.find('link').text + } dupes.append(result) except ET.ParseError: 
console.print("[red]Failed to parse XML response from MTV API") diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 864a55ac8..d187f0bf2 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -1,7 +1,9 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -import json -import requests +import aiofiles +import cli_ui import httpx +import json from src.trackers.COMMON import COMMON from src.console import console @@ -15,6 +17,7 @@ class NBL(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config self.tracker = 'NBL' @@ -47,11 +50,15 @@ async def upload(self, meta, disctype): await common.edit_torrent(meta, self.tracker, self.source_flag) if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read().strip() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'file_input': open_torrent} + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() + files = {'file_input': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent')} data = { 'api_key': self.api_key, 'tvmazeid': int(meta.get('tvmaze_id', 0)), @@ -60,30 +67,47 @@ async def upload(self, meta, disctype): 'ignoredupes': 'on' } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data) - try: - if response.ok: - response = response.json() - meta['tracker_status'][self.tracker]['status_message'] = response - else: - meta['tracker_status'][self.tracker]['status_message'] = response.text - except Exception: - console.print_exception() - console.print("[bold yellow]It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() + try: + if not meta['debug']: + async with httpx.AsyncClient(timeout=10) as client: + response = await client.post(url=self.upload_url, files=files, data=data) + if response.status_code in [200, 201]: + try: + response_data = response.json() + except json.JSONDecodeError: + meta['tracker_status'][self.tracker]['status_message'] = "data error: NBL json decode error, the API is probably down" + return + else: + response_data = { + "error": f"Unexpected status code: {response.status_code}", + "response_content": response.text + } + meta['tracker_status'][self.tracker]['status_message'] = response_data + else: + console.print("[cyan]NBL Request Data:") + console.print(data) + meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
+ except Exception as e: + meta['tracker_status'][self.tracker]['status_message'] = f"data error: Upload failed: {e}" async def search_existing(self, meta, disctype): if meta['category'] != 'TV': - if not meta['unattended']: - console.print("[red]Only TV Is allowed at NBL") - meta['skipping'] = "NBL" - return [] + if meta['tvmaze_id'] != 0: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print("[red]Only TV or TV Movies are allowed at NBL, this has a tvmaze ID[/red]") + if cli_ui.ask_yes_no("Do you want to upload it?", default=False): + pass + else: + meta['skipping'] = "NBL" + return [] + else: + meta['skipping'] = "NBL" + return [] + else: + if not meta['unattended']: + console.print("[red]Only TV Is allowed at NBL") + meta['skipping'] = "NBL" + return [] if meta.get('is_disc') is not None: if not meta['unattended']: @@ -117,19 +141,28 @@ async def search_existing(self, meta, disctype): data = response.json() for each in data.get('result', {}).get('items', []): if meta['resolution'] in each.get('tags', []): - dupes.append(each['rls_name']) + file_list = each.get('file_list', []) + result = { + 'name': each.get('rls_name', ''), + 'files': ', '.join(file_list) if isinstance(file_list, list) else str(file_list), + 'size': int(each.get('size', 0)), + 'link': f'https://nebulance.io/torrents.php?id={each.get("group_id", "")}', + 'file_count': len(file_list) if isinstance(file_list, list) else 1, + 'download': each.get('download', ''), + } + dupes.append(result) except json.JSONDecodeError: - console.print("[bold yellow]Response content is not valid JSON. Skipping this API call.") + console.print("[bold yellow]NBL response content is not valid JSON. Skipping this API call.") meta['skipping'] = "NBL" else: - console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + console.print(f"[bold red]NBL HTTP request failed. 
Status: {response.status_code}") meta['skipping'] = "NBL" except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") + console.print("[bold red]NBL request timed out after 5 seconds") meta['skipping'] = "NBL" except httpx.RequestError as e: - console.print(f"[bold red]An error occurred while making the request: {e}") + console.print(f"[bold red]NBL an error occurred while making the request: {e}") meta['skipping'] = "NBL" except KeyError as e: console.print(f"[bold red]Unexpected KeyError: {e}") @@ -138,7 +171,7 @@ async def search_existing(self, meta, disctype): dupes.append("ERROR: PLEASE CHECK FOR EXISTING RELEASES MANUALLY") except Exception as e: meta['skipping'] = "NBL" - console.print(f"[bold red]Unexpected error: {e}") + console.print(f"[bold red]NBL unexpected error: {e}") console.print_exception() return dupes diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 6512dec1d..d12b42633 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -1,55 +1,62 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import re +import aiofiles import os -import httpx +import re from src.bbcode import BBCODE -from src.trackers.COMMON import COMMON from src.console import console -from src.rehostimages import check_hosts from src.languages import process_desc_language, has_english_language +from src.rehostimages import check_hosts +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class OE(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class OE(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='OE') self.config = config + self.common = COMMON(config) self.tracker = 'OE' self.source_flag = 'OE' - self.search_url = 'https://onlyencodes.cc/api/torrents/filter' - self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' - self.torrent_url = 'https://onlyencodes.cc/torrents/' - self.id_url = 'https://onlyencodes.cc/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://onlyencodes.cc/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', + '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'Alcaide_Kira', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DE3PM', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', - 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', + 'HiQVE', 'ION10', 'iPlanet', 'iVy', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 
'neoHEVC', 'NeXus', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', - 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', - 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', - 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', + 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SM737', 'SpaceFish', 'SPASM', 'SSA', + 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'ToVaR', 'TSP', 'TSPxL', 'UnKn0wn', 'URANiME', 'UTR', + 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YAWNTiC', 'YAWNiX', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT' ] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) + async def get_additional_checks(self, meta): + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if not meta['unattended']: + console.print('[bold red]Erotic not allowed at OE.') + return False + + if not meta['is_disc'] == "BDMV": + if not await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=["english"], check_audio=True, check_subtitle=True + ): + return False + + return True + + async def check_image_hosts(self, meta): approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'onlyimage', 'ptscreens', "passtheimage"] url_host_mapping = { "ibb.co": "imgbb", @@ -62,152 +69,130 @@ async def upload(self, meta, disctype): } await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=approved_image_hosts) - await self.edit_desc(meta, self.tracker, self.signature) - should_skip = meta['tracker_status'][self.tracker].get('skip_upload', False) - if should_skip: - meta['tracker_status'][self.tracker]['status_message'] = "data error: oe_no_language" - return - cat_id = await self.get_cat_id(meta['category']) - if meta.get('type') == "DVDRIP": - meta['type'] = "ENCODE" - type_id = await self.get_type_id(meta['type'], meta.get('video_codec', 'N/A')) - resolution_id = await self.get_res_id(meta['resolution']) - oe_name = await self.edit_name(meta) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - open_torrent = open(torrent_file_path, 'rb') - files = {'torrent': open_torrent} - data = { - 'name': oe_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': 
cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id + return - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - data['tvdb'] = meta['tvdb_id'] - elif meta.get('category') == "MOVIE": - data['tvdb'] = 0 - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + async def get_description(self, meta): + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8') as f: + base = await f.read() + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: + await process_desc_language(meta, descfile, tracker=self.tracker) + + bbcode = BBCODE() + if meta.get('discs', []) != []: + discs = meta['discs'] + if discs[0]['type'] == "DVD": + await descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n\n") + if len(discs) >= 2: + for each in discs[1:]: + if each['type'] == "BDMV": + await descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + await descfile.write(f"{each['name']}:\n") + await descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n\n") + elif each['type'] == "HDDVD": + await descfile.write(f"{each['name']}:\n") + await descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n\n") - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + desc = base + desc = bbcode.convert_pre_to_code(desc) + desc = bbcode.convert_hide_to_spoiler(desc) + desc = bbcode.convert_comparison_to_collapse(desc, 1000) try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://onlyencodes.cc/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + desc = desc + tonemapped_header + desc = desc + "\n\n" + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") + desc = desc.replace('[img]', '[img=300]') + await descfile.write(desc) + if f'{self.tracker}_images_key' in meta: + images = meta[f'{self.tracker}_images_key'] + else: + images = meta['image_list'] + if len(images) > 0: + await descfile.write("[center]") + for each in range(len(images[:int(meta['screens'])])): + web_url = images[each]['web_url'] + raw_url = images[each]['raw_url'] + await descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") + await descfile.write("[/center]") + + await descfile.write(f"\n[right][url=https://github.com/Audionut/Upload-Assistant][size=4]{meta['ua_signature']}[/size][/url][/right]") - async def edit_name(self, meta): + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8') as f: + desc = await f.read() + + return {'description': desc} + + async def get_name(self, meta): oe_name = meta.get('name') resolution = meta.get('resolution') video_encode = meta.get('video_encode') name_type = meta.get('type', "") - tag_lower = meta['tag'].lower() - invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + source = meta.get('source', "") + audio = meta.get('audio', "") + video_codec = meta.get('video_codec', "") + imdb_name = meta.get('imdb_info', {}).get('title', "") - title = meta.get('title', "") - oe_name = oe_name.replace(f"{title}", imdb_name, 1) - year = str(meta.get('year', "")) imdb_year = str(meta.get('imdb_info', {}).get('year', "")) - scale = "DS4K" if "DS4K" in meta['uuid'].upper() else "RM4K" if "RM4K" in meta['uuid'].upper() else "" - if not meta.get('category') == "TV": + imdb_aka = meta.get('imdb_info', {}).get('aka', "") + year = str(meta.get('year', "")) + aka = meta.get('aka', "") + if imdb_name and imdb_name.strip(): + if aka: + oe_name = oe_name.replace(f"{aka} ", "", 1) + oe_name = oe_name.replace(f"{meta['title']}", imdb_name, 1) + + if imdb_aka and imdb_aka.strip() and imdb_aka != imdb_name and not meta.get('no_aka', False): + oe_name = oe_name.replace(f"{imdb_name}", f"{imdb_name} AKA {imdb_aka}", 1) + + if not meta.get('category') == "TV" and imdb_year and imdb_year.strip() and year and year.strip() and imdb_year != year: oe_name = oe_name.replace(f"{year}", imdb_year, 1) if name_type == "DVDRIP": if meta.get('category') == "MOVIE": - oe_name = oe_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) - oe_name = oe_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + oe_name = oe_name.replace(f"{source}{video_encode}", f"{resolution}", 1) + oe_name = oe_name.replace((audio), f"{audio}{video_encode}", 1) else: - oe_name = oe_name.replace(f"{meta['source']}", f"{resolution}", 1) - oe_name = oe_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) + oe_name = oe_name.replace(f"{source}", f"{resolution}", 1) + oe_name = oe_name.replace(f"{video_codec}", f"{audio} {video_codec}", 1) if not meta.get('audio_languages'): await process_desc_language(meta, desc=None, tracker=self.tracker) elif meta.get('audio_languages'): - audio_languages = meta['audio_languages'][0].upper() + audio_languages = meta['audio_languages'] if audio_languages and not await 
has_english_language(audio_languages) and not meta.get('is_disc') == "BDMV": - oe_name = oe_name.replace(meta['resolution'], f"{audio_languages} {meta['resolution']}", 1) + foreign_lang = meta['audio_languages'][0].upper() + oe_name = oe_name.replace(f"{resolution}", f"{foreign_lang} {resolution}", 1) + scale = "DS4K" if "DS4K" in meta['uuid'].upper() else "RM4K" if "RM4K" in meta['uuid'].upper() else "" if name_type in ["ENCODE", "WEBDL", "WEBRIP"] and scale != "": oe_name = oe_name.replace(f"{resolution}", f"{scale}", 1) + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): for invalid_tag in invalid_tags: oe_name = re.sub(f"-{invalid_tag}", "", oe_name, flags=re.IGNORECASE) oe_name = f"{oe_name}-NOGRP" - return oe_name + return {'name': oe_name} + + async def get_type_id(self, meta): + video_codec = meta.get('video_codec', 'N/A') - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + meta_type = meta['type'] + if meta_type == "DVDRIP": + meta_type = "ENCODE" - async def get_type_id(self, type, video_codec): type_id = { 'DISC': '19', 'REMUX': '20', 'WEBDL': '21', - }.get(type, '0') - if type == "WEBRIP": + }.get(meta_type, '0') + if meta_type == "WEBRIP": if video_codec == "HEVC": # x265 Encode type_id = '10' @@ -217,7 +202,7 @@ async def get_type_id(self, type, video_codec): if video_codec == 'AVC': # x264 Encode type_id = '15' - if type == "ENCODE": + if meta_type == "ENCODE": if video_codec == "HEVC": # x265 Encode type_id = '10' @@ -227,122 +212,4 @@ async def get_type_id(self, type, video_codec): if video_codec == 'AVC': # x264 Encode type_id = '15' - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() - - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: - if desc_header != "": - descfile.write(desc_header) - - await process_desc_language(meta, descfile, tracker=self.tracker) - - bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n\n") - if len(discs) >= 2: - for each in discs[1:]: - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n\n") - elif each['type'] == "HDDVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n\n") - - desc = base - desc = bbcode.convert_pre_to_code(desc) - desc = bbcode.convert_hide_to_spoiler(desc) - desc = bbcode.convert_comparison_to_collapse(desc, 1000) - - desc = desc.replace('[img]', 
'[img=300]') - descfile.write(desc) - if f'{self.tracker}_images_key' in meta: - images = meta[f'{self.tracker}_images_key'] - else: - images = meta['image_list'] - if len(images) > 0: - descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - raw_url = images[each]['raw_url'] - descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") - descfile.write("[/center]") - - if signature is not None: - descfile.write(signature) - return - - async def search_existing(self, meta, disctype): - disallowed_keywords = {'XXX', 'softcore', 'concert'} - if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])): - if not meta['unattended']: - console.print('[bold red]Erotic not allowed at OE.') - meta['skipping'] = "OE" - return - - if not meta['is_disc'] == "BDMV": - if not meta.get('audio_languages') or not meta.get('subtitle_languages'): - await process_desc_language(meta, desc=None, tracker=self.tracker) - if not await has_english_language(meta.get('audio_languages')) and not await has_english_language(meta.get('subtitle_languages')): - if not meta['unattended']: - console.print('[bold red]OE requires at least one English audio or subtitle track.') - meta['skipping'] = "OE" - return - - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type'], meta.get('video_codec', 'N/A')), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - attributes = each['attributes'] - result = { - 'name': attributes['name'], - 'size': attributes['size'] - } - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'type_id': type_id} diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 8dc2b5711..e8902115a 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -1,54 +1,83 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import platform -import os -import httpx -import glob -import requests import cli_ui +import re from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.UNIT3D import UNIT3D -class OTW(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class OTW(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='OTW') self.config = config + self.common = COMMON(config) self.tracker = 'OTW' self.source_flag = 'OTW' - self.upload_url = 'https://oldtoons.world/api/torrents/upload' - self.search_url = 'https://oldtoons.world/api/torrents/filter' - self.torrent_url = 'https://oldtoons.world/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://oldtoons.world/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', - 'FRDS', 'FUM', 'GalaxyRG', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Lama', 'Leffe', 'LOAD', 'mHD', 'NhaNc3', 'nHD', 'NOIVTC', - 'nSD', 'PiRaTeS', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'STUTTERSHIT', 'Telly', 'TM', - 'UPiNSMOKE', 'WAF', 'xRed', 'XS', 'YELLO', 'YIFY', 'YTS', 'ZKBL', 'ZmN', '4f8c4100292', 'Azkars', 'Sync0rdi', - ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] + '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', + 'AROMA', 'aXXo', 'CM8', 'CrEwSaDe', 'DeadFish', 'DNL', 'ELiTE', + 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'GalaxyRG', 'HAiKU', + 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'INFINITY', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', + 'LAMA', 'Leffe', 'LOAD', 'mHD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'PiRaTeS', + 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', + 'SicFoI', 'SPASM', 'STUTTERSHIT', 'Telly', 'TM', 'UPiNSMOKE', 'WAF', 'xRed', + 'XS', 'YELLO', 'YIFY', 'YTS', 'ZKBL', 'ZmN', '4f8c4100292', 'Azkars', 'Sync0rdi' ] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_additional_checks(self, meta): + should_continue = True - async def 
get_type_id(self, type, meta): - if meta.get('is_disc') == "BDMV": - return '1' - elif meta.get('is_disc') and meta.get('is_disc') != "BDMV": - return '7' + if not any(genre in meta['combined_genres'] for genre in ['Animation', 'Family']): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print('[bold red]Genre does not match Animation or Family for OTW.') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy', 'hentai', 'adult animation', 'softcore'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print('[bold red]Adult animation not allowed at OTW.') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False + + if meta['type'] not in ['WEBDL'] and not meta['is_disc']: + if meta.get('tag', "") in ['CMRG', 'EVO', 'TERMiNAL', 'ViSION']: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Group {meta["tag"]} is only allowed for raw type content at OTW[/bold red]') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False + + return should_continue + + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): + type = meta['type'] + if meta.get('is_disc') == 'BDMV': + return {'type_id': '1'} + elif meta.get('is_disc') and meta.get('is_disc') != 'BDMV': + return {'type_id': '7'} + if type == "DVDRIP": + return {'type_id': '8'} type_id = { 'DISC': '1', 'REMUX': '2', @@ -56,214 +85,57 @@ async def get_type_id(self, type, meta): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id + } + if mapping_only: + return type_id + elif reverse: + return {v: k for k, v in type_id.items()} + elif type is not None: + return {'type_id': type_id.get(type, '0')} + else: + meta_type = meta.get('type', '') + resolved_id = type_id.get(meta_type, '0') + return {'type_id': resolved_id} - async def edit_name(self, meta): + async def get_name(self, meta): otw_name = meta['name'] source = meta['source'] resolution = meta['resolution'] aka = meta.get('aka', '') type = meta['type'] + video_codec = meta.get('video_codec', '') if aka: - otw_name = otw_name.replace(meta["aka"], '') - if meta['is_disc'] == "DVD": - otw_name = otw_name.replace(source, f"{source} {resolution}") - if meta['is_disc'] == "DVD" or type == "REMUX": - otw_name = otw_name.replace(meta['audio'], f"{meta.get('video_codec', '')} {meta['audio']}", 1) - elif meta['is_disc'] == "DVD" or (type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): + otw_name = otw_name.replace(f"{aka} ", '') + if meta['is_disc'] == "DVD" or (type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): otw_name = otw_name.replace((meta['source']), f"{resolution} {meta['source']}", 1) + otw_name = 
otw_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) if meta['category'] == "TV": years = [] tmdb_year = meta.get('year') if tmdb_year and str(tmdb_year).isdigit(): - years.append(int(tmdb_year)) - - imdb_year = meta.get('imdb_info', {}).get('year') - if imdb_year and str(imdb_year).isdigit(): - years.append(int(imdb_year)) - - series_year = meta.get('tvdb_episode_data', {}).get('series_year') - if series_year and str(series_year).isdigit(): - years.append(int(series_year)) - # Use the oldest year if any found, else empty string - year = str(min(years)) if years else "" + year = str(tmdb_year) + else: + if tmdb_year and str(tmdb_year).isdigit(): + years.append(int(tmdb_year)) + + imdb_year = meta.get('imdb_info', {}).get('year') + if imdb_year and str(imdb_year).isdigit(): + years.append(int(imdb_year)) + + series_year = meta.get('tvdb_episode_data', {}).get('series_year') + if series_year and str(series_year).isdigit(): + years.append(int(series_year)) + # Use the oldest year if any found, else empty string + year = str(min(years)) if years else "" if not meta.get('no_year', False) and not meta.get('search_year', ''): otw_name = otw_name.replace(meta['title'], f"{meta['title']} {year}", 1) - return otw_name - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - otw_name = await self.edit_name(meta) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - modq = await self.get_flag(meta, 'modq') - type_id = await self.get_type_id(meta['type'], meta) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + return {'name': otw_name} - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + async def get_additional_data(self, meta): data = { - 'name': otw_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'mod_queue_opt_in': modq, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if 
meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://oldtoons.world/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 - - async def search_existing(self, meta, disctype): - if not any(genre in meta['genres'] for genre in ['Animation', 'Family']): - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - console.print('[bold red]Genre does not match Animation or Family for OTW.') - if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): - pass - else: - meta['skipping'] = "OTW" - return - else: - meta['skipping'] = "OTW" - return - disallowed_keywords = {'XXX', 'Erotic', 'Porn', 'Hentai', 'Adult Animation', 'Orgy', 'softcore'} - if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])): - if not meta['unattended']: - console.print('[bold red]Adult animation not allowed at OTW.') - meta['skipping'] = "OTW" - return [] - if meta['sd'] and 'BluRay' in meta['source']: - if not meta['unattended']: - console.print("[bold red]SD content from HD source not allowed") - meta['skipping'] = "OTW" - return [] - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type'], meta), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - return dupes + return data diff --git a/src/trackers/PHD.py b/src/trackers/PHD.py new file mode 100644 index 000000000..201ac33c3 --- /dev/null +++ b/src/trackers/PHD.py @@ -0,0 +1,352 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +from datetime import datetime +from src.trackers.COMMON import COMMON +from src.trackers.AVISTAZ_NETWORK import AZTrackerBase + + +class PHD(AZTrackerBase): + def __init__(self, config): + super().__init__(config, tracker_name='PHD') + self.config = config + self.common = COMMON(config) + self.tracker = 'PHD' + self.source_flag = 'PrivateHD' + self.banned_groups = [ + "RARBG", "STUTTERSHIT", "LiGaS", "DDR", "Zeus", "TBS", "SWTYBLZ", "EASports", "C4K", "d3g", + "MeGusta", "YTS", "YIFY", "Tigole", "x0r", "nikt0", "NhaNc3", "PRoDJi", "RDN", "SANTi", + "FaNGDiNG0", "FRDS", "HD2DVD", "HDTime", "iPlanet", "KiNGDOM", "Leffe", "4K4U", "Xiaomi", + "VisionXpert", "WKS" + ] + self.base_url = 'https://privatehd.to/' + self.torrent_url = f'{self.base_url}/torrent/' + self.requests_url = f'{self.base_url}/requests' + + async def rules(self, meta): + warnings = [] + + is_bd_disc = False + if meta.get('is_disc', '') == 'BDMV': + is_bd_disc = True + + video_codec = meta.get('video_codec', '') + if video_codec: + video_codec = video_codec.strip().lower() + + video_encode = meta.get('video_encode', '') + if video_encode: + video_encode = video_encode.strip().lower() + + type = meta.get('type', '') + if type: + type = type.strip().lower() + + source = meta.get('source', '') + if source: + source = source.strip().lower() + + # This also checks the rule 'FANRES content is not allowed' + if meta['category'] not in ('MOVIE', 'TV'): + warnings.append( + 'The only allowed content to be uploaded are Movies and TV Shows.\n' + 'Anything else, like games, music, software and porn is not allowed!' + ) + + if meta.get('anime', False): + warnings.append("Upload Anime content to our sister site AnimeTorrents.me instead. 
If it's on AniDB, it's an anime.") + + year = meta.get('year') + current_year = datetime.now().year + is_older_than_50_years = (current_year - year) >= 50 + if is_older_than_50_years: + warnings.append('Upload movies/series 50+ years old to our sister site CinemaZ.to instead.') + + # https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes + + africa = [ + 'AO', 'BF', 'BI', 'BJ', 'BW', 'CD', 'CF', 'CG', 'CI', 'CM', 'CV', 'DJ', 'DZ', 'EG', 'EH', + 'ER', 'ET', 'GA', 'GH', 'GM', 'GN', 'GQ', 'GW', 'IO', 'KE', 'KM', 'LR', 'LS', 'LY', 'MA', + 'MG', 'ML', 'MR', 'MU', 'MW', 'MZ', 'NA', 'NE', 'NG', 'RE', 'RW', 'SC', 'SD', 'SH', 'SL', + 'SN', 'SO', 'SS', 'ST', 'SZ', 'TD', 'TF', 'TG', 'TN', 'TZ', 'UG', 'YT', 'ZA', 'ZM', 'ZW' + ] + + america = [ + 'AG', 'AI', 'AR', 'AW', 'BB', 'BL', 'BM', 'BO', 'BQ', 'BR', 'BS', 'BV', 'BZ', 'CA', 'CL', + 'CO', 'CR', 'CU', 'CW', 'DM', 'DO', 'EC', 'FK', 'GD', 'GF', 'GL', 'GP', 'GS', 'GT', 'GY', + 'HN', 'HT', 'JM', 'KN', 'KY', 'LC', 'MF', 'MQ', 'MS', 'MX', 'NI', 'PA', 'PE', 'PM', 'PR', + 'PY', 'SR', 'SV', 'SX', 'TC', 'TT', 'US', 'UY', 'VC', 'VE', 'VG', 'VI' + ] + + asia = [ + 'AE', 'AF', 'AM', 'AZ', 'BD', 'BH', 'BN', 'BT', 'CN', 'CY', 'GE', 'HK', 'ID', 'IL', 'IN', + 'IQ', 'IR', 'JO', 'JP', 'KG', 'KH', 'KP', 'KR', 'KW', 'KZ', 'LA', 'LB', 'LK', 'MM', 'MN', + 'MO', 'MV', 'MY', 'NP', 'OM', 'PH', 'PK', 'PS', 'QA', 'SA', 'SG', 'SY', 'TH', 'TJ', 'TL', + 'TM', 'TR', 'TW', 'UZ', 'VN', 'YE' + ] + + europe = [ + 'AD', 'AL', 'AT', 'AX', 'BA', 'BE', 'BG', 'BY', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', + 'FO', 'FR', 'GB', 'GG', 'GI', 'GR', 'HR', 'HU', 'IE', 'IM', 'IS', 'IT', 'JE', 'LI', 'LT', + 'LU', 'LV', 'MC', 'MD', 'ME', 'MK', 'MT', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'RU', 'SE', + 'SI', 'SJ', 'SK', 'SM', 'SU', 'UA', 'VA', 'XC' + ] + + oceania = [ + 'AS', 'AU', 'CC', 'CK', 'CX', 'FJ', 'FM', 'GU', 'HM', 'KI', 'MH', 'MP', 'NC', 'NF', 'NR', + 'NU', 'NZ', 'PF', 'PG', 'PN', 'PW', 'SB', 'TK', 'TO', 'TV', 'UM', 'VU', 'WF', 'WS' + ] + + phd_allowed_countries = [ + 'AG', 'AI', 'AU', 'BB', 'BM', 'BS', 'BZ', 'CA', 'CW', 'DM', 'GB', 'GD', 'IE', + 'JM', 'KN', 'KY', 'LC', 'MS', 'NZ', 'PR', 'TC', 'TT', 'US', 'VC', 'VG', 'VI', + ] + + all_countries = africa + america + europe + oceania + cinemaz_countries = list(set(all_countries) - set(phd_allowed_countries)) + + origin_countries_codes = meta.get('origin_country', []) + + if any(code in phd_allowed_countries for code in origin_countries_codes): + pass + + # CinemaZ + elif any(code in cinemaz_countries for code in origin_countries_codes): + warnings.append('Upload European (EXCLUDING United Kingdom and Ireland), South American and African content to our sister site CinemaZ.to instead.') + + # AvistaZ + elif any(code in asia for code in origin_countries_codes): + origin_country_str = ', '.join(origin_countries_codes) + warnings.append( + 'DO NOT upload content originating from countries shown in this map (https://imgur.com/nIB9PM1).\n' + 'In case of doubt, message the staff first. Upload Asian content to our sister site Avistaz.to instead.\n' + f'Origin country for your upload: {origin_country_str}' + ) + + elif not any(code in phd_allowed_countries for code in origin_countries_codes): + warnings.append( + 'Only upload content to PrivateHD from all major English speaking countries.\n' + 'Including United States, Canada, UK, Ireland, Australia, and New Zealand.' + ) + + # Tags + tag = meta.get('tag', '') + if tag: + tag = tag.strip().lower() + if tag in ('rarbg', 'fgt', 'grym', 'tbs'): + warnings.append('Do not upload RARBG, FGT, Grym or TBS. 
Existing uploads by these groups can be trumped at any time.') + + if tag == 'evo' and source != 'web': + warnings.append('Do not upload non-web EVO releases. Existing uploads by this group can be trumped at any time.') + + if meta.get('sd', '') == 1: + warnings.append('SD (Standard Definition) content is forbidden.') + + if not is_bd_disc: + if meta.get('container') not in ['mkv', 'mp4']: + warnings.append('Allowed containers: MKV, MP4.') + + # Video codec + # 1 + if type == 'remux': + if video_codec not in ('mpeg-2', 'vc-1', 'h.264', 'h.265', 'avc'): + warnings.append('Allowed Video Codecs for BluRay (Untouched + REMUX): MPEG-2, VC-1, H.264, H.265') + + # 2 + if type == 'encode' and source == 'bluray': + if video_encode not in ('h.264', 'h.265', 'x264', 'x265'): + warnings.append('Allowed Video Codecs for BluRay (Encoded): H.264, H.265 (x264 and x265 respectively are the only permitted encoders)') + + # 3 + if type in ('webdl', 'web-dl') and source == 'web': + if video_encode not in ('h.264', 'h.265', 'vp9'): + warnings.append('Allowed Video Codecs for WEB (Untouched): H.264, H.265, VP9') + + # 4 + if type == 'encode' and source == 'web': + if video_encode not in ('h.264', 'h.265', 'x264', 'x265'): + warnings.append('Allowed Video Codecs for WEB (Encoded): H.264, H.265 (x264 and x265 respectively are the only permitted encoders)') + + # 5 + if type == 'encode': + if video_encode == 'x265': + if meta.get('bit_depth', '') != '10': + warnings.append('Allowed Video Codecs for x265 encodes must be 10-bit') + + # 6 + resolution = int(meta.get('resolution').lower().replace('p', '').replace('i', '')) + if resolution > 1080: + if video_encode in ('h.264', 'x264'): + warnings.append('H.264/x264 only allowed for 1080p and below.') + + # 7 + if video_codec not in ('avc', 'mpeg-2', 'vc-1', 'avc', 'h.264', 'vp9', 'h.265', 'x264', 'x265', 'hevc'): + warnings.append(f'Video codec not allowed in your upload: {video_codec}.') + + # Audio codec + if is_bd_disc: + pass + else: + # 1 + allowed_keywords = ['AC3', 'Dolby Digital', 'Dolby TrueHD', 'DTS', 'DTS-HD', 'FLAC', 'AAC', 'Dolby'] + + # 2 + forbidden_keywords = ['LPCM', 'PCM', 'Linear PCM'] + + audio_tracks = [] + media_tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + for track in media_tracks: + if track.get('@type') == 'Audio': + codec_info = track.get('Format_Commercial_IfAny') + codec = codec_info if isinstance(codec_info, str) else '' + audio_tracks.append({ + 'codec': codec, + 'language': track.get('Language', '') + }) + + # 3 + original_language = meta.get('original_language', '') + language_track = track.get('language', '') + if original_language and language_track: + # Filter to only have audio tracks that are in the original language + original_language_tracks = [ + track for track in audio_tracks if track.get('language', '').lower() == original_language.lower() + ] + + # Now checks are only done on the original language track list + if original_language_tracks: + has_truehd_atmos = any( + 'truehd' in track['codec'].lower() and 'atmos' in track['codec'].lower() + for track in original_language_tracks + ) + + # Check if there is an AC-3 compatibility track in the same language + has_ac3_compat_track = any( + 'ac-3' in track['codec'].lower() or 'dolby digital' in track['codec'].lower() + for track in original_language_tracks + ) + + if has_truehd_atmos and not has_ac3_compat_track: + warnings.append( + f'A TrueHD Atmos track was detected in the original language ({original_language}), ' + f'but no AC-3 (Dolby Digital) 
compatibility track was found for that same language.\n' + 'Rule: TrueHD/Atmos audio must have a compatibility track due to poor compatibility with most players.' + ) + + # 4 + invalid_codecs = [] + for track in audio_tracks: + codec = track['codec'] + if not codec: + continue + + is_forbidden = any(kw.lower() in codec.lower() for kw in forbidden_keywords) + if is_forbidden: + invalid_codecs.append(codec) + continue + + is_allowed = any(kw.lower() in codec.lower() for kw in allowed_keywords) + if not is_allowed: + invalid_codecs.append(codec) + + if invalid_codecs: + unique_invalid_codecs = sorted(list(set(invalid_codecs))) + warnings.append( + f"Unallowed audio codec(s) detected: {', '.join(unique_invalid_codecs)}\n" + f'Allowed codecs: AC3 (Dolby Digital), Dolby TrueHD, DTS, DTS-HD (MA), FLAC, AAC, all other Dolby codecs.\n' + f'Dolby Exceptions: Any uncompressed audio codec that comes on a BluRay disc like; PCM, LPCM, etc.' + ) + + # Quality check + BITRATE_RULES = { + ('x265', 'web', 720): 1500000, + ('x265', 'web', 1080): 2500000, + ('x265', 'bluray', 720): 2000000, + ('x265', 'bluray', 1080): 3500000, + + ('x264', 'web', 720): 2500000, + ('x264', 'web', 1080): 4500000, + ('x264', 'bluray', 720): 3500000, + ('x264', 'bluray', 1080): 6000000, + } + + WEB_SOURCES = ('hdtv', 'web', 'hdrip') + + if type == 'encode': + bitrate = 0 + for track in media_tracks: + if track.get('@type') == 'Video': + bitrate = int(track.get('BitRate')) + break + + source_type = None + if source in WEB_SOURCES: + source_type = 'web' + elif source == 'bluray': + source_type = 'bluray' + + if source_type: + rule_key = (video_encode, source_type, resolution) + + if rule_key in BITRATE_RULES: + min_bitrate = BITRATE_RULES[rule_key] + + if bitrate < min_bitrate: + quality_rule_text = ( + 'Only upload proper encodes.\n' + 'Any encodes where the size and/or the bitrate imply a bad quality will be deleted.' + ) + rule = ( + f'Your upload was rejected due to low quality.\n' + f'Minimum bitrate for {resolution}p {source.upper()} {video_encode.upper()} is {min_bitrate / 1000} Kbps.' + ) + warnings.append(quality_rule_text + rule) + + if resolution < 720: + rule = 'Video must be at least 720p.' + warnings.append(rule) + + # Hybrid + if type in ('remux', 'encode'): + if 'hybrid' in meta.get('name', '').lower(): + warnings.append( + 'Hybrid Remuxes and Encodes are subject to the following condition:\n\n' + 'Hybrid user releases are permitted, but are treated similarly to regular ' + 'user releases and must be approved by staff before you upload them ' + '(please see the torrent approvals forum for details).' + ) + + # Log + if type == 'remux': + warnings.append( + 'Remuxes must have a demux/eac3to log under spoilers in description.\n' + 'Do you have these logs and will you add them to the description after upload?' 
+ ) + + # Bloated + if meta.get('bloated', False): + warnings.append( + 'Audio dubs are never preferred and can always be trumped by original audio only rip (Exception for BD50/BD25).\n' + 'Do NOT upload a multi audio release when there is already a original audio only release on site.\n' + ) + + if warnings: + all_warnings = '\n\n'.join(filter(None, warnings)) + return all_warnings + + return + + def get_rip_type(self, meta): + source_type = meta.get('type') + + keyword_map = { + 'bdrip': '1', + 'encode': '2', + 'disc': '3', + 'hdrip': '6', + 'hdtv': '7', + 'webdl': '12', + 'webrip': '13', + 'remux': '14', + } + + return keyword_map.get(source_type.lower()) diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py deleted file mode 100644 index 94d968ad4..000000000 --- a/src/trackers/PSS.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import re -import os -import glob -import httpx -from src.trackers.COMMON import COMMON -from src.console import console - - -class PSS(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - - def __init__(self, config): - self.config = config - self.tracker = 'PSS' - self.source_flag = 'PSS' - self.upload_url = 'https://privatesilverscreen.cc/api/torrents/upload' - self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' - self.torrent_url = 'https://privatesilverscreen.cc/torrents/' - self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' - self.banned_groups = ['AROMA', 'd3g', 'EVO', 'FGT', 'NeXus', 'LAMA', 'MeGusta', 'RARBG', 'STUTTERSHIT', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', 'iPlanet', 'STC', - 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', 'C4K', - 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD', '$andra', 'AR', 'KiNGDOM'] - pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def edit_name(self, meta): - pss_name = meta['name'] - tag_lower = meta['tag'].lower() - invalid_tags = ["nogrp", "nogroup", "NoGroup", "unknown", "-unk-"] - - if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): - for invalid_tag in invalid_tags: - pss_name = re.sub(f"-{invalid_tag}", "", pss_name, flags=re.IGNORECASE) - pss_name = f"{pss_name}-NOGROUP" - - return pss_name - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - pss_name = await self.edit_name(meta) - await 
common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': pss_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://privatesilverscreen.cc/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/PT.py b/src/trackers/PT.py index c1b01fa46..424f9383a 100644 --- a/src/trackers/PT.py +++ b/src/trackers/PT.py @@ -1,35 +1,27 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx -import re import os +import re from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class PT(): +class PT(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='PT') self.config = config + self.common = COMMON(config) self.tracker = 'PT' self.source_flag = 'Portugas' - self.upload_url = 'https://portugas.org/api/torrents/upload' - self.search_url = 'https://portugas.org/api/torrents/filter' - self.torrent_url = 'https://portugas.org/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://portugas.org/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -37,10 +29,10 @@ async def get_type_id(self, type): 'WEBRIP': '39', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - return type_id + }.get(meta['type'], '0') + return {'type_id': type_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { '4320p': '1', '2160p': '2', @@ -53,10 +45,10 @@ async def get_res_id(self, resolution): '540p': '11', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} - async def edit_name(self, meta): + async def get_name(self, meta): name = meta['name'].replace(' ', '.') 
pt_name = name @@ -68,7 +60,7 @@ async def edit_name(self, meta): pt_name = re.sub(f"-{invalid_tag}", "", pt_name, flags=re.IGNORECASE) pt_name = f"{pt_name}-NOGROUP" - return pt_name + return {'name': pt_name} def get_audio(self, meta): found_portuguese_audio = False @@ -99,13 +91,16 @@ def get_audio(self, meta): audio_sections = re.findall(r'Audio(?: #\d+)?\s*\n(.*?)(?=\n\n(?:Audio|Video|Text|Menu)|$)', media_info_text, re.DOTALL | re.IGNORECASE) for section in audio_sections: language_match = re.search(r'Language\s*:\s*(.+)', section, re.IGNORECASE) - if language_match: - lang_raw = language_match.group(1).strip() - # Clean "Portuguese (Brazil)" variation. - lang_clean = re.sub(r'[/\\].*|\(.*?\)', '', lang_raw).strip() - if lang_clean.lower() == "portuguese": - found_portuguese_audio = True - break + title_match = re.search(r'Title\s*:\s*(.+)', section, re.IGNORECASE) + + lang_raw = language_match.group(1).strip() if language_match else "" + title_raw = title_match.group(1).strip() if title_match else "" + + text = f'{lang_raw} {title_raw}'.lower() + + if "portuguese" in text and not any(keyword in text for keyword in ["(br)", "brazilian"]): + found_portuguese_audio = True + break except FileNotFoundError: pass @@ -146,13 +141,16 @@ def get_subtitles(self, meta): for section in text_sections: language_match = re.search(r'Language\s*:\s*(.+)', section, re.IGNORECASE) - if language_match: - lang_raw = language_match.group(1).strip() - # Clean "Portuguese (Brazil)" variation. - lang_clean = re.sub(r'[/\\].*|\(.*?\)', '', lang_raw).strip() - if lang_clean.lower() == "portuguese": - found_portuguese_subtitle = True - break + title_match = re.search(r'Title\s*:\s*(.+)', section, re.IGNORECASE) + + lang_raw = language_match.group(1).strip() if language_match else "" + title_raw = title_match.group(1).strip() if title_match else "" + + text = f'{lang_raw} {title_raw}'.lower() + + if "portuguese" in text and not any(keyword in text for keyword in ["(br)", "brazilian"]): + found_portuguese_subtitle = True + break except FileNotFoundError: pass @@ -161,124 +159,19 @@ def get_subtitles(self, meta): return 1 if found_portuguese_subtitle else 0 - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - pt_name = await self.edit_name(meta) + async def get_distributor_ids(self, meta): + return {} + + async def get_region_id(self, meta): + return {} + + async def get_additional_data(self, meta): audio_flag = self.get_audio(meta) subtitle_flag = self.get_subtitles(meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - # region_id = await common.unit3d_region_ids(meta.get('region')) - # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} + data = { - 'name': pt_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, 'audio_pt': audio_flag, 'legenda_pt': subtitle_flag, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - # if region_id != 0: - # data['region_id'] = region_id - # if distributor_id != 0: - # data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return data diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 6fdb6ac3d..d1461123f 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from bs4 import BeautifulSoup import requests import re diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 84bdd9e0a..540802e32 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -1,25 +1,32 @@ -import cli_ui -import requests +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import aiofiles.os import asyncio -import re -import os -from pathlib import Path -import json +import cli_ui +import click import glob +import httpx +import json import platform import pickle -import click -import httpx +import os +import re +import requests + +from pathlib import Path from pymediainfo import MediaInfo -from src.trackers.COMMON import COMMON +from torf import Torrent + +from cogs.redaction import redact_private_info +from data.config import config from src.bbcode import BBCODE -from src.exceptions import * # noqa F403 from src.console import console -from torf import Torrent -from datetime import datetime +from src.exceptions import * # noqa F403 +from src.rehostimages import check_hosts from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.torrentcreate import create_torrent +from src.trackers.COMMON import COMMON from src.uploadscreens import upload_screens -from src.torrentcreate import CustomTorrent, torf_cb, create_torrent class PTP(): @@ -30,14 +37,17 @@ def __init__(self, config): self.source_flag = 'PTP' self.api_user = config['TRACKERS']['PTP'].get('ApiUser', '').strip() self.api_key = config['TRACKERS']['PTP'].get('ApiKey', '').strip() - self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() + announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() + if announce_url: + self.announce_url = announce_url.replace('http://', 'https://') if announce_url.startswith('http://') else announce_url self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() self.web_source = self._is_true(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True)) - self.user_agent = f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + self.user_agent = f'Upload Assistant/2.3 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY', 'LAMA', 'WORLD'] + self.approved_image_hosts = ['ptpimg', 'pixhost'] self.sub_lang_map = { ("Arabic", "ara", "ar"): 22, @@ -260,7 +270,51 @@ async def get_group_by_imdb(self, imdb): await asyncio.sleep(1) try: response = response.json() - if response.get("Page") == "Browse": # No Releases on Site with ID + if response.get('TotalResults'): # Search results page + total_results = 
int(response.get('TotalResults', 0)) + if total_results == 0: + console.print(f"[yellow]No results found for IMDb: tt{imdb}[/yellow]") + return None + elif total_results == 1: + # Single result - use it + movie = response.get('Movies', [{}])[0] + groupID = movie.get('GroupId') + title = movie.get('Title', 'Unknown') + year = movie.get('Year', 'Unknown') + console.print(f"[green]Found single match for IMDb: [yellow]tt{imdb}[/yellow] -> Group ID: [yellow]{groupID}[/yellow][/green]") + console.print(f"[green]Title: [yellow]{title}[/yellow] ([yellow]{year}[/yellow])") + return groupID + else: + # Multiple results - let user choose + console.print(f"[yellow]Found {total_results} matches for IMDb: tt{imdb}[/yellow]") + movies = response.get('Movies', []) + choices = [] + for i, movie in enumerate(movies): + title = movie.get('Title', 'Unknown') + year = movie.get('Year', 'Unknown') + group_id = movie.get('GroupId', 'Unknown') + choice_text = f"{title} ({year}) - Group ID: {group_id}" + choices.append(choice_text) + + choices.append("Skip - Don't use any of these matches") + + try: + selected = cli_ui.ask_choice("Select the correct movie:", choices=choices) + if selected == "Skip - Don't use any of these matches": + console.print("[yellow]User chose to skip all matches[/yellow]") + return None + + selected_index = choices.index(selected) + selected_movie = movies[selected_index] + groupID = selected_movie.get('GroupId') + + console.print(f"[green]User selected: Group ID [yellow]{groupID}[/yellow][/green]") + return groupID + + except (KeyboardInterrupt, cli_ui.Interrupted): + console.print("[yellow]Selection cancelled by user[/yellow]") + return None + elif response.get("Page") == "Browse": # No Releases on Site with ID return None elif response.get('Page') == "Details": # Group Found groupID = response.get('GroupId') @@ -268,7 +322,7 @@ async def get_group_by_imdb(self, imdb): console.print(f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])") return groupID except Exception: - console.print("[red]An error has occured trying to find a group ID") + console.print("[red]An error has occurred trying to find a group ID") console.print("[red]Please check that the site is online and your ApiUser/ApiKey values are correct") return None @@ -471,7 +525,7 @@ def get_codec(self, meta): def get_resolution(self, meta): other_res = None res = meta.get('resolution', "OTHER") - if (res == "OTHER" and meta['is_disc'] != "BDMV") or (meta['sd'] == 1 and meta['type'] == "WEBDL"): + if (res == "OTHER" and meta['is_disc'] != "BDMV") or (meta['sd'] == 1 and meta['type'] == "WEBDL") or (meta['sd'] == 1 and meta['type'] == "DVDRIP"): video_mi = meta['mediainfo']['media']['track'][1] other_res = f"{video_mi['Width']}x{video_mi['Height']}" res = "Other" @@ -669,9 +723,39 @@ def convert_bbcode(self, desc): desc = re.sub(r"\[img=[^\]]+\]", "[img]", desc) return desc + async def check_image_hosts(self, meta): + url_host_mapping = { + "ptpimg.me": "ptpimg", + "pixhost.to": "pixhost", + } + + await check_hosts(meta, self.tracker, url_host_mapping=url_host_mapping, img_host_index=1, approved_image_hosts=self.approved_image_hosts) + return + async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() + if meta.get('scene_nfo_file', None): + # Remove NFO from description + meta_description = re.sub( + r"\[center\]\[spoiler=.*? 
NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]", + "", + base, + flags=re.DOTALL, + ) + base = meta_description multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + if multi_screens < 2: + multi_screens = 2 + console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config") + + if not meta.get('skip_imghost_upload', False): + if 'PTP_images_key' in meta: + image_list = meta['PTP_images_key'] + else: + image_list = meta['image_list'] + else: + image_list = [] + images = image_list # Check for saved pack_image_links.json file pack_images_file = os.path.join(meta['base_dir'], "tmp", meta['uuid'], "pack_image_links.json") @@ -680,14 +764,60 @@ async def edit_desc(self, meta): try: with open(pack_images_file, 'r', encoding='utf-8') as f: pack_images_data = json.load(f) - if meta['debug']: - console.print(f"[green]Loaded previously uploaded images from {pack_images_file}") - console.print(f"[blue]Found {pack_images_data.get('total_count', 0)} previously uploaded images") + + # Filter out keys with non-approved image hosts + keys_to_remove = [] + for key_name, key_data in pack_images_data.get('keys', {}).items(): + images_to_keep = [] + for img in key_data.get('images', []): + raw_url = img.get('raw_url', '') + # Extract hostname from URL (e.g., ptpimg.me -> ptpimg) + try: + import urllib.parse + parsed_url = urllib.parse.urlparse(raw_url) + hostname = parsed_url.netloc + # Get the main domain name (first part before the dot) + host_key = hostname.split('.')[0] if hostname else '' + + if host_key in self.approved_image_hosts: + images_to_keep.append(img) + elif meta['debug']: + console.print(f"[yellow]Filtering out image from non-approved host: {hostname}[/yellow]") + except Exception: + # If URL parsing fails, skip this image + if meta['debug']: + console.print(f"[yellow]Could not parse URL: {raw_url}[/yellow]") + continue + + if images_to_keep: + # Update the key with only approved images + pack_images_data['keys'][key_name]['images'] = images_to_keep + pack_images_data['keys'][key_name]['count'] = len(images_to_keep) + else: + # Mark key for removal if no approved images + keys_to_remove.append(key_name) + + # Remove keys with no approved images + for key_name in keys_to_remove: + del pack_images_data['keys'][key_name] + if meta['debug']: + console.print(f"[yellow]Removed key '{key_name}' - no approved image hosts[/yellow]") + + # Recalculate total count + pack_images_data['total_count'] = sum(key_data['count'] for key_data in pack_images_data.get('keys', {}).values()) + + if pack_images_data.get('total_count', 0) < 3: + pack_images_data = {} # Invalidate if less than 3 images total + if meta['debug']: + console.print("[yellow]Invalidating pack images - less than 3 approved images total[/yellow]") + else: + if meta['debug']: + console.print(f"[green]Loaded previously uploaded images from {pack_images_file}") + console.print(f"[blue]Found {pack_images_data.get('total_count', 0)} approved images across {len(pack_images_data.get('keys', {}))} keys[/blue]") except Exception as e: console.print(f"[yellow]Warning: Could not load pack image data: {str(e)}[/yellow]") with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: - images = meta['image_list'] discs = meta.get('discs', []) filelist = meta.get('filelist', []) @@ -707,8 +837,16 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") + try: + if meta.get('tonemapped', False) and 
self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + tonemapped_header = self.convert_bbcode(tonemapped_header) + desc.write(tonemapped_header) + desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") for img_index in range(len(images[:int(meta['screens'])])): - raw_url = meta['image_list'][img_index]['raw_url'] + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") elif each['type'] == "DVD": @@ -720,7 +858,7 @@ async def edit_desc(self, meta): desc.write(base2ptp) desc.write("\n\n") for img_index in range(len(images[:int(meta['screens'])])): - raw_url = meta['image_list'][img_index]['raw_url'] + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") if len(bdinfo_keys) > 1: @@ -775,7 +913,7 @@ async def edit_desc(self, meta): print(f"Error during BDMV screenshot capture: {e}") new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"PLAYLIST_{i}-*.png") if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) + uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}, allowed_hosts=self.approved_image_hosts) if uploaded_images and not meta.get('skip_imghost_upload', False): await self.save_image_links(meta, new_images_key, uploaded_images) for img in uploaded_images: @@ -797,9 +935,6 @@ async def edit_desc(self, meta): elif len(discs) > 1: if 'retry_count' not in meta: meta['retry_count'] = 0 - if multi_screens < 2: - multi_screens = 2 - console.print("[yellow]PTP requires at least 2 screenshots for multi disc content, overriding config") for i, each in enumerate(discs): new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": @@ -809,8 +944,16 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - for img_index in range(min(multi_screens, len(meta['image_list']))): - raw_url = meta['image_list'][img_index]['raw_url'] + try: + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + tonemapped_header = self.convert_bbcode(tonemapped_header) + desc.write(tonemapped_header) + desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") + for img_index in range(min(multi_screens, len(image_list))): + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: @@ -849,7 +992,7 @@ async def edit_desc(self, meta): print(f"Error during BDMV screenshot capture: {e}") new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) + uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}, allowed_hosts=self.approved_image_hosts) if uploaded_images and not meta.get('skip_imghost_upload', False): await self.save_image_links(meta, new_images_key, 
uploaded_images) for img in uploaded_images: @@ -875,8 +1018,8 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - for img_index in range(min(multi_screens, len(meta['image_list']))): - raw_url = meta['image_list'][img_index]['raw_url'] + for img_index in range(min(multi_screens, len(image_list))): + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: @@ -919,7 +1062,7 @@ async def edit_desc(self, meta): print(f"Error during DVD screenshot capture: {e}") new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) + uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}, allowed_hosts=self.approved_image_hosts) if uploaded_images and not meta.get('skip_imghost_upload', False): await self.save_image_links(meta, new_images_key, uploaded_images) for img in uploaded_images: @@ -971,16 +1114,22 @@ async def edit_desc(self, meta): desc.write("[/comparison]\n\n") + try: + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + tonemapped_header = self.convert_bbcode(tonemapped_header) + desc.write(tonemapped_header) + desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") + for img_index in range(len(images[:int(meta['screens'])])): - raw_url = meta['image_list'][img_index]['raw_url'] + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") # Handle multiple files case elif len(filelist) > 1: - if multi_screens < 2: - multi_screens = 2 - console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config") for i in range(len(filelist)): file = filelist[i] if i == 0: @@ -992,8 +1141,16 @@ async def edit_desc(self, meta): desc.write("\n\n") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - for img_index in range(min(multi_screens, len(meta['image_list']))): - raw_url = meta['image_list'][img_index]['raw_url'] + try: + if meta.get('tonemapped', False) and self.config['DEFAULT'].get('tonemapped_header', None): + tonemapped_header = self.config['DEFAULT'].get('tonemapped_header') + tonemapped_header = self.convert_bbcode(tonemapped_header) + desc.write(tonemapped_header) + desc.write("\n\n") + except Exception as e: + console.print(f"[yellow]Warning: Error setting tonemapped header: {str(e)}[/yellow]") + for img_index in range(min(multi_screens, len(image_list))): + raw_url = image_list[img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: @@ -1034,7 +1191,7 @@ async def edit_desc(self, meta): print(f"Error during generic screenshot capture: {e}") new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens and not meta.get('skip_imghost_upload', False): - uploaded_images, _ = await upload_screens(meta, multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}) + uploaded_images, _ = await upload_screens(meta, 
multi_screens, 1, 0, multi_screens, new_screens, {new_images_key: meta[new_images_key]}, allowed_hosts=self.approved_image_hosts) if uploaded_images and not meta.get('skip_imghost_upload', False): await self.save_image_links(meta, new_images_key, uploaded_images) for img in uploaded_images: @@ -1140,7 +1297,7 @@ async def get_AntiCsrfToken(self, meta): resp = loginresponse.json() if resp['Result'] == "TfaRequired": data['TfaType'] = "normal" - data['TfaCode'] = cli_ui.ask_string("2FA Required: Please enter 2FA code") + data['TfaCode'] = cli_ui.ask_string("2FA Required: Please enter PTP 2FA code") loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) await asyncio.sleep(2) resp = loginresponse.json() @@ -1263,6 +1420,8 @@ async def fill_upload_form(self, groupID, meta): } if data["remaster_year"] != "" or data["remaster_title"] != "": data["remaster"] = "on" + if meta.get('scene', False) is True: + data["scene"] = "on" if resolution == "Other": data["other_resolution"] = other_resolution if meta.get('personalrelease', False) is True: @@ -1273,7 +1432,6 @@ async def fill_upload_form(self, groupID, meta): data["imdb"] = "0" else: data["imdb"] = str(meta["imdb_id"]).zfill(7) - if groupID is None: # If need to make new group url = "https://passthepopcorn.me/upload.php" if data["imdb"] == '0': @@ -1306,7 +1464,7 @@ async def fill_upload_form(self, groupID, meta): if meta.get('mode', 'discord') == 'cli': console.print('[yellow]Unable to match any tags') console.print("Valid tags can be found on the PTP upload form") - new_data["tags"] = console.input("Please enter at least one tag. Comma seperated (action, animation, short):") + new_data["tags"] = console.input("Please enter at least one tag. Comma separated (action, animation, short):") data.update(new_data) if meta["imdb_info"].get("directors", None) is not None: data["artist[]"] = tuple(meta['imdb_info'].get('directors')) @@ -1318,56 +1476,26 @@ async def fill_upload_form(self, groupID, meta): return url, data async def upload(self, meta, url, data, disctype): - torrent_filename = f"[{self.tracker}].torrent" - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}" - torrent = Torrent.read(torrent_path) + common = COMMON(config=self.config) + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + if not await aiofiles.os.path.exists(torrent_file_path): + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename="BASE") + + loop = asyncio.get_running_loop() + torrent = await loop.run_in_executor(None, Torrent.read, torrent_file_path) # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed if torrent.piece_size > 16777216: # 16 MiB in bytes console.print("[red]Piece size is OVER 16M and does not work on PTP. 
Generating a new .torrent") - if meta.get('mkbrr', False): - from data.config import config - common = COMMON(config=self.config) - tracker_url = config['TRACKERS']['PTP'].get('announce_url', "https://fake.tracker/").strip() + tracker_url = config['TRACKERS']['PTP'].get('announce_url', "https://fake.tracker/").strip() + meta['max_piece_size'] = '16' + torrent_create = f"[{self.tracker}]" - # Create the torrent with the tracker URL - torrent_create = f"[{self.tracker}]" - create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) - torrent_filename = "[PTP]" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - else: - if meta['is_disc']: - include = [] - exclude = [] - else: - include = ["*.mkv", "*.mp4", "*.ts"] - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - - new_torrent = CustomTorrent( - meta=meta, - path=Path(meta['path']), - trackers=[self.announce_url], - source="Audionut", - private=True, - exclude_globs=exclude, # Ensure this is always a list - include_globs=include, # Ensure this is always a list - creation_date=datetime.now(), - comment="Created by Audionut's Upload Assistant", - created_by="Audionut's Upload Assistant" - ) - - # Explicitly set the piece size and update metainfo - new_torrent.piece_size = 16777216 # 16 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set - - # Validate and write the new torrent - new_torrent.validate_piece_size() - new_torrent.generate(callback=torf_cb, interval=5) - new_torrent.write(torrent_path, overwrite=True) + create_torrent(meta, meta['path'], torrent_create, tracker_url=tracker_url) + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_create) # Proceed with the upload process - with open(torrent_path, 'rb') as torrentFile: + with open(torrent_file_path, 'rb') as torrentFile: files = { "file_input": ("placeholder.torrent", torrentFile, "application/x-bittorent") } @@ -1382,9 +1510,10 @@ async def upload(self, meta, url, data, disctype): if 'AntiCsrfToken' in debug_data: debug_data['AntiCsrfToken'] = '[REDACTED]' console.log(url) - console.log(debug_data) + console.log(redact_private_info(debug_data)) meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." else: + failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]PTP_upload_failure.html" with requests.Session() as session: cookiefile = f"{meta['base_dir']}/data/cookies/PTP.pickle" with open(cookiefile, 'rb') as cf: @@ -1400,14 +1529,16 @@ async def upload(self, meta, url, data, disctype): if match is not None: errorMessage = match.group(1) - raise UploadException(f"Upload to PTP failed: {errorMessage} ({response.status_code}). 
(We are still on the upload page.)") # noqa F405 + with open(failure_path, 'w', encoding='utf-8') as f: + f.write(responsetext) + meta['tracker_status'][self.tracker]['status_message'] = f"data error: see {failure_path} | {errorMessage}" # URL format in case of successful upload: https://passthepopcorn.me/torrents.php?id=9329&torrentid=91868 match = re.match(r".*?passthepopcorn\.me/torrents\.php\?id=(\d+)&torrentid=(\d+)", response.url) if match is None: - console.print(url) - console.print(data) - raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.") # noqa F405 + with open(failure_path, 'w', encoding='utf-8') as f: + f.write(responsetext) + meta['tracker_status'][self.tracker]['status_message'] = f"data error: see {failure_path}" # having UA add the torrent link as a comment. if match: diff --git a/src/trackers/PTS.py b/src/trackers/PTS.py new file mode 100644 index 000000000..fc1cc2d36 --- /dev/null +++ b/src/trackers/PTS.py @@ -0,0 +1,210 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import httpx +import os +import platform +import re +from bs4 import BeautifulSoup +from pymediainfo import MediaInfo +from src.console import console +from src.cookie_auth import CookieValidator, CookieAuthUploader +from src.trackers.COMMON import COMMON + + +class PTS: + def __init__(self, config): + self.config = config + self.common = COMMON(config) + self.cookie_validator = CookieValidator(config) + self.cookie_auth_uploader = CookieAuthUploader(config) + self.tracker = "PTS" + self.banned_groups = [] + self.source_flag = "[www.ptskit.org] PTSKIT" + self.base_url = "https://www.ptskit.org/" + self.torrent_url = "https://www.ptskit.org/details.php?id=" + self.announce = self.config['TRACKERS'][self.tracker]['announce_url'] + self.auth_token = None + self.session = httpx.AsyncClient(headers={ + 'User-Agent': f"Upload Assistant/2.3 ({platform.system()} {platform.release()})" + }, timeout=60.0) + + async def validate_credentials(self, meta): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + return await self.cookie_validator.cookie_validation( + meta=meta, + tracker=self.tracker, + test_url=f'{self.base_url}/upload.php', + success_text='forums.php', + ) + + async def get_type(self, meta): + if meta.get('anime'): + return '407' + + category_map = { + 'TV': '405', + 'MOVIE': '404' + } + + return category_map.get(meta['category']) + + async def generate_description(self, meta): + base_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" + final_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + + description_parts = [] + + # MediaInfo/BDInfo + tech_info = "" + if meta.get('is_disc') != 'BDMV': + video_file = meta['filelist'][0] + mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") + if os.path.exists(mi_template): + try: + media_info = MediaInfo.parse(video_file, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) + tech_info = str(media_info) + except Exception: + console.print("[bold red]Couldn't find the MediaInfo template[/bold red]") + mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + if os.path.exists(mi_file_path): + with open(mi_file_path, 'r', encoding='utf-8') as f: + tech_info = f.read() + else: + console.print("[bold yellow]Using normal MediaInfo for the 
description.[/bold yellow]") + mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + if os.path.exists(mi_file_path): + with open(mi_file_path, 'r', encoding='utf-8') as f: + tech_info = f.read() + else: + bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + if os.path.exists(bd_summary_file): + with open(bd_summary_file, 'r', encoding='utf-8') as f: + tech_info = f.read() + + if tech_info: + description_parts.append(tech_info) + + if os.path.exists(base_desc_path): + with open(base_desc_path, 'r', encoding='utf-8') as f: + manual_desc = f.read() + description_parts.append(manual_desc) + + # Screenshots + if f'{self.tracker}_images_key' in meta: + images = meta[f'{self.tracker}_images_key'] + else: + images = meta['image_list'] + if images: + screenshots_block = "[center][b]Screenshots[/b]\n\n" + for image in images: + img_url = image['img_url'] + web_url = image['web_url'] + screenshots_block += f"[url={web_url}][img]{img_url}[/img][/url] " + screenshots_block += "[/center]" + description_parts.append(screenshots_block) + + custom_description_header = self.config['DEFAULT'].get('custom_description_header', '') + if custom_description_header: + description_parts.append(custom_description_header) + + description_parts.append(f"[right][url=https://github.com/Audionut/Upload-Assistant][size=1]{meta['ua_signature']}[/size][/url][/right]") + + final_description = "\n\n".join(filter(None, description_parts)) + from src.bbcode import BBCODE + bbcode = BBCODE() + desc = final_description + desc = desc.replace("[user]", "").replace("[/user]", "") + desc = desc.replace("[align=left]", "").replace("[/align]", "") + desc = desc.replace("[right]", "").replace("[/right]", "") + desc = desc.replace("[align=right]", "").replace("[/align]", "") + desc = desc.replace("[sup]", "").replace("[/sup]", "") + desc = desc.replace("[sub]", "").replace("[/sub]", "") + desc = desc.replace("[alert]", "").replace("[/alert]", "") + desc = desc.replace("[note]", "").replace("[/note]", "") + desc = desc.replace("[hr]", "").replace("[/hr]", "") + desc = desc.replace("[h1]", "[u][b]").replace("[/h1]", "[/b][/u]") + desc = desc.replace("[h2]", "[u][b]").replace("[/h2]", "[/b][/u]") + desc = desc.replace("[h3]", "[u][b]").replace("[/h3]", "[/b][/u]") + desc = desc.replace("[ul]", "").replace("[/ul]", "") + desc = desc.replace("[ol]", "").replace("[/ol]", "") + desc = desc.replace("[hide]", "").replace("[/hide]", "") + desc = re.sub(r"\[center\]\[spoiler=.*? NFO:\]\[code\](.*?)\[/code\]\[/spoiler\]\[/center\]", r"", desc, flags=re.DOTALL) + desc = bbcode.convert_comparison_to_centered(desc, 1000) + desc = bbcode.remove_spoiler(desc) + desc = re.sub(r'\n{3,}', '\n\n', desc) + + with open(final_desc_path, 'w', encoding='utf-8') as f: + f.write(desc) + + return desc + + async def search_existing(self, meta, disctype): + mandarin = await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=['mandarin', 'chinese'], check_audio=True, check_subtitle=True + ) + + if not mandarin: + response = input("Warning: Mandarin subtitle or audio not found. Do you want to continue with the upload anyway? 
(y/n): ") + if response.lower() not in ['y', 'yes']: + print("Upload cancelled by user.") + meta['skipping'] = f"{self.tracker}" + return + + search_url = f"{self.base_url}/torrents.php" + params = { + 'incldead': 1, + 'search': meta['imdb_info']['imdbID'], + 'search_area': 4 + } + found_items = [] + + try: + response = await self.session.get(search_url, params=params, cookies=self.session.cookies) + response.raise_for_status() + + soup = BeautifulSoup(response.text, 'html.parser') + + torrents_table = soup.find('table', class_='torrents') + + if torrents_table: + torrent_name_tables = torrents_table.find_all('table', class_='torrentname') + + for torrent_table in torrent_name_tables: + name_tag = torrent_table.find('b') + if name_tag: + torrent_name = name_tag.get_text(strip=True) + found_items.append(torrent_name) + + except Exception as e: + print(f"An error occurred while searching: {e}") + + return found_items + + async def get_data(self, meta): + data = { + 'name': meta['name'], + 'url': str(meta.get('imdb_info', {}).get('imdb_url', '')), + 'descr': await self.generate_description(meta), + 'type': await self.get_type(meta), + } + + return data + + async def upload(self, meta, disctype): + self.session.cookies = await self.cookie_validator.load_session_cookies(meta, self.tracker) + data = await self.get_data(meta) + + await self.cookie_auth_uploader.handle_upload( + meta=meta, + tracker=self.tracker, + source_flag=self.source_flag, + torrent_url=self.torrent_url, + data=data, + torrent_field_name='file', + upload_cookies=self.session.cookies, + upload_url=f"{self.base_url}/takeupload.php", + id_pattern=r'download\.php\?id=([^&]+)', + success_status_code="302, 303", + ) + + return diff --git a/src/trackers/PTT.py b/src/trackers/PTT.py index 152a9f61b..bdb7c65c8 100644 --- a/src/trackers/PTT.py +++ b/src/trackers/PTT.py @@ -1,200 +1,28 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import os -import glob -import httpx - from src.trackers.COMMON import COMMON -from src.console import console - +from src.trackers.UNIT3D import UNIT3D -class PTT(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class PTT(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='PTT') self.config = config + self.common = COMMON(config) self.tracker = 'PTT' self.source_flag = 'PTT' - self.upload_url = 'https://polishtorrent.top/api/torrents/upload' - self.search_url = 'https://polishtorrent.top/api/torrents/filter' - self.torrent_url = 'https://polishtorrent.top/torrents/' - self.signature = "\n[center]Created by Audionut's Upload Assistant[/center]" + self.base_url = 'https://polishtorrent.top/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = ['ViP', 'BiRD', 'M@RTiNU$', 'inTGrity', 'CiNEMAET', 'MusicET', 'TeamET', 'R2D2'] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def 
get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def edit_name(self, meta): + async def get_name(self, meta): ptt_name = meta['name'] if meta.get('original_language', '') == 'pl' and meta.get('imdb_info'): ptt_name = ptt_name.replace(meta.get('aka', ''), '') ptt_name = ptt_name.replace(meta['title'], meta['imdb_info']['aka']) - return ptt_name.strip() - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - ptt_name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': ptt_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # 
adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://polishtorrent.top/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'name': ptt_name.strip()} diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 38938d4a6..0ad541796 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -1,129 +1,48 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord import asyncio -import requests -import platform -import os -import glob import httpx -from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class R4E(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class R4E(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='R4E') self.config = config + self.common = COMMON(config) self.tracker = 'R4E' self.source_flag = 'R4E' - # self.signature = f"\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.signature = None - self.banned_groups = [""] + self.base_url = 'https://racing4everyone.eu/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category'], meta['tmdb'], meta) - type_id = await self.get_type_id(meta['resolution']) - await 
common.unit3d_edit_desc(meta, self.tracker, self.signature) - name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - # 'personal_release' : int(meta.get('personalrelease', False)), NOT IMPLEMENTED on R4E - # 'internal' : 0, - # 'featured' : 0, - # 'free' : 0, - # 'double_up' : 0, - # 'sticky' : 0, - } - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - url = f"https://racing4everyone.eu/api/torrents/upload?api_token={self.config['TRACKERS']['R4E']['api_key'].strip()}" - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] is False: - response = requests.post(url=url, files=files, data=data, headers=headers) - try: - - meta['tracker_status'][self.tracker]['status_message'] = response.json() - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def edit_name(self, meta): - name = meta['name'] - return name - - async def get_cat_id(self, category_name, tmdb_id, meta): + async def get_category_id(self, meta): # Use stored genre IDs if available if meta and meta.get('genre_ids'): genre_ids = meta['genre_ids'].split(',') is_docu = '99' in genre_ids - if category_name == 'MOVIE': + if meta['category'] == 'MOVIE': category_id = '70' # Motorsports Movie if is_docu: category_id = '66' # Documentary - elif category_name == 'TV': + elif meta['category'] == 'TV': category_id = '79' # TV Series if is_docu: category_id = '2' # TV Documentary else: category_id = '24' - return category_id + return {'category_id': category_id} - async def get_type_id(self, type): + async def get_type_id(self, meta): type_id = { '8640p': '2160p', '4320p': '2160p', @@ -136,15 +55,29 @@ async def get_type_id(self, type): '576i': 'SD', '480p': 'SD', '480i': 'SD' - }.get(type, '10') - return type_id + }.get(meta['type'], '10') + return {'type_id': type_id} + + async def get_personal_release(self, meta): + return {} + + async def get_internal(self, meta): + return {} + + async def get_featured(self, meta): + return {} + + async def get_free(self, meta): + return {} + + async def get_doubleup(self, meta): + return {} + + async def get_sticky(self, meta): + return {} - async def is_docu(self, genres): - is_docu = False - for each in genres: - if each['id'] == 99: - is_docu = True - return is_docu + async def get_resolution_id(self, meta): + return {} async def search_existing(self, meta, disctype): dupes = [] @@ -152,8 +85,8 @@ async def search_existing(self, meta, disctype): params = { 'api_token': self.config['TRACKERS']['R4E']['api_key'].strip(), 'tmdb': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), + 'categories[]': (await self.get_category_id(meta))['category_id'], + 'types[]': await self.get_type_id(meta), 'name': "" } if meta['category'] == 'TV': diff --git a/src/trackers/RAS.py b/src/trackers/RAS.py index 9e877f669..b3c1aca0e 100644 --- a/src/trackers/RAS.py +++ b/src/trackers/RAS.py @@ -1,172 +1,49 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx -from src.trackers.COMMON import COMMON +from data.config import config from src.console import console +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language +from src.tmdb import get_logo +from src.trackers.UNIT3D import UNIT3D -class RAS(): +class RAS(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='RAS') self.config = config self.tracker = 'RAS' self.source_flag = 'Rastastugan' - self.upload_url = 'https://rastastugan.org/api/torrents/upload' - self.search_url = 'https://rastastugan.org/api/torrents/filter' - self.torrent_url = 'https://rastastugan.org/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://rastastugan.org/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = ['YTS', 'YiFY', 'LAMA', 'MeGUSTA', 
'NAHOM', 'GalaxyRG', 'RARBG', 'INFINITY'] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, 
self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + async def get_additional_checks(self, meta): + should_continue = True + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + nordic_languages = ['Danish', 'Swedish', 'Norwegian', 'Icelandic', 'Finnish', 'English'] + if not any(lang in meta.get('audio_languages', []) for lang in nordic_languages) and not any(lang in meta.get('subtitle_languages', []) for lang in nordic_languages): + console.print(f'[bold red]{self.tracker} requires at least one Nordic/English audio or subtitle track.') + should_continue = False + + return should_continue + + async def get_description(self, meta): + if meta.get('logo', "") == "": + TMDB_API_KEY = config['DEFAULT'].get('tmdb_api', False) + TMDB_BASE_URL = "https://api.themoviedb.org/3" + tmdb_id = meta.get('tmdb') + category = meta.get('category') + debug = meta.get('debug') + logo_languages = ['da', 'sv', 'no', 'fi', 'is', 'en'] + logo_path = await get_logo(tmdb_id, category, debug, logo_languages=logo_languages, TMDB_API_KEY=TMDB_API_KEY, TMDB_BASE_URL=TMDB_BASE_URL) + if logo_path: + meta['logo'] = logo_path + + return {'description': await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker)} diff --git a/src/trackers/RF.py b/src/trackers/RF.py index aef14a087..51a62a418 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -1,127 +1,44 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform import re -import os -import glob -import httpx from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.UNIT3D import UNIT3D -class RF(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class RF(UNIT3D): def __init__(self, config): + super().__init__(config, 
tracker_name='RF') self.config = config + self.common = COMMON(config) self.tracker = 'RF' self.source_flag = 'ReelFliX' - self.upload_url = 'https://reelflix.xyz/api/torrents/upload' - self.search_url = 'https://reelflix.xyz/api/torrents/filter' - self.torrent_url = 'https://reelflix.xyz/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://reelflix.cc/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - rf_name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - open_torrent = open(torrent_file_path, 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': rf_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': 
self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://reelflix.xyz/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() + async def get_additional_checks(self, meta): + should_continue = True + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if not meta['unattended']: + console.print('[bold red]Erotic not allowed at RF.') + should_continue = False + if meta.get('category') == "TV": + if not meta['unattended']: + console.print('[bold red]RF only ALLOWS Movies.') + should_continue = False - async def edit_name(self, meta): + return should_continue + + async def get_name(self, meta): rf_name = meta['name'] tag_lower = meta['tag'].lower() invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] @@ -131,15 +48,9 @@ async def edit_name(self, meta): rf_name = re.sub(f"-{invalid_tag}", "", rf_name, flags=re.IGNORECASE) rf_name = f"{rf_name}-NoGroup" - return rf_name - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - }.get(category_name, '0') - return category_id + return {'name': rf_name} - async def get_type_id(self, type): + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): type_id = { 'DISC': '43', 'REMUX': '40', @@ -148,10 +59,19 @@ async def get_type_id(self, type): # 'FANRES': '6', 'ENCODE': '41', 'HDTV': '35', - }.get(type, '0') - return type_id + } + if mapping_only: + return type_id + elif reverse: + return {v: k for k, v in type_id.items()} + elif type is not None: + return {'type_id': type_id.get(type, '0')} + else: + meta_type = meta.get('type', '') + resolved_id = type_id.get(meta_type, '0') + return {'type_id': resolved_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta, resolution=None, reverse=False, mapping_only=False): resolution_id = { # '8640p':'10', '4320p': '1', @@ -164,52 +84,14 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def search_existing(self, meta, disctype): - disallowed_keywords = {'XXX', 'Erotic', 'softcore'} - if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])): - console.print('[bold red]Erotic not allowed at RF.') - meta['skipping'] = "RF" - return - if meta.get('category') == "TV": - console.print('[bold red]RF only ALLOWS Movies.') - meta['skipping'] = "RF" - return - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await 
self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - attributes = each['attributes'] - result = { - 'name': attributes['name'], - 'size': attributes['size'] - } - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + if mapping_only: + return resolution_id + elif reverse: + return {v: k for k, v in resolution_id.items()} + elif resolution is not None: + return {'resolution_id': resolution_id.get(resolution, '10')} + else: + meta_resolution = meta.get('resolution', '') + resolved_id = resolution_id.get(meta_resolution, '10') + return {'resolution_id': resolved_id} diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index ed4d56b80..4cd5d4cdd 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -1,14 +1,15 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord +import aiofiles import asyncio -import requests import base64 -import re import datetime import httpx - -from src.trackers.COMMON import COMMON +import re from src.console import console +from src.get_desc import DescriptionBuilder +from src.trackers.COMMON import COMMON class RTF(): @@ -19,6 +20,7 @@ class RTF(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config self.tracker = 'RTF' @@ -33,12 +35,14 @@ def __init__(self, config): async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker, self.forum_link) if meta['bdinfo'] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as f: + bd_dump = await f.read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + mi_dump = await f.read() bd_dump = None screenshots = [] @@ -54,7 +58,7 @@ async def upload(self, meta, disctype): # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. 
'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump is None else f"{bd_dump}", "nfo": "", - "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + str(meta['imdb_id'])) + "/", + "url": str(meta.get('imdb_info', {}).get('imdb_url', '') + '/'), # auto pulled from IMDB "descr": "This is short description", "poster": meta["poster"] if meta["poster"] is not None else "", @@ -63,8 +67,8 @@ async def upload(self, meta, disctype): 'isAnonymous': self.config['TRACKERS'][self.tracker]["anon"], } - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') as binary_file: - binary_file_data = binary_file.read() + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') as binary_file: + binary_file_data = await binary_file.read() base64_encoded_data = base64.b64encode(binary_file_data) base64_message = base64_encoded_data.decode('utf-8') json_data['file'] = base64_message @@ -76,33 +80,79 @@ async def upload(self, meta, disctype): } if meta['debug'] is False: - response = requests.post(url=self.upload_url, json=json_data, headers=headers) try: - response_json = response.json() - meta['tracker_status'][self.tracker]['status_message'] = response.json() - - t_id = response_json['torrent']['id'] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id)) - + async with httpx.AsyncClient(timeout=15.0) as client: + response = await client.post(url=self.upload_url, json=json_data, headers=headers) + try: + response_json = response.json() + meta['tracker_status'][self.tracker]['status_message'] = response.json() + + t_id = response_json['torrent']['id'] + meta['tracker_status'][self.tracker]['torrent_id'] = t_id + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, + self.config['TRACKERS'][self.tracker].get('announce_url'), + "https://retroflix.club/browse/t/" + str(t_id)) + + except Exception: + console.print("It may have uploaded, go check") + return + except httpx.TimeoutException: + meta['tracker_status'][self.tracker]['status_message'] = "data error: RTF request timed out while uploading." + except httpx.RequestError as e: + meta['tracker_status'][self.tracker]['status_message'] = f"data error: An error occurred while making the request: {e}" except Exception: meta['tracker_status'][self.tracker]['status_message'] = "data error - It may have uploaded, go check" return + else: - console.print("[cyan]Request Data:") - console.print(json_data) + console.print("[cyan]RTF Request Data:") + debug_data = json_data.copy() + if 'file' in debug_data and debug_data['file']: + debug_data['file'] = debug_data['file'][:10] + '...' + console.print(debug_data) meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
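Aside (editor's note, not part of the patch): the debug branch above copies the JSON payload and truncates the base64-encoded `file` field to its first few characters before printing, so console output isn't flooded with the encoded torrent. A minimal standalone sketch of that redaction idea follows; the helper name `redact_payload` and the sample payload are illustrative only and do not exist in the codebase.

```python
# Illustrative sketch of truncating oversized payload fields before debug logging.
import base64
import json


def redact_payload(payload: dict, fields=("file",), keep=10) -> dict:
    """Return a copy of `payload` with the named string fields truncated for logging."""
    redacted = payload.copy()
    for field in fields:
        value = redacted.get(field)
        if isinstance(value, str) and len(value) > keep:
            redacted[field] = value[:keep] + "..."
    return redacted


if __name__ == "__main__":
    torrent_bytes = b"d8:announce..."  # placeholder bytes, not a real torrent
    payload = {
        "name": "Example.Release.1080p",
        "file": base64.b64encode(torrent_bytes).decode("utf-8"),
    }
    # Only the redacted copy is printed; the original payload is left intact for upload.
    print(json.dumps(redact_payload(payload), indent=2))
```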
async def search_existing(self, meta, disctype): - disallowed_keywords = {'XXX', 'Erotic', 'softcore'} - if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])): + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): console.print('[bold red]Erotic not allowed at RTF.') meta['skipping'] = "RTF" return [] - if meta.get('category') == "TV" and meta.get('tv_year') is not None: - meta['year'] = meta['tv_year'] - if datetime.date.today().year - meta['year'] <= 9: + year = meta.get('year') + # Collect all possible years from different sources + years = [] + + # IMDB end year + imdb_end_year = meta.get('imdb_info', {}).get('end_year') + if imdb_end_year: + years.append(int(imdb_end_year)) + + # TVDB episode year + tvdb_episode_year = meta.get('tvdb_episode_year') + if tvdb_episode_year: + years.append(int(tvdb_episode_year)) + + # Get most recent aired date from all TVDB episodes + tvdb_episodes = meta.get('tvdb_episode_data', {}).get('episodes', []) + if tvdb_episodes: + for episode in tvdb_episodes: + aired_date = episode.get('aired', '') + if aired_date and '-' in aired_date: + try: + episode_year = int(aired_date.split('-')[0]) + years.append(episode_year) + except (ValueError, IndexError): + continue + + # Use the most recent year found, fallback to meta year + most_recent_year = max(years) if years else year + + # Update year with the most recent year for TV shows + if meta.get('category') == "TV": + year = most_recent_year + if datetime.date.today().year - year <= 9: console.print("[red]Content must be older than 10 Years to upload at RTF") meta['skipping'] = "RTF" return [] @@ -115,17 +165,37 @@ async def search_existing(self, meta, disctype): params = {'includingDead': '1'} if meta['imdb_id'] != 0: - params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + str(meta['imdb_id']) + params['imdbId'] = str(meta['imdb_id']) if str(meta['imdb_id']).startswith("tt") else "tt" + str(meta['imdb_id']) else: params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') + def build_download_url(https://github.com/entry): + torrent_id = entry.get('id') + torrent_url = entry.get('url', '') + if not torrent_id and isinstance(torrent_url, str): + match = re.search(r"/browse/t/(\d+)", torrent_url) + if match: + torrent_id = match.group(1) + + if torrent_id: + return f"https://retroflix.club/api/torrent/%7Btorrent_id%7D/download" + + return torrent_url + try: async with httpx.AsyncClient(timeout=5.0) as client: response = await client.get(self.search_url, params=params, headers=headers) if response.status_code == 200: data = response.json() for each in data: - result = each['name'] + download_url = build_download_url(https://github.com/each) + result = { + 'name': each['name'], + 'size': each['size'], + 'files': each['name'], + 'link': each['url'], + 'download': download_url, + } dupes.append(result) else: console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") @@ -148,13 +218,21 @@ async def api_test(self, meta): 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - response = requests.get('https://retroflix.club/api/test', headers=headers) + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get('https://retroflix.club/api/test', headers=headers) - if response.status_code != 200: - console.print('[bold red]Your API key is incorrect SO generating a new one') + if response.status_code != 200: + console.print('[bold red]Your API key is incorrect SO generating a new one') + await self.generate_new_api(meta) + else: + return True + except httpx.RequestError as e: + console.print(f'[bold red]Error testing API: {str(e)}') + await self.generate_new_api(meta) + except Exception as e: + console.print(f'[bold red]Unexpected error testing API: {str(e)}') await self.generate_new_api(meta) - else: - return async def generate_new_api(self, meta): headers = { @@ -198,6 +276,7 @@ async def generate_new_api(self, meta): file.write(new_config_data) console.print(f'[bold green]API Key successfully saved to {config_path}') + return True else: console.print('[bold red]API response does not contain a token.') else: diff --git a/src/trackers/SAM.py b/src/trackers/SAM.py index 6586daee3..6291b160d 100644 --- a/src/trackers/SAM.py +++ b/src/trackers/SAM.py @@ -1,171 +1,93 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx +import re from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class SAM(): +class SAM(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name="SAM") self.config = config - self.tracker = 'SAM' - self.source_flag = 'SAMARITANO' - self.upload_url = 'https://samaritano.cc/api/torrents/upload' - self.search_url = 'https://samaritano.cc/api/torrents/filter' - self.torrent_url = 'https://samaritano.cc/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.common = COMMON(config) + self.tracker = "SAM" + self.source_flag = "SAMARITANO" + self.base_url = "https://samaritano.cc/" + self.id_url = f"{self.base_url}/api/torrents/" + self.upload_url = f"{self.base_url}/api/torrents/upload" + self.search_url = f"{self.base_url}/api/torrents/filter" + self.torrent_url = f"{self.base_url}/torrents/" + self.requests_url = f"{self.base_url}/api/requests/filter" + self.banned_groups = [] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_name(self, meta): + name = ( + meta["name"] + .replace("DD+ ", "DDP") + .replace("DD ", "DD") + .replace("AAC ", "AAC") + .replace("FLAC ", "FLAC") + .replace("Dubbed", "") + .replace("Dual-Audio", "") + ) - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id + # If it is a Series or Anime, remove the year from the title. 
+ if meta.get("category") in ["TV", "ANIMES"]: + year = str(meta.get("year", "")) + if year and year in name: + name = name.replace(year, "").replace(f"({year})", "").strip() - async def get_res_id(self, resolution): - resolution_id = { - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id + # Remove the AKA title, unless it is Brazilian + if meta.get("original_language") != "pt": + name = name.replace(meta["aka"], "") - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + # If it is Brazilian, use only the AKA title, deleting the foreign title + if meta.get("original_language") == "pt" and meta.get("aka"): + aka_clean = meta["aka"].replace("AKA", "").strip() + title = meta.get("title") + name = name.replace(meta["aka"], "").replace(title, aka_clean).strip() - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + sam_name = name + tag_lower = meta["tag"].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + audio_tag = "" + if meta.get("audio_languages"): + audio_languages = set(meta["audio_languages"]) - if meta['debug'] is False: - response = requests.post(url=self.upload_url, 
files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() + if "Portuguese" in audio_languages: + if len(audio_languages) >= 3: + audio_tag = " MULTI" + elif len(audio_languages) == 2: + audio_tag = " DUAL" + else: + audio_tag = "" - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) + if audio_tag: + if "-" in sam_name: + parts = sam_name.rsplit("-", 1) + sam_name = f"{parts[0]}{audio_tag}-{parts[1]}" else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) + sam_name += audio_tag + + if meta["tag"] == "" or any( + invalid_tag in tag_lower for invalid_tag in invalid_tags + ): + for invalid_tag in invalid_tags: + sam_name = re.sub(f"-{invalid_tag}", "", sam_name, flags=re.IGNORECASE) + sam_name = f"{sam_name}-NoGroup" + + return {"name": re.sub(r"\s{2,}", " ", sam_name)} + + async def get_additional_data(self, meta): + data = { + "mod_queue_opt_in": await self.get_flag(meta, "modq"), + } + + return data - return dupes + async def get_additional_checks(self, meta): + return await self.common.check_language_requirements( + meta, self.tracker, languages_to_check=["portuguese", "português"], check_audio=True, check_subtitle=True + ) diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 7e9d8ff3f..991a67774 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -1,253 +1,1199 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord +from typing import Literal import asyncio -import requests -import platform +import aiofiles +import certifi +import cli_ui import os -import glob -import httpx +import pycountry +import random +import re +import requests +from babel import Locale +from babel.core import UnknownLocaleError +from src.audio import get_audio_v2 from src.languages import process_desc_language from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D + +_shri_session_data = {} -class SHRI(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class SHRI(UNIT3D): + """ShareIsland tracker implementation with Italian localization support""" + + # Pre-compile regex patterns for performance + INVALID_TAG_PATTERN = re.compile(r"-(nogrp|nogroup|unknown|unk)", re.IGNORECASE) + WHITESPACE_PATTERN = re.compile(r"\s{2,}") + MARKER_PATTERN = re.compile(r"\b(UNTOUCHED|VU1080|VU720|VU)\b", re.IGNORECASE) + CINEMA_NEWS_PATTERN = re.compile( + r"\b(HDTS|TS|MD|LD|CAM|HDCAM|TC|HDTC)\b", re.IGNORECASE + ) + CINEMA_VIDEO_PATTERN = re.compile(r"\b(HDTS|TS|CAM|HDCAM|TC|HDTC)\b", re.IGNORECASE) + CINEMA_AUDIO_PATTERN = re.compile(r"\b(MD|LD)\b", re.IGNORECASE) + def __init__(self, config): + super().__init__(config, tracker_name="SHRI") self.config = config - self.tracker = 'SHRI' - self.source_flag = 'Shareisland' - self.search_url = 'https://shareisland.org/api/torrents/filter' - self.upload_url = 'https://shareisland.org/api/torrents/upload' - self.torrent_url = 'https://shareisland.org/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.common = COMMON(config) + self.tracker = "SHRI" + self.source_flag = "ShareIsland" + self.base_url = "https://shareisland.org/" + self.id_url = f"{self.base_url}/api/torrents/" + self.upload_url = f"{self.base_url}/api/torrents/upload" + self.search_url = f"{self.base_url}/api/torrents/filter" + self.requests_url = f"{self.base_url}/api/requests/filter" + self.torrent_url = f"{self.base_url}/torrents/" self.banned_groups = [] - pass - - async 
def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - name = await self.edit_name(meta) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - open_torrent = open(torrent_file_path, 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - bhd_dir_path = os.path.join(base_dir, "tmp", uuid, "bhd.nfo") - bhd_files = glob.glob(bhd_dir_path) - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files and not bhd_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - } - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://shareisland.org/torrents/" + t_id) - except 
Exception: - console.print("It may have uploaded, go check") - return + def _get_language_code(self, track_or_string): + """Extract and normalize language to ISO alpha-2 code""" + if isinstance(track_or_string, dict): + lang = track_or_string.get("Language", "") + if isinstance(lang, dict): + lang = lang.get("String", "") else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 - - async def edit_name(self, meta): - shareisland_name = meta['name'] - media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 - resolution = meta.get('resolution') - video_codec = meta.get('video_codec') - video_encode = meta.get('video_encode') - name_type = meta.get('type', "") - source = meta.get('source', "") - - if name_type == "DVDRIP": - shareisland_name = shareisland_name.replace(f"{meta['source']}", f"{resolution} {meta['source']}", 1) - shareisland_name = shareisland_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) - - if not meta.get('audio_languages'): + lang = track_or_string + if not lang: + return "" + lang_str = str(lang).lower() + + # Strip country code if present (e.g., "en-US" → "en") + if "-" in lang_str: + lang_str = lang_str.split("-")[0] + + if len(lang_str) == 2: + return lang_str + try: + lang_obj = ( + pycountry.languages.get(name=lang_str.title()) + or pycountry.languages.get(alpha_2=lang_str) + or pycountry.languages.get(alpha_3=lang_str) + ) + return lang_obj.alpha_2.lower() if lang_obj else lang_str + except (AttributeError, KeyError, LookupError): + return lang_str + + async def get_additional_data(self, meta): + """Get additional tracker-specific upload data""" + return {"mod_queue_opt_in": await self.get_flag(meta, "modq")} + + async def get_name(self, meta): + """ + Rebuild release name from meta components following ShareIsland naming rules. 
+ + Handles: + - REMUX detection from filename markers (VU/UNTOUCHED) + - Italian title substitution from IMDb AKAs + - Multi-language audio tags (ITALIAN - ENGLISH format) + - Italian subtitle [SUBS] tag when no Italian audio present + - Release group tag cleaning and validation + - DISC region injection + """ + if not meta.get("language_checked", False): await process_desc_language(meta, desc=None, tracker=self.tracker) - elif meta.get('audio_languages'): - audio_languages = meta['audio_languages'][0] - if audio_languages: - if name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): - shareisland_name = shareisland_name.replace(str(meta['year']), f"{meta['year']} {audio_languages}", 1) - elif not meta.get('is_disc') == "BDMV": - shareisland_name = shareisland_name.replace(meta['resolution'], f"{audio_languages} {meta['resolution']}", 1) - - if meta['is_disc'] == "DVD" or (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): - shareisland_name = shareisland_name.replace((meta['source']), f"{resolution} {meta['source']}", 1) - shareisland_name = shareisland_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) - - return shareisland_name - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type=None, reverse=False): + + # Title and basic info + title = meta.get("title", "") + italian_title = self._get_italian_title(meta.get("imdb_info", {})) + use_italian_title = self.config["TRACKERS"][self.tracker].get( + "use_italian_title", False + ) + if italian_title and use_italian_title: + title = italian_title + + year = str(meta.get("year", "")) + resolution = meta.get("resolution", "") + source = meta.get("source", "") + if isinstance(source, list): + source = source[0] if source else "" + video_codec = meta.get("video_codec", "") + video_encode = meta.get("video_encode", "") + + # TV specific + season = meta.get("season") or "" + episode = meta.get("episode") or "" + episode_title = meta.get("episode_title") or "" + part = meta.get("part") or "" + + # Optional fields + edition = meta.get("edition") or "" + hdr = meta.get("hdr") or "" + uhd = meta.get("uhd") or "" + three_d = meta.get("3D") or "" + + # Clean audio: remove Dual-Audio and trailing language codes + audio = await self._get_best_italian_audio_format(meta) + + # Build audio language tag: original -> ITALIAN -> ENGLISH -> others/Multi (4+) + audio_lang_str = "" + if meta.get("audio_languages"): + # Normalize all to full names + audio_langs = [ + self._get_language_name(lang.upper()) + for lang in meta["audio_languages"] + ] + audio_langs = [lang for lang in audio_langs if lang] # Remove empty + audio_langs = list(dict.fromkeys(audio_langs)) # Dedupe preserving order + + orig_lang_iso = meta.get("original_language", "").upper() + orig_lang_full = self._get_language_name(orig_lang_iso) + + result = [] + remaining = audio_langs.copy() + + # Priority 1: Original language + if orig_lang_full and orig_lang_full in remaining: + result.append(orig_lang_full) + remaining.remove(orig_lang_full) + + # Priority 2: Italian (if not already added) + if "ITALIAN" in remaining: + result.append("ITALIAN") + remaining.remove("ITALIAN") + + # Priority 3: English (if not already added) + if "ENGLISH" in remaining: + result.append("ENGLISH") + remaining.remove("ENGLISH") + + # Handle remaining: show individually if <=3 total, else add Multi + if len(result) + len(remaining) > 3: + 
result.append("Multi") + else: + result.extend(remaining) + + audio_lang_str = " - ".join(result) + + effective_type = self.get_effective_type(meta) + + if effective_type != "DISC": + source = source.replace("Blu-ray", "BluRay") + + # Detect Hybrid from filename if not in title + hybrid = "" + if ( + not edition + and (meta.get("webdv", False) or isinstance(meta.get("source", ""), list)) + and "HYBRID" not in title.upper() + ): + hybrid = "Hybrid" + + repack = meta.get("repack", "").strip() + + name = None + # Build name per ShareIsland type-specific format + if effective_type == "DISC": + # Inject region from validated session data if available + region = _shri_session_data.get(meta["uuid"], {}).get( + "_shri_region_name" + ) or meta.get("region", "") + if meta["is_disc"] == "BDMV": + # BDMV: Title Year 3D Edition Hybrid REPACK Resolution Region UHD Source HDR VideoCodec Audio + name = f"{title} {year} {season}{episode} {three_d} {edition} {hybrid} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" + elif meta["is_disc"] == "DVD": + dvd_size = meta.get("dvd_size", "") + # DVD: Title Year 3D Edition REPACK Resolution Region Source DVDSize Audio + name = f"{title} {year} {season}{episode} {three_d} {edition} {repack} {resolution} {region} {source} {dvd_size} {audio}" + elif meta["is_disc"] == "HDDVD": + # HDDVD: Title Year Edition REPACK Resolution Region Source VideoCodec Audio + name = f"{title} {year} {edition} {repack} {resolution} {region} {source} {video_codec} {audio}" + + elif effective_type == "REMUX": + # REMUX: Title Year 3D LANG Edition Hybrid REPACK Resolution UHD Source REMUX HDR VideoCodec Audio + name = f"{title} {year} {season}{episode} {episode_title} {part} {three_d} {audio_lang_str} {edition} {hybrid} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" + + elif effective_type in ("DVDRIP", "BRRIP"): + type_str = "DVDRip" if effective_type == "DVDRIP" else "BRRip" + # DVDRip/BRRip: Title Year LANG Edition Hybrid REPACK Resolution Type Audio HDR VideoCodec + name = f"{title} {year} {season} {audio_lang_str} {edition} {hybrid} {repack} {resolution} {type_str} {audio} {hdr} {video_encode}" + + elif effective_type in ("ENCODE", "HDTV"): + # Encode/HDTV: Title Year LANG Edition Hybrid REPACK Resolution UHD Source Audio HDR VideoCodec + name = f"{title} {year} {season}{episode} {episode_title} {part} {audio_lang_str} {edition} {hybrid} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" + + elif effective_type in ("WEBDL", "WEBRIP"): + service = meta.get("service", "") + type_str = "WEB-DL" if effective_type == "WEBDL" else "WEBRip" + # WEB: Title Year LANG Edition Hybrid REPACK Resolution UHD Service Type Audio HDR VideoCodec + name = f"{title} {year} {season}{episode} {episode_title} {part} {audio_lang_str} {edition} {hybrid} {repack} {resolution} {uhd} {service} {type_str} {audio} {hdr} {video_encode}" + + elif effective_type == "CINEMA_NEWS": + basename_upper = self.get_basename(meta).upper() + markers = [] + + video_match = self.CINEMA_VIDEO_PATTERN.search(basename_upper) + if video_match: + markers.append(video_match.group(0)) + + audio_match = self.CINEMA_AUDIO_PATTERN.search(basename_upper) + if audio_match: + markers.append(audio_match.group(0)) + + source_marker = " ".join(markers) + + # Cinema News: Title Year LANG Edition REPACK Resolution Source Audio VideoCodec + name = f"{title} {year} {audio_lang_str} {edition} {repack} {resolution} {source_marker} {audio} {video_encode}" + + else: + # Fallback: use 
original name with cleaned audio + name = meta["name"].replace("Dual-Audio", "").strip() + + # Ensure name is always a string + if not name: + name = meta.get("name", "UNKNOWN") + + # Add [SUBS] for Italian subtitles without Italian audio + if not self._has_italian_audio(meta) and self._has_italian_subtitles(meta): + name = f"{name} [SUBS]" + + # Cleanup whitespace + name = self.WHITESPACE_PATTERN.sub(" ", name).strip() + + # Extract tag and append if valid + tag = self._extract_clean_release_group(meta, name) + if tag: + name = f"{name}-{tag}" + + return {"name": name} + + def _extract_clean_release_group(self, meta, current_name): + """Extract release group - only accepts VU/UNTOUCHED markers from filename""" + tag = meta.get("tag", "").strip().lstrip("-") + if tag and " " not in tag and not self.INVALID_TAG_PATTERN.search(tag): + return tag + + basename = self.get_basename(meta) + # Get extension from mediainfo and remove it + ext = ( + meta.get("mediainfo", {}) + .get("media", {}) + .get("track", [{}])[0] + .get("FileExtension", "") + ) + name_no_ext = ( + basename[: -len(ext) - 1] + if ext and basename.endswith(f".{ext}") + else basename + ) + parts = re.split(r"[-.]", name_no_ext) + if not parts: + return "NoGroup" + + potential_tag = parts[-1].strip() + # Handle space-separated components + if " " in potential_tag: + potential_tag = potential_tag.split()[-1] + + if ( + not potential_tag + or len(potential_tag) > 30 + or not potential_tag.replace("_", "").isalnum() + ): + return "NoGroup" + + # ONLY accept if it's a VU/UNTOUCHED marker + if not self.MARKER_PATTERN.search(potential_tag): + return "NoGroup" + + return potential_tag + + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): + """Map release type to ShareIsland type IDs""" type_mapping = { - 'DISC': '26', - 'REMUX': '7', - 'WEBDL': '27', - 'WEBRIP': '15', - 'HDTV': '6', - 'ENCODE': '15', + "CINEMA_NEWS": "42", + "DISC": "26", + "REMUX": "7", + "WEBDL": "27", + "WEBRIP": "15", + "HDTV": "33", + "ENCODE": "15", + "DVDRIP": "15", + "BRRIP": "15", } - if reverse: - # Return a reverse mapping of type IDs to type names + if mapping_only: + return type_mapping + + elif reverse: return {v: k for k, v in type_mapping.items()} elif type is not None: - # Return the specific type ID - return type_mapping.get(type, '0') + return {"type_id": type_mapping.get(type, "0")} else: - # Return the full mapping - return type_mapping + effective_type = self.get_effective_type(meta) + type_id = type_mapping.get(effective_type, "0") + return {"type_id": type_id} - async def get_res_id(self, resolution=None, reverse=False): - resolution_mapping = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9', - } + async def get_additional_checks(self, meta) -> Literal[True]: + """ + Validate and prompt for DVD/HDDVD region/distributor before upload. + Stores validated IDs in module-level dict keyed by UUID for use during upload. + """ + if meta.get("is_disc") in ["DVD", "HDDVD"]: + region_name = meta.get("region") + + # Prompt for region if not in meta + if not region_name: + if not meta.get("unattended") or meta.get("unattended_confirm"): + while True: + region_name = cli_ui.ask_string( + "SHRI: Region code not found for disc. 
Please enter it manually (mandatory): " + ) + region_name = ( + region_name.strip().upper() if region_name else None + ) + if region_name: + break + print("Region code is required.") + + # Validate region name was provided + if not region_name: + cli_ui.error("Region required; skipping SHRI.") + raise ValueError("Region required for disc upload") + + # Validate region code with API + region_id = await self.common.unit3d_region_ids(region_name) + if not region_id: + cli_ui.error(f"Invalid region code '{region_name}'; skipping SHRI.") + raise ValueError(f"Invalid region code: {region_name}") + + # Handle optional distributor + distributor_name = meta.get("distributor") + distributor_id = None + if not distributor_name and not meta.get("unattended"): + distributor_name = cli_ui.ask_string( + "SHRI: Distributor (optional, Enter to skip): " + ) + distributor_name = ( + distributor_name.strip().upper() if distributor_name else None + ) + + if distributor_name: + distributor_id = await self.common.unit3d_distributor_ids( + distributor_name + ) + + # Store in module-level dict keyed by UUID (survives instance recreation) + _shri_session_data[meta["uuid"]] = { + "_shri_region_id": region_id, + "_shri_region_name": region_name, + "_shri_distributor_id": distributor_id if distributor_name else None, + } + + return await super().get_additional_checks(meta) + + async def get_region_id(self, meta): + """Override to use validated region ID stored in meta""" + data = _shri_session_data.get(meta["uuid"], {}) + region_id = data.get("_shri_region_id") + if region_id: + return {"region_id": region_id} + return await super().get_region_id(meta) + + async def get_distributor_id(self, meta): + """Override to use validated distributor ID stored in meta""" + data = _shri_session_data.get(meta["uuid"], {}) + distributor_id = data.get("_shri_distributor_id") + if distributor_id: + return {"distributor_id": distributor_id} + return await super().get_distributor_id(meta) + + def get_basename(self, meta): + """Extract basename from first file in filelist or path""" + path = next(iter(meta["filelist"]), meta["path"]) + return os.path.basename(path) + + def _detect_type_from_technical_analysis(self, meta): + """Unified type detection: filename markers + MediaInfo analysis""" + # Priority 1: Explicit REMUX markers (filename check FIRST) + if self._has_remux_marker(meta): + return "REMUX" + # Priority 2: Base type from upstream + base_type = meta.get("type", "ENCODE") + if base_type in ("DISC", "DVDRIP", "BRRIP"): + return base_type + # Priority 3: Technical mediainfo analysis + return self._analyze_encode_type(meta) + + def _has_remux_marker(self, meta): + name_no_ext = os.path.splitext(self.get_basename(meta))[0].lower() + if "remux" in name_no_ext: + return True + if self.MARKER_PATTERN.search(name_no_ext): + return True + + # Check for MakeMKV + no encoding + mi = meta.get("mediainfo", {}).get("media", {}).get("track", []) + if mi: + general = mi[0] + encoded_app = str(general.get("Encoded_Application", "")).lower() + encoded_lib = str(general.get("Encoded_Library", "")).lower() + + if "makemkv" in encoded_app or "makemkv" in encoded_lib: + video = next((t for t in mi if t.get("@type") == "Video"), {}) + settings = video.get("Encoded_Library_Settings") + if not settings or isinstance(settings, dict): + return True + + return False + + def _analyze_encode_type(self, meta): + """ + Detect release type from MediaInfo technical analysis. + + Priority order: + 1. 
DV profile (05/07/08) + no encoding -> WEB-DL (overrides source field) + 2. CRF in settings -> WEBRIP/ENCODE + 3. Service fingerprints -> WEB-DL (CR/Netflix patterns) + 4. BluRay encoding detection -> ENCODE (settings, library, or GPU stripped metadata) + 5. Encoding tools (source-aware) -> WEBRIP/ENCODE (Handbrake/Staxrip/etc in general track) + 6. No encoding + WEB -> WEB-DL + 7. Service override -> WEB-DL (handles misdetected sources) + 8. No encoding + disc -> REMUX + """ + + def has_encoding_tools(general_track, tools): + """Check if general track contains specified encoding tools.""" + encoded_app = str(general_track.get("Encoded_Application", "")).lower() + extra = general_track.get("extra", {}) + writing_frontend = str(extra.get("Writing_frontend", "")).lower() + tool_string = f"{encoded_app} {writing_frontend}" + return any(tool in tool_string for tool in tools) + + try: + mi = meta.get("mediainfo", {}) + tracks = mi.get("media", {}).get("track", []) + general_track = tracks[0] + video_track = tracks[1] + + # Normalize source list + source = meta.get("source", "") + if isinstance(source, list): + source = [s.upper() for s in source] + else: + source = [source.upper()] if source else [] + + service = str(meta.get("service", "")).upper() + + # Extract encoding metadata + raw_settings = video_track.get("Encoded_Library_Settings", "") + raw_library = video_track.get("Encoded_Library", "") + has_settings = raw_settings and not isinstance(raw_settings, dict) + has_library = raw_library and not isinstance(raw_library, dict) + encoding_settings = str(raw_settings).lower() if has_settings else "" + encoded_library = str(raw_library).lower() if has_library else "" + + # ===== Priority 1: DV streaming profiles ===== + # DV profiles 5/7/8 indicate streaming sources (overrides source field) + hdr_profile = video_track.get("HDR_Format_Profile", "") + has_streaming_dv = any( + prof in hdr_profile for prof in ["dvhe.05", "dvhe.07", "dvhe.08"] + ) + + if has_streaming_dv and not encoding_settings: + # Ensure not re-encoded by user tools + if not has_encoding_tools( + general_track, ["handbrake", "staxrip", "megatagger"] + ): + return "WEBDL" + + # ===== Priority 2: CRF detection ===== + # CRF (Constant Rate Factor) indicates user re-encode + if "crf=" in encoding_settings: + return "WEBRIP" if any("WEB" in s for s in source) else "ENCODE" + + # ===== Priority 3: Service fingerprints ===== + # Crunchyroll detection + if service == "CR": + if "core 142" in encoded_library: + return "WEBDL" + if has_library: + core_match = re.search(r"core (\d+)", encoded_library) + if core_match and int(core_match.group(1)) >= 152: + return "WEBRIP" + if encoding_settings and "bitrate=" in encoding_settings: + return "WEBDL" + + # Netflix fingerprint detection + format_profile = video_track.get("Format_Profile", "") + if "Main@L4.0" in format_profile and "rc=2pass" in encoding_settings: + if "core 118" in encoded_library or "core 148" in encoded_library: + return "WEBDL" + + # ===== Priority 4: BluRay encoding detection ===== + if any(s in ("BLURAY", "BLU-RAY") for s in source): + # GPU encode detection: empty BitDepth/Chroma metadata (dict type) + if isinstance(video_track.get("BitDepth"), dict): + return "ENCODE" + # Any encoding settings or library info = encode (not remux) + # Catches x264/x265 in Encoded_Library or settings in Encoded_Library_Settings + if has_settings or has_library: + return "ENCODE" + + # ===== Priority 5: Encoding tools (source-aware) ===== + # Check general track for encoding tools 
(Handbrake, Staxrip, etc) + if any(s in ("BLURAY", "BLU-RAY") for s in source): + if has_encoding_tools( + general_track, + ["x264", "x265", "handbrake", "staxrip", "megatagger"], + ): + return "ENCODE" + + # WEB sources: only explicit user tools indicate re-encode + if any("WEB" in s for s in source): + if has_encoding_tools( + general_track, ["handbrake", "staxrip", "megatagger"] + ): + return "WEBRIP" + + # ===== Priority 6: No encoding + WEB = WEB-DL ===== + if any("WEB" in s for s in source): + return "WEBDL" + + # ===== Priority 7: Service override ===== + # If streaming service is set but source wasn't detected as Web, + # override to WEB-DL (handles upstream get_source.py misdetection) + if service and service not in ("", "NONE"): + return "WEBDL" + + # ===== Priority 8: No encoding + disc = REMUX ===== + if any(s in ("BLURAY", "BLU-RAY", "HDDVD") for s in source): + return "REMUX" - if reverse: - # Return reverse mapping of IDs to resolutions - return {v: k for k, v in resolution_mapping.items()} - elif resolution is not None: - # Return the ID for the given resolution - return resolution_mapping.get(resolution, '10') # Default to '10' for unknown resolutions + # DVD REMUX detection + if any(s in ("NTSC", "PAL", "NTSC DVD", "PAL DVD", "DVD") for s in source): + if not has_settings and not has_library: + return "REMUX" + + except (IndexError, KeyError): + # Fallback on mediainfo parsing errors + pass + + # Final fallback: use meta type or default to ENCODE + return meta.get("type", "ENCODE") + + def get_effective_type(self, meta): + """ + Determine effective type with priority hierarchy: + 1. Cinema News (CAM/HDCAM/TC/HDTC/TS/HDTS/MD/LD keywords) + 2. Technical analysis (REMUX/ENCODE/WEB-DL/WEBRip detection) + 3. Base type from meta + """ + basename = self.get_basename(meta) + if self.CINEMA_NEWS_PATTERN.search(basename): + return "CINEMA_NEWS" + + detected_type = self._detect_type_from_technical_analysis(meta) + return detected_type + + def _get_italian_title(self, imdb_info): + """Extract Italian title from IMDb AKAs with priority""" + country_match = None + language_match = None + + for aka in imdb_info.get("akas", []): + if isinstance(aka, dict): + if aka.get("country") == "Italy" and not aka.get("attributes"): + country_match = aka.get("title") + break # Country match takes priority + elif aka.get("language") == "Italy" and not language_match and not aka.get("attributes"): + language_match = aka.get("title") + + return country_match or language_match + + def _has_italian_audio(self, meta): + """Check for Italian audio tracks, excluding commentary""" + if "mediainfo" not in meta: + return False + + tracks = meta["mediainfo"].get("media", {}).get("track", []) + return any( + track.get("@type") == "Audio" + and self._get_language_code(track) in {"it"} + and "commentary" not in str(track.get("Title", "")).lower() + for track in tracks[2:] + ) + + def _has_italian_subtitles(self, meta): + """Check for Italian subtitle tracks""" + if "mediainfo" not in meta: + return False + + tracks = meta["mediainfo"].get("media", {}).get("track", []) + return any( + track.get("@type") == "Text" and self._get_language_code(track) in {"it"} + for track in tracks + ) + + def _get_language_name(self, iso_code): + """Convert ISO language code to full language name""" + if not iso_code: + return "" + + # Try alpha_2 (IT, EN, etc) + lang = pycountry.languages.get(alpha_2=iso_code.lower()) + if lang: + return lang.name.upper() + + # Try alpha_3 (ITA, ENG, etc) + lang = 
pycountry.languages.get(alpha_3=iso_code.lower()) + if lang: + return lang.name.upper() + + return iso_code + + def _get_italian_language_name(self, iso_code): + """Convert ISO language code to Italian language name using Babel""" + if not iso_code: + return "" + + try: + locale = Locale.parse(iso_code.lower()) + italian_name = locale.get_display_name("it") + return ( + italian_name.title() + if italian_name + else self._get_language_name(iso_code).title() + ) + except (ValueError, AttributeError, KeyError, UnknownLocaleError): + return self._get_language_name(iso_code).title() + + async def _get_best_italian_audio_format(self, meta): + """Filter Italian tracks, select best, format via get_audio_v2""" + # fmt: off + ITALIAN_LANGS = {"it", "italian", "italiano"} + + def extract_quality(track, is_bdinfo): + if is_bdinfo: + bitrate_match = re.search(r'(\d+)', track.get("bitrate", "0")) + return ( + any(x in track.get("codec", "").lower() for x in ["truehd", "dts-hd ma", "flac", "pcm"]), + int(float(track.get("channels", "2.0").split(".")[0])), + "atmos" in track.get("atmos_why_you_be_like_this", "").lower(), + int(bitrate_match.group(1)) if bitrate_match else 0 + ) + else: + try: + bitrate_int = int(track.get("BitRate", 0)) if track.get("BitRate", 0) else 0 + except (ValueError, TypeError) as e: + cli_ui.warning(f"Invalid BitRate value in audio track: {track.get('BitRate')}\n" + f"Using 0 as default. Error: {e}.") + bitrate_int = 0 + return ( + track.get("Compression_Mode") == "Lossless", + int(track.get("Channels", 2)), + "JOC" in track.get("Format_AdditionalFeatures", "") or "Atmos" in track.get("Format_Commercial", ""), + bitrate_int + ) + + def clean(audio_str): + return re.sub(r"\s*-[A-Z]{3}(-[A-Z]{3})*$", "", audio_str.replace("Dual-Audio", "").replace("Dubbed", "")).strip() + + bdinfo = meta.get("bdinfo") + + if bdinfo and bdinfo.get("audio"): + italian = [t for t in bdinfo["audio"] if t.get("language", "").lower() in ITALIAN_LANGS] + if not italian: + return clean(meta.get("audio", "")) + best = max(italian, key=lambda t: extract_quality(t, True)) + audio_str, _, _ = await get_audio_v2(None, meta, {"audio": [best]}) else: - # Return the full mapping - return resolution_mapping - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + tracks = meta.get("mediainfo", {}).get("media", {}).get("track", []) + italian = [ + t for t in tracks[1:] + if t.get("@type") == "Audio" + and self._get_language_code(t) in ITALIAN_LANGS + and "commentary" not in str(t.get("Title", "")).lower() + ] + if not italian: + return clean(meta.get("audio", "")) + best = max(italian, key=lambda t: extract_quality(t, False)) + audio_str, _, _ = await get_audio_v2({"media": {"track": [tracks[0], best]}}, meta, None) + + return clean(audio_str) + + async def get_description(self, meta, is_test=False): + """Generate Italian BBCode description for ShareIsland""" + title = meta.get("title", "Unknown") + italian_title = self._get_italian_title(meta.get("imdb_info", {})) + if italian_title: + title = italian_title + + category = 
meta.get("category", "MOVIE") + + # Build info line: resolution, source, codec, audio, language + info_parts = [] + if meta.get("resolution"): + info_parts.append(meta["resolution"]) + + source = meta.get("source", "") + if isinstance(source, list): + source = source[0] if source else "" + if source: + info_parts.append( + source.replace("Blu-ray", "BluRay").replace("Web", "WEB-DL") + ) + + video_codec = meta.get("video_codec", "") + if "HEVC" in video_codec or "H.265" in video_codec: + info_parts.append("x265") + elif "AVC" in video_codec or "H.264" in video_codec: + info_parts.append("x264") + elif video_codec: + info_parts.append(video_codec) + + if meta.get("hdr") and meta["hdr"] != "SDR": + info_parts.append(meta["hdr"]) + + audio = await self._get_best_italian_audio_format(meta) + if audio: + info_parts.append(audio) + + if meta.get("audio_languages"): + langs = [ + self._get_italian_language_name(self._get_language_code(lang)) + for lang in meta["audio_languages"] + ] + langs = [lang for lang in langs if lang] + if "Italiano" in langs: + info_parts.append("Italiano") + elif "Inglese" in langs: + info_parts.append("Inglese") + elif langs: + info_parts.append(langs[0].title()) + + info_line = " ".join(info_parts) + + # Fetch TMDb data and format components + summary, logo_url = await self._fetch_tmdb_italian(meta) + screens = await self._format_screens_italian(meta) + synthetic_mi = await self._get_synthetic_mediainfo(meta) + + bbcode = self._build_bbcode( + title, info_line, logo_url, summary, screens, synthetic_mi, category, meta + ) + + custom_description_header = self.config.get("DEFAULT", {}).get( + "custom_description_header", "" + ) + if custom_description_header: + bbcode = bbcode.replace( + "[code]\n", f"[code]\n{custom_description_header}\n\n" + ) + + if not is_test: + desc_file = ( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + ) + async with aiofiles.open(desc_file, "w", encoding="utf-8") as f: + await f.write(bbcode) + + return {"description": bbcode} + + async def _fetch_tmdb_italian(self, meta): + """Fetch Italian overview and logo from TMDb API""" + api_key = self.config.get("DEFAULT", {}).get("tmdb_api", "N/A") + tmdb_id = meta.get("tmdb", "") + + summary = "Riassunto non disponibile." + logo_url = "" + + if not tmdb_id: + return summary, logo_url + + # Use /tv/ endpoint for series, /movie/ for films + category = meta.get("category", "MOVIE") + media_type = "tv" if category == "TV" else "movie" try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = { - 'name': each['attributes']['name'], - 'size': each['attributes']['size'] - } - dupes.append(result) + url = f"https://api.themoviedb.org/3/%7Bmedia_type%7D/%7Btmdb_id%7D" + params = {"api_key": api_key, "language": "it-IT"} + resp = await asyncio.to_thread( + requests.get, url, params=params, timeout=5, verify=certifi.where() + ) + resp.encoding = "utf-8" + + if resp.status_code == 200: + data = resp.json() + raw_summary = data.get("overview", "Riassunto non disponibile.") + summary = " ".join(raw_summary.split()) + + # Try meta logo first, then fetch from TMDb + logo_path = meta.get("tmdb_logo", "") + if logo_path: + logo_url = f"https://image.tmdb.org/t/p/w300/%7Blogo_path%7D" else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") + img_url = ( + f"https://api.themoviedb.org/3/%7Bmedia_type%7D/%7Btmdb_id%7D/images" + ) + img_resp = await asyncio.to_thread( + requests.get, + img_url, + params={"api_key": api_key}, + timeout=5, + verify=certifi.where(), + ) + if img_resp.status_code == 200: + img_data = img_resp.json() + logos = img_data.get("logos", []) + # Priority: Italian > English > any other > first available + logo_url = "" + fallback_logo = None + for logo in logos: + lang = logo.get("iso_639_1") + path = logo.get("file_path") + if lang == "it": + logo_url = f"https://image.tmdb.org/t/p/w300%7Bpath%7D" + break + elif lang == "en" and not logo_url: + logo_url = f"https://image.tmdb.org/t/p/w300%7Bpath%7D" + elif not fallback_logo: + fallback_logo = path + # Use fallback if no Italian/English found + if not logo_url and fallback_logo: + logo_url = f"https://image.tmdb.org/t/p/w300%7Bfallback_logo%7D" + except Exception as e: + print(f"[DEBUG] TMDb fetch error: {e}") + + return summary, logo_url + + async def _format_screens_italian(self, meta): + """Format up to 6 screenshots in 2-column grid with [img=350]""" + images = meta.get("image_list", []) + if not images: + return "[center]Nessuno screenshot disponibile[/center]" + + screens = [] + for img in images[:6]: + raw_url = img.get("raw_url", "") + web_url = img.get("web_url", raw_url) + if raw_url: + screens.append(f"[url={web_url}][img=350]{raw_url}[/img][/url]") + + if not screens: + return "[center]Nessuno screenshot disponibile[/center]" + + # 2 screenshots per row + row1 = ( + " ".join(screens[:2]) + " \n" + if len(screens) >= 2 + else " ".join(screens) + " \n" + ) + row2 = " ".join(screens[2:4]) + " \n" if len(screens) > 2 else "" + row3 = " ".join(screens[4:6]) + " \n" if len(screens) > 4 else "" + return f"[center]{row1}{row2}{row3}[/center]" + + async def _get_synthetic_mediainfo(self, meta): + """Extract formatted mediainfo from meta.json structure""" + + def safe_int(val, default=0): + """Convert to int, handling dict/None cases""" + try: + return default if isinstance(val, dict) else int(val) + except (ValueError, TypeError): + return default + + def get_audio_format_details(audio_track): + """Map raw audio formats to commercial names""" + fmt_map = { + "E-AC-3": ("DDP", "Dolby Digital Plus"), + "AC-3": ("DD", "Dolby Digital"), + "TrueHD": ("TrueHD", "Dolby TrueHD"), + "MLP FBA": ("TrueHD", "Dolby TrueHD"), + "DTS-HD MA": ("DTS-HD MA", "DTS-HD Master Audio"), + "AAC": ("AAC", "Advanced Audio Codec"), + } + + if not audio_track: + return "AAC", "AAC" + + fmt_raw = audio_track.get("Format", "AAC") + + # Detect Atmos in MLP FBA streams + if fmt_raw == "MLP FBA": + commercial = audio_track.get("Format_Commercial_IfAny", "") + if isinstance(commercial, str) and "atmos" in commercial.lower(): + return "TrueHD Atmos", "Dolby TrueHD with Atmos" + + return fmt_map.get(fmt_raw, (fmt_raw, fmt_raw)) + + try: + mi = meta.get("mediainfo", {}).get("media", {}) + tracks = mi.get("track", []) + + # Parse track types + general = next((t for t in tracks if t.get("@type") == "General"), {}) + video = next((t for t in tracks if t.get("@type") == "Video"), {}) + audio_tracks = [t for t in tracks if t.get("@type") == "Audio"] + text_tracks = [t for t in tracks if t.get("@type") == "Text"] + + # 
Prefer Italian audio, fallback to first track + ita_audio = next( + (t for t in audio_tracks if self._get_language_code(t) == "it"), None + ) + if not ita_audio and audio_tracks: + ita_audio = audio_tracks[0] + + # General info + filelist = meta.get("filelist", []) + fn = ( + os.path.basename(filelist[0]) + if filelist + else general.get("FileName", "file.mkv") + ) + size = f"{safe_int(general.get('FileSize', 0)) / (1024**3):.1f} GiB" + + dur_sec = float(general.get("Duration", 0)) + hours = safe_int(dur_sec // 3600) + minutes = safe_int((dur_sec % 3600) // 60) + dur = f"{hours} h {minutes} min" if hours > 0 else f"{minutes} min" + + total_br = ( + f"{safe_int(general.get('OverallBitRate', 0)) / 1000000:.1f} Mb/s" + ) + chap = "Si" if safe_int(general.get("MenuCount", 0)) > 0 else "No" + + # Video info + vid_format = video.get("Format", "N/A") + vid_format_upper = vid_format.upper() + if "HEVC" in vid_format_upper: + codec = "x265" + elif "AVC" in vid_format_upper or "H.264" in vid_format_upper: + codec = "x264" + elif "MPEG VIDEO" in vid_format_upper or "MPEG-2" in vid_format_upper: + codec = "MPEG-2" + elif "VC-1" in vid_format_upper or "VC1" in vid_format_upper: + codec = "VC-1" + else: + codec = vid_format # Fallback to format name + depth = f"{video.get('BitDepth', 10)} bits" + vid_br = f"{safe_int(video.get('BitRate', 0)) / 1000000:.1f} Mb/s" + res = meta.get("resolution", "N/A") + asp_decimal = video.get("DisplayAspectRatio") + asp_float = float(asp_decimal) if asp_decimal else 0.0 + if 1.77 <= asp_float <= 1.79: + asp = "16:9" + elif 1.32 <= asp_float <= 1.34: + asp = "4:3" + elif 2.35 <= asp_float <= 2.45: + asp = "2.39:1" + else: + asp = f"{asp_float:.2f}:1" if asp_float != 0.0 else "N/A" + + # Audio info + afmt = ita_audio.get("Format", "N/A") if ita_audio else "N/A" + + # Try commercial name from mediainfo, fallback to mapping + afmt_name = ( + ita_audio.get("Format_Commercial_IfAny", "") if ita_audio else "" + ) + if isinstance(afmt_name, dict) or not afmt_name: + afmt_name = ita_audio.get("Title", "") if ita_audio else "" + if isinstance(afmt_name, dict) or not afmt_name: + _, afmt_name = ( + get_audio_format_details(ita_audio) if ita_audio else ("", afmt) + ) + + # Map channel count to standard format + ch = ita_audio.get("Channels", "2") if ita_audio else "2" + if ch == "6": + ch = "5.1" + elif ch == "8": + ch = "7.1" + elif ch == "2": + ch = "2.0" + + aud_br = ( + f"{safe_int(ita_audio.get('BitRate', 0)) / 1000:.0f} kb/s" + if ita_audio + else "0 kb/s" + ) + if ita_audio: + audio_lang_code = self._get_language_code(ita_audio) + lang = ( + self._get_italian_language_name(audio_lang_code) + if audio_lang_code + else "Inglese" + ) + else: + lang = "Inglese" + + # Subtitle languages + if text_tracks: + sub_langs = set() + for t in text_tracks: + lang_code = self._get_language_code(t) + if lang_code: + lang_name = self._get_italian_language_name(lang_code) + if lang_name: + sub_langs.add(lang_name.title()) + subs = ", ".join(sorted(sub_langs)) if sub_langs else "Assenti" + else: + subs = "Assenti" + + return { + "fn": fn, + "size": size, + "dur": dur, + "total_br": total_br, + "chap": chap, + "vid_format": vid_format, + "codec": codec, + "depth": depth, + "vid_br": vid_br, + "res": res, + "asp": asp, + "aud_format": afmt, + "aud_name": afmt_name, + "ch": ch, + "aud_br": aud_br, + "lang": lang, + "subs": subs, + } except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) + print(f"[DEBUG] Mediainfo extraction error: {e}") + import 
traceback + + traceback.print_exc() + return None + + def _strip_bbcode(self, text): + """Remove BBCode tags from text, keeping only plain content""" + pattern = re.compile(r"\[/?[^\]]+\]") + return pattern.sub("", text).strip() + + def _build_bbcode( + self, title, info_line, logo_url, summary, screens, synthetic_mi, category, meta + ): + """Build ShareIsland BBCode template""" + if category == "TV": + is_pack = meta.get("tv_pack", 0) == 1 + category_header = ( + "--- SERIE TV (STAGIONE) ---" + if is_pack + else "--- SERIE TV (EPISODIO) ---" + ) + else: + category_header = "--- FILM ---" + release_group = meta.get("tag", "").lstrip("-").strip() + + tonemapped_text = "" + if meta.get("tonemapped", False): + tonemapped_header = self.config.get("DEFAULT", {}).get( + "tonemapped_header", "" + ) + if tonemapped_header: + tonemapped_text = self._strip_bbcode(tonemapped_header) + + if release_group.lower() == "island": + base_notes = "Questa è una release interna pubblicata in esclusiva su Shareisland.\nSi prega di non ricaricare questa release su tracker pubblici o privati. Si prega di mantenerla in seed il più a lungo possibile. Grazie!" + if tonemapped_text: + release_notes_section = f"""[size=13][b][color=#e8024b]--- RELEASE NOTES ---[/color][/b][/size] +[size=11][color=#FFFFFF]{base_notes} +{tonemapped_text}[/color][/size]""" + else: + release_notes_section = f"""[size=13][b][color=#e8024b]--- RELEASE NOTES ---[/color][/b][/size] +[size=11][color=#FFFFFF]{base_notes}[/color][/size]""" + else: + base_notes = "Nulla da aggiungere." + if tonemapped_text: + release_notes_section = f"""[size=13][b][color=#e8024b]--- RELEASE NOTES ---[/color][/b][/size] +[size=11][color=#FFFFFF]{tonemapped_text}[/color][/size]""" + else: + release_notes_section = f"""[size=13][b][color=#e8024b]--- RELEASE NOTES ---[/color][/b][/size] +[size=11][color=#FFFFFF]{base_notes}[/color][/size]""" + + pirate_shouts = [ + "The Scene never dies", + "Arrr! 
Powered by Rum & Bandwidth", + "Seed or walk the plank!", + "Released by Nobody — claimed by Everybody", + "From the depths of the digital seas", + "Where bits are free and rum flows endlessly", + "Pirates don't ask, they share", + "For the glory of the Scene!", + "Scene is the paradise", + ] + if not release_group or release_group.lower() in [ + "nogroup", + "nogrp", + "unknown", + "unk", + ]: + shoutouts = f"SHOUTOUTS : {random.choice(pirate_shouts)}" + else: + shoutouts = f"SHOUTOUTS : {release_group}" + logo_section = ( + f"[center][img=250]{logo_url}[/img][/center]\n" if logo_url else "" + ) + + # Build LINKS section + imdb_id = meta.get("imdb", "") + tmdb_id = meta.get("tmdb", "") + media_type = "tv" if category == "TV" else "movie" + + links_section = "" + if imdb_id or tmdb_id: + links_section = ( + "\n[size=13][b][color=#e8024b]--- LINKS ---[/color][/b][/size]\n" + ) + if imdb_id: + links_section += f"[size=11][color=#FFFFFF]IMDb: https://www.imdb.com/title/tt{imdb_id}/[/color][/size]\n" + if tmdb_id: + links_section += f"[size=11][color=#FFFFFF]TMDb: https://www.themoviedb.org/{media_type}/{tmdb_id}[/color][/size]\n" + links_section += "\n" + + ua_sig = meta.get("ua_signature", "Generated by Upload Assistant") + + # Mediainfo section + mediainfo_section = "" + if synthetic_mi: + mediainfo_section = f"""[size=13][b][color=#da8d49]INFO GENERALI[/color][/b][/size] +[size=11][color=#FFFFFF]Nome File : {synthetic_mi['fn']}[/color][/size] +[size=11][color=#FFFFFF]Dimensioni File : {synthetic_mi['size']}[/color][/size] +[size=11][color=#FFFFFF]Durata : {synthetic_mi['dur']}[/color][/size] +[size=11][color=#FFFFFF]Bitrate Totale : {synthetic_mi['total_br']}[/color][/size] +[size=11][color=#FFFFFF]Capitoli : {synthetic_mi['chap']}[/color][/size] + +[size=13][b][color=#da8d49]VIDEO[/color][/b][/size] +[size=11][color=#FFFFFF]Formato : {synthetic_mi['vid_format']}[/color][/size] +[size=11][color=#FFFFFF]Compressore : {synthetic_mi['codec']}[/color][/size] +[size=11][color=#FFFFFF]Profondità Bit : {synthetic_mi['depth']}[/color][/size] +[size=11][color=#FFFFFF]Bitrate : {synthetic_mi['vid_br']}[/color][/size] +[size=11][color=#FFFFFF]Risoluzione : {synthetic_mi['res']}[/color][/size] +[size=11][color=#FFFFFF]Rapporto : {synthetic_mi['asp']}[/color][/size] + +[size=13][b][color=#da8d49]AUDIO[/color][/b][/size] +[size=11][color=#FFFFFF]Formato : {synthetic_mi['aud_format']}[/color][/size] +[size=11][color=#FFFFFF]Nome : {synthetic_mi['aud_name']}[/color][/size] +[size=11][color=#FFFFFF]Canali : {synthetic_mi['ch']}[/color][/size] +[size=11][color=#FFFFFF]Bitrate : {synthetic_mi['aud_br']}[/color][/size] +[size=11][color=#FFFFFF]Lingua : {synthetic_mi['lang']}[/color][/size] + +[size=13][b][color=#da8d49]SOTTOTITOLI[/color][/b][/size] +[size=11][color=#FFFFFF]{synthetic_mi['subs']}[/color][/size] + +""" + + bbcode = f"""[code] +{logo_section}[center][size=13][b][color=#e8024b]{category_header}[/color][/b][/size][/center] +[center][size=13][b][color=#ffffff]{title}[/color][/b][/size][/center] +[center][size=13][color=#ffffff]{info_line}[/color][/size][/center] + +[center][size=13][b][color=#e8024b]--- RIASSUNTO ---[/color][/b][/size][/center] +{summary} + +[center][size=13][b][color=#e8024b]--- SCREENS ---[/color][/b][/size][/center] +{screens} +{links_section}{mediainfo_section}{release_notes_section} + +[size=13][b][color=#e8024b]--- SHOUTOUTS ---[/color][/b][/size] +[size=11][color=#FFFFFF]{shoutouts}[/color][/size] + +[size=13][color=#0592a3][size=16][b]BUON 
DOWNLOAD![/b][/size][/color][/size] + +[right][size=8]{ua_sig}[/size][/right] +[/code]""" - return dupes + return bbcode diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 816c1eb24..7be955cd6 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- import requests import asyncio @@ -8,13 +9,6 @@ class SN(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ def __init__(self, config): self.config = config self.tracker = 'SN' diff --git a/src/trackers/SP.py b/src/trackers/SP.py index de240ffc0..b65f9104a 100644 --- a/src/trackers/SP.py +++ b/src/trackers/SP.py @@ -1,64 +1,50 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx import re import os from src.trackers.COMMON import COMMON from src.console import console +from src.trackers.UNIT3D import UNIT3D -class SP(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class SP(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='SP') self.config = config + self.common = COMMON(config) self.tracker = 'SP' self.source_flag = 'seedpool.org' - self.upload_url = 'https://seedpool.org/api/torrents/upload' - self.search_url = 'https://seedpool.org/api/torrents/filter' - self.torrent_url = 'https://seedpool.org/torrents/' - self.signature = None - self.banned_groups = [""] + self.base_url = 'https://seedpool.org/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - # Change from base: Requires the full meta dictionary to determine category - async def get_cat_id(self, meta): + async def get_category_id(self, meta): if not isinstance(meta, dict): raise TypeError('meta must be a dict when passed to Seedpool get_cat_id') category_name = meta.get('category', '').upper() release_title = meta.get('name', '') mal_id = meta.get('mal_id', 0) - tv_pack = meta.get('tv_pack', 0) # Custom SEEDPOOL category logic - # Anime - if mal_id != 0: - return '6' - - # Boxset - if tv_pack != 0: - return '13' + # Anime TV go in the Anime category + if mal_id != 0 and category_name == 'TV': + return {'category_id': '6'} # Sports if self.contains_sports_patterns(release_title): - return '8' + return {'category_id': '8'} # Default category logic category_id = { 'MOVIE': '1', 'TV': '2', }.get(category_name, '0') - return category_id + return {'category_id': category_id} # New function to check for sports releases in a title def contains_sports_patterns(self, release_title): @@ -74,7 +60,7 @@ def contains_sports_patterns(self, release_title): return True return False - async def get_type_id(self, type): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -83,110 +69,10 @@ async def get_type_id(self, type): 'HDTV': '6', 'ENCODE': '3', 'DVDRIP': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': 
'7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id + }.get(meta['type'], '0') + return {'type_id': type_id} - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta) - name = await self.edit_name(meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def edit_name(self, meta): + async def get_name(self, meta): KNOWN_EXTENSIONS = {".mkv", ".mp4", ".avi", ".ts"} if meta['scene'] is True: if meta.get('scene_name') != "": @@ -204,37 +90,5 @@ async def edit_name(self, meta): if ext.lower() in KNOWN_EXTENSIONS: name = base.replace(" ", ".") console.print(f"[cyan]Name: {name}") - return name - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - return dupes + return {'name': name} diff --git a/src/trackers/SPD.py b/src/trackers/SPD.py index 22c24e493..e90d87999 100644 --- a/src/trackers/SPD.py +++ b/src/trackers/SPD.py @@ -1,170 +1,336 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -from torf import Torrent -import requests -from src.console import console -from pprint import pprint +import aiofiles import base64 -import shutil -import os -import traceback +import glob import httpx -from src.trackers.COMMON import COMMON - +import os +import re +import unicodedata +from .COMMON import COMMON +from src.bbcode import BBCODE +from src.console import console +from src.get_desc import DescriptionBuilder +from src.languages import process_desc_language -class SPD(): +class SPD: def __init__(self, config): self.url = "https://speedapp.io/" self.config = config + self.common = COMMON(config) self.tracker = 'SPD' - self.source_flag = 'speedapp.io' - self.search_url = 'https://speedapp.io/api/torrent' + self.source_flag = "speedapp.io" self.upload_url = 'https://speedapp.io/api/upload' - self.forum_link = 'https://speedapp.io/support/wiki/rules' - self.banned_groups = [''] - pass + self.torrent_url = 'https://speedapp.io/browse/' + self.banned_groups = [] + self.banned_url = 'https://speedapp.io/api/torrent/release-group/blacklist' + self.session = httpx.AsyncClient(headers={ + 'User-Agent': "Upload Assistant", + 'accept': 'application/json', + 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'], + }, timeout=30.0) - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - type_id = "" - if meta['anime']: - type_id = '3' - elif meta['category'] == 'TV': - if meta['tv_pack']: - type_id = '41' - elif meta['sd'] and not meta['tv_pack']: - type_id = '45' - # must be hd - 
else: - type_id = '43' - else: - if meta['type'] != "DISC" and meta['resolution'] == "2160p": - type_id = '61' - else: - type_id = { - 'DISC': '17', - 'REMUX': '8', - 'WEBDL': '8', - 'WEBRIP': '8', - 'HDTV': '8', - 'SD': '10', - 'ENCODE': '8' - }.get(type, '0') - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() - bd_dump = None - screenshots = [] - if len(meta['image_list']) != 0: - for image in meta['image_list']: - screenshots.append(image['raw_url']) - data = { - 'name': meta['name'].replace("'", '').replace(': ', '.').replace(':', '.').replace(' ', '.').replace(' ', '.').replace('DD+', 'DDP'), - 'screenshots': screenshots, - 'release_info': f"[center][url={self.forum_link}]Please seed[/url][/center]", - 'media_info': mi_dump, - 'bd_info': bd_dump, - 'type': type_id, - 'url': f"https://www.imdb.com/title/tt%7Bmeta['imdb']}", - 'shortDescription': meta['genres'], - 'keywords': meta['keywords'], - 'releaseInfo': self.forum_link - } - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') as binary_file: - binary_file_data = binary_file.read() - base64_encoded_data = base64.b64encode(binary_file_data) - base64_message = base64_encoded_data.decode('utf-8') - data['file'] = base64_message + async def get_cat_id(self, meta): + if not meta.get('language_checked', False): + await process_desc_language(meta, desc=None, tracker=self.tracker) - headers = {'Authorization': 'Bearer ' + self.config['TRACKERS'][self.tracker]['api_key'].strip()} + langs = [lang.lower() for lang in meta.get('subtitle_languages', []) + meta.get('audio_languages', [])] + romanian = 'romanian' in langs - if meta['debug'] is False: - response = requests.request("POST", url=self.upload_url, json=data, headers=headers) - try: - if response.status_code == 200: - # response = {'status': True, 'error': False, 'downloadUrl': '/api/torrent/383435/download', 'torrent': {'id': 383435, 'name': 'name-with-full-stops', 'slug': 'name-with-dashs', 'category_id': 3}} - # downloading the torrent from site as it adds a tonne of different trackers and the source is different all the time. - try: - # torrent may not dl and may not provide error if machine is under load or network connection usage high. - if 'downloadUrl' in response.json(): - meta['tracker_status'][self.tracker]['status_message'] = response.json()['downloadUrl'] - with requests.get(url=self.url + response.json()['downloadUrl'], stream=True, headers=headers) as r: - # replacing L4g/torf created torrent so it will be added to the client. 
- with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", - 'wb') as f: - shutil.copyfileobj(r.raw, f) - # adding as comment link to torrent - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent"): - new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent") - new_torrent.metainfo['comment'] = f"{self.url}/browse/{response.json()['torrent']['id']}" - Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", overwrite=True) - else: - console.print("[bold red]No downloadUrl in response.") - console.print("[bold red]Confirm it uploaded correctly and try to download manually") - console.print({response.json()}) - except Exception: - console.print(traceback.print_exc()) - console.print("[red]Unable to Download torrent, try manually") - console.print({response.json()}) - else: - console.print(f"[bold red]Failed to upload got status code: {response.status_code}") - except Exception: - console.print(traceback.print_exc()) - console.print("[yellow]Unable to Download torrent, try manually") - return + if 'RO' in meta.get('origin_country', []): + if meta.get('category') == 'TV': + return '60' + elif meta.get('category') == 'MOVIE': + return '59' + + # documentary + if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower(): + return '63' if romanian else '9' + + # anime + if meta.get('anime'): + return '3' + + # TV + if meta.get('category') == 'TV': + if meta.get('tv_pack'): + return '66' if romanian else '41' + elif meta.get('sd'): + return '46' if romanian else '45' + return '44' if romanian else '43' + + # MOVIE + if meta.get('category') == 'MOVIE': + if meta.get('resolution') == '2160p' and meta.get('type') != 'DISC': + return '57' if romanian else '61' + if meta.get('type') in ('REMUX', 'WEBDL', 'WEBRIP', 'HDTV', 'ENCODE'): + return '29' if romanian else '8' + if meta.get('type') == 'DISC': + return '24' if romanian else '17' + if meta.get('type') == 'SD': + return '35' if romanian else '10' + + return None + + async def get_file_info(self, meta): + base_path = f"{meta['base_dir']}/tmp/{meta['uuid']}" + + if meta.get('bdinfo'): + bd_info = open(f"{base_path}/BD_SUMMARY_00.txt", encoding='utf-8').read() + return None, bd_info else: - console.print("[cyan]Request Data:") - pprint(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - 'FANRES': '3' - }.get(category_name, '0') - return category_id + media_info = open(f"{base_path}/MEDIAINFO_CLEANPATH.txt", encoding='utf-8').read() + return media_info, None + + async def get_screenshots(self, meta): + urls = [] + for image in meta.get('menu_images', []) + meta.get('image_list', []): + if image.get('raw_url'): + urls.append(image['raw_url']) + + return urls async def search_existing(self, meta, disctype): - dupes = [] - headers = { - 'accept': 'application/json', - 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - } + results = [] + search_url = 'https://speedapp.io/api/torrent' + + params = {} + if meta.get('imdb_id', 0) != 0: + params['imdbId'] = f"{meta.get('imdb_info', {}).get('imdbID', '')}" + else: + search_title = meta['title'].replace(':', '').replace("'", '').replace(',', '') + params['search'] = search_title + + try: + response = await self.session.get(url=search_url, params=params, headers=self.session.headers) + + if response.status_code == 200: + data = response.json() + for each in data: + name = each.get('name') + size = each.get('size') + link = f'{self.torrent_url}{each.get("id")}/' + + if name: + results.append({ + 'name': name, + 'size': size, + 'link': link + }) + return results + else: + console.print(f'[bold red]HTTP request failed. Status: {response.status_code}') + + except Exception as e: + console.print(f'[bold red]Unexpected error: {e}') + console.print_exception() + + return results + + async def search_channel(self, meta): + spd_channel = meta.get('spd_channel', '') or self.config['TRACKERS'][self.tracker].get('channel', '') + + # if no channel is specified, use the default + if not spd_channel: + return 1 + + # return the channel as int if it's already an integer + if isinstance(spd_channel, int): + return spd_channel + + # if user enters id as a string number + if isinstance(spd_channel, str): + if spd_channel.isdigit(): + return int(spd_channel) + # if user enter tag then it will use API to search + else: + pass params = { - 'includingDead': '1' + 'search': spd_channel } - if meta['imdb_id'] != 0: - params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] - else: - params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params, headers=headers) - if response.status_code == 200: - data = response.json() - for each in data: - result = [each][0]['name'] - dupes.append(result) + response = await self.session.get(url=self.url + '/api/channel', params=params, headers=self.session.headers) + + if response.status_code == 200: + data = response.json() + for entry in data: + id = entry['id'] + tag = entry['tag'] + + if id and tag: + if tag != spd_channel: + console.print(f'[{self.tracker}]: Unable to find a matching channel based on your input. Please check if you entered it correctly.') + return + else: + return id + else: + console.print(f'[{self.tracker}]: Could not find the channel ID. Please check if you entered it correctly.') + else: console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out while searching for existing torrents.") - except httpx.RequestError as e: - console.print(f"[bold red]An error occurred while making the request: {e}") except Exception as e: console.print(f"[bold red]Unexpected error: {e}") console.print_exception() - await asyncio.sleep(5) - return dupes + async def edit_desc(self, meta): + builder = DescriptionBuilder(self.config) + desc_parts = [] + + user_description = await builder.get_user_description(meta) + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker, resize=True) + if user_description or episode_overview: # Avoid unnecessary descriptions + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo_resize_url = meta.get('tmdb_logo', '') + if logo_resize_url: + desc_parts.append(f"[center][img]https://image.tmdb.org/t/p/w300/{logo_resize_url}[/img][/center]") + + # TV + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + desc_parts.append(f"[center][img]{episode_image}[/img][/center]") + + desc_parts.append(f'[center]{episode_overview}[/center]') + + # User description + desc_parts.append(user_description) + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Signature + desc_parts.append(f"[url=https://github.com/Audionut/Upload-Assistant]{meta['ua_signature']}[/url]") + + description = '\n\n'.join(part for part in desc_parts if part.strip()) + + bbcode = BBCODE() + description = bbcode.remove_img_resize(description) + description = bbcode.convert_named_spoiler_to_normal_spoiler(description) + description = bbcode.remove_extra_lines(description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + async def edit_name(self, meta): + torrent_name = meta['name'] + + name = torrent_name.replace(':', ' -') + name = unicodedata.normalize("NFKD", name) + name = name.encode("ascii", "ignore").decode("ascii") + name = re.sub(r'[\\/*?"<>|]', '', name) + + return re.sub(r"\s{2,}", " ", name) + + async def encode_to_base64(self, file_path): + with open(file_path, 'rb') as binary_file: + binary_file_data = binary_file.read() + base64_encoded_data = base64.b64encode(binary_file_data) + return base64_encoded_data.decode('utf-8') + + async def get_nfo(self, meta): + nfo_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + nfo_files = glob.glob(os.path.join(nfo_dir, "*.nfo")) + + if nfo_files: + nfo = await self.encode_to_base64(nfo_files[0]) + return nfo + + return None + + async def fetch_data(self, meta): + media_info, bd_info = await self.get_file_info(meta) + + data = { + 'bdInfo': bd_info, + 'coverPhotoUrl': meta.get('backdrop', ''), + 'description': meta.get('genres', ''), + 'media_info': media_info, + 'name': await self.edit_name(meta), + 'nfo': await self.get_nfo(meta), + 'plot': meta.get('overview_meta', '') or meta.get('overview', ''), + 'poster': meta.get('poster', ''), + 'technicalDetails': await self.edit_desc(meta), + 'screenshots': await self.get_screenshots(meta), + 'type': await self.get_cat_id(meta), + 'url': str(meta.get('imdb_info', {}).get('imdb_url', '')), + } + + await self.common.edit_torrent(meta, self.tracker, self.source_flag) + data['file'] = await 
self.encode_to_base64(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent") + if meta['debug'] is True: + data['file'] = data['file'][:50] + '...[DEBUG MODE]' + data['nfo'] = data['nfo'][:50] + '...[DEBUG MODE]' + + return data + + async def upload(self, meta, disctype): + data = await self.fetch_data(meta) + + channel = await self.search_channel(meta) + if channel is None: + meta['skipping'] = f"{self.tracker}" + return + channel = str(channel) + data['channel'] = channel + + status_message = '' + torrent_id = '' + + if meta['debug'] is False: + try: + response = await self.session.post(url=self.upload_url, json=data, headers=self.session.headers) + response.raise_for_status() + response = response.json() + if response.get('status') is True and response.get('error') is False: + status_message = "Torrent uploaded successfully." + + if 'downloadUrl' in response: + torrent_id = str(response.get('torrent', {}).get('id', '')) + if torrent_id: + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + + download_url = f"{self.url}/api/torrent/{torrent_id}/download" + await self.common.add_tracker_torrent( + meta, + tracker=self.tracker, + source_flag=None, + new_tracker=None, + comment=None, + headers={'Authorization': self.config['TRACKERS'][self.tracker]['api_key']}, + downurl=download_url + ) + + else: + console.print("[bold red]No downloadUrl in response.") + console.print("[bold red]Confirm it uploaded correctly and try to download manually") + console.print(response) + + else: + status_message = f'data error: {response}' + + except httpx.HTTPStatusError as e: + status_message = f'data error: HTTP {e.response.status_code} - {e.response.text}' + except httpx.TimeoutException: + status_message = f'data error: Request timed out after {self.session.timeout.write} seconds' + except httpx.RequestError as e: + status_message = f'data error: Unable to upload. Error: {e}.\nResponse: {response}' + except Exception as e: + status_message = f'data error: It may have uploaded, go check. Error: {e}.\nResponse: {response}' + return + + else: + console.print(data) + status_message = "Debug mode enabled, not uploading." 
+ + meta['tracker_status'][self.tracker]['status_message'] = status_message diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 8913ac008..cb9cd46e5 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -1,131 +1,25 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -import asyncio -import requests -import platform -import os -import glob -import httpx from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class STC(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class STC(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='STC') self.config = config + self.common = COMMON(config) self.tracker = 'STC' self.source_flag = 'STC' - self.upload_url = 'https://skipthecommericals.xyz/api/torrents/upload' - self.search_url = 'https://skipthecommericals.xyz/api/torrents/filter' - self.torrent_url = 'https://skipthecommericals.xyz/torrents/' - self.signature = '\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]' + self.base_url = 'https://skipthecommericals.xyz/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [""] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) - resolution_id = await self.get_res_id(meta['resolution']) - stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': stc_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if 
self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://skipthecommericals.xyz/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def edit_name(self, meta): - stc_name = meta.get('name') - return stc_name - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type, tv_pack, sd, category): + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ -133,73 +27,23 @@ async def get_type_id(self, type, tv_pack, sd, category): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - if tv_pack == 1: - if sd == 1: + }.get(meta.get('type'), '0') + if meta.get('tv_pack'): + if meta.get('sd'): # Season SD type_id = '14' - if type == "ENCODE": + if meta.get('type') == "ENCODE": type_id = '18' - if sd == 0: + if not meta.get('sd'): # Season HD type_id = '13' - if type == "ENCODE": + if meta.get('type') == "ENCODE": type_id = '18' - if type == "DISC" and category == "TV": - if sd == 1: + if meta.get('type') == "DISC" and meta.get('category') == "TV": + if meta.get('sd') == 1: # SD-RETAIL type_id = '17' - if sd == 0: + if meta.get('sd') == 0: # HD-RETAIL type_id = '18' - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = 
response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + return {'type_id': type_id} diff --git a/src/trackers/THR.py index f0906b1e7..2b980e550 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- import asyncio import requests @@ -10,6 +11,7 @@ import httpx from bs4 import BeautifulSoup from unidecode import unidecode +from src.bbcode import BBCODE from src.console import console from src.trackers.COMMON import COMMON @@ -74,11 +76,11 @@ async def upload(self, meta, disctype): 'name': thr_name, 'descr': desc, 'type': cat_id, - 'url': f"https://www.imdb.com/title/tt%7Bmeta.get('imdb')}/", + 'url': str(meta.get('imdb_info', {}).get('imdb_url', '') + '/'), 'tube': meta.get('youtube', '') } headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.3 ({platform.system()} {platform.release()})' } # If pronfo fails, put mediainfo into THR parser if meta.get('is_disc', '') != 'BDMV': @@ -170,6 +172,7 @@ def get_subtitles(self, meta): for track in mi['media']['track']: if track['@type'] == "Text": language = track.get('Language') + language = language.split('-')[0] if language else language if language in ['hr', 'en', 'bs', 'sr', 'sl']: if language not in sub_langs: sub_langs.append(language) @@ -191,6 +194,7 @@ def get_subtitles(self, meta): async def edit_desc(self, meta): pronfo = False + bbcode = BBCODE() base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta['tag'] == "": @@ -211,11 +215,27 @@ async def edit_desc(self, meta): desc.write(f"Category: {meta['category']}\n") desc.write(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n") if meta['imdb_id'] != 0: - desc.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb']}\n") + desc.write(f"IMDb: {str(meta.get('imdb_info', {}).get('imdb_url', ''))}\n") if meta['tvdb_id'] != 0: desc.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if meta['tvmaze_id'] != 0: + desc.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") + if meta['mal_id'] != 0: + desc.write(f"MAL: https://myanimelist.net/anime/{meta['mal_id']}\n") desc.write("[/quote]") - desc.write(base) + + if base: + # replace unsupported bbcode tags + base = bbcode.convert_named_spoiler_to_named_hide(base) + base = bbcode.convert_spoiler_to_hide(base) + base = bbcode.convert_code_to_pre(base) + # fix alignment for NFO content inherited from centering the spoiler + base = re.sub(r'(?P<open>\[hide=(Scene|FraMeSToR) NFO:\]\[pre\])(?P<content>.*?)(?P<close>\[/pre\]\[/hide\])', + r'\g<open>[align=left]\g<content>[/align]\g<close>', + base, + flags=re.DOTALL) + desc.write("\n\n" + base) + # REHOST IMAGES os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") image_patterns = ["*.png", ".[!.]*.png"] @@ -259,7 +279,7 @@ async def 
edit_desc(self, meta): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt") as bd_file: desc.write(f"[nfo]{bd_file.read()}[/nfo]") bd_file.close() - else: + elif self.config['TRACKERS']['THR'].get('pronfo_api_key'): # ProNFO pronfo_url = f"https://www.pronfo.com/api/v1/access/upload/%7Bself.config['TRACKERS']['THR'].get('pronfo_api_key', '')}" data = { @@ -287,7 +307,7 @@ async def edit_desc(self, meta): # full_mi = mi_file.read() # desc.write(f"[/align]\n[hide=FULL MEDIAINFO]{full_mi}[/hide][align=center]") # mi_file.close() - desc.write("\n\n[size=2][url=https://www.torrenthr.org/forums.php?action=viewtopic&topicid=8977]Created by L4G's Upload Assistant[/url][/size][/align]") + desc.write(f"\n\n[size=2][url=https://www.torrenthr.org/forums.php?action=viewtopic&topicid=8977]{meta['ua_signature']}[/url][/size][/align]") desc.close() return pronfo diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 3bd11549b..8d43851b5 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -1,152 +1,50 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests +import aiofiles +import click import os import re -import platform -import sys -import cli_ui import urllib.request -import click -import httpx -from src.trackers.COMMON import COMMON from src.console import console +from src.get_desc import DescriptionBuilder +from src.trackers.UNIT3D import UNIT3D from src.uploadscreens import upload_screens -class TIK(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class TIK(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='TIK') self.config = config self.tracker = 'TIK' self.source_flag = 'TIK' - self.search_url = 'https://cinematik.net/api/torrents/filter' - self.upload_url = 'https://cinematik.net/api/torrents/upload' - self.torrent_url = 'https://cinematik.net/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.base_url = 'https://cinematik.net/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')) - type_id = await self.get_type_id(disctype) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + async def get_additional_checks(self, meta): + should_continue = True if not meta['is_disc']: console.print("[red]Only disc-based content allowed at TIK") - return - elif meta['bdinfo'] is not None: - mi_dump = None - with 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as bd_file: - bd_dump = bd_file.read() - else: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as mi_file: - mi_dump = mi_file.read() - bd_dump = None - - if meta.get('desclink'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() - print(f"Custom Description Link: {desc}") + return False - elif meta.get('descfile'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() - print(f"Custom Description File Path: {desc}") + return should_continue - else: - await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() - - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} + async def get_additional_data(self, meta): data = { - 'name': await self.get_name(meta, disctype), - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': 0, - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if self.config['TRACKERS'][self.tracker].get('personal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('personal_group', [])): - data['personal_release'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'modq': await self.get_flag(meta, 'modq'), } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://cinematik.net/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - def get_basename(self, meta): - path = next(iter(meta['filelist']), meta['path']) - return os.path.basename(path) + return data - async def get_name(self, meta, disctype): + async def get_name(self, meta): disctype = meta.get('disctype', None) - basename = self.get_basename(meta) + basename = os.path.basename(next(iter(meta['filelist']), meta['path'])) type = meta.get('type', "") title = meta.get('title', "").replace('AKA', '/').strip() alt_title = meta.get('aka', "").replace('AKA', '/').strip() @@ -175,12 +73,7 @@ async def get_name(self, meta, disctype): search_year = meta.get('search_year', "") if not str(search_year).strip(): search_year = year - - category_name = meta.get('category', "") - foreign = meta.get('foreign') - opera = meta.get('opera') - asian = meta.get('asian') - meta['category_id'] = await self.get_cat_id(category_name, foreign, opera, asian) + meta['category_id'] = (await self.get_category_id(meta))['category_id'] name = "" alt_title_part = f" {alt_title}" if alt_title else "" @@ -196,17 +89,13 @@ async def get_name(self, meta, disctype): if meta['is_disc'] == 'DVD': name = f"{title}{alt_title_part} ({search_year}) {season} {source} {dvd_size}" - # User confirmation - console.print(f"[yellow]Final generated name: [greee]{name}") - confirmation = cli_ui.ask_yes_no("Do you want to use this name?", default=False) # Default is 'No' - - if confirmation: - return name - else: - console.print("[red]Sorry, this seems to be an edge case, please report at (insert_link)") - sys.exit(1) + return {'name': name} - async def get_cat_id(self, category_name, foreign, opera, asian): + async def get_category_id(self, meta): + category_name = meta['category'] + foreign = meta.get('foreign', False) + opera = meta.get('opera', False) + asian = meta.get('asian', False) category_id = { 'FILM': '1', 'TV': '2', @@ -233,9 +122,10 @@ async def get_cat_id(self, category_name, foreign, opera, asian): else: category_id = '2' - return category_id + return {'category_id': category_id} - async def get_type_id(self, disctype): + async def get_type_id(self, meta): + disctype = meta.get('disctype', None) type_id_map = { 'Custom': '1', 'BD100': '3', @@ -251,14 +141,15 @@ async def get_type_id(self, disctype): if not disctype: console.print("[red]You must specify a --disctype") - return None + # Raise an exception since we can't proceed without disctype + raise ValueError("disctype is required for TIK tracker but was not provided") disctype_value = disctype[0] if isinstance(disctype, list) else disctype type_id = type_id_map.get(disctype_value, '1') # '1' is the default fallback - return type_id + return {'type_id': type_id} - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { 'Other': '10', '4320p': '1', @@ -271,17 +162,16 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 + async def get_description(self, meta): + if meta.get('description_link') or meta.get('description_file'): + desc = await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker, comparison=True) - return 1 if meta.get(flag_name, False) else 0 + print(f'Custom Description Link/File Path: {desc}') + return {'description': 
desc} - async def edit_desc(self, meta): if len(meta.get('discs', [])) > 0: summary = meta['discs'][0].get('summary', '') else: @@ -326,7 +216,7 @@ async def edit_desc(self, meta): if os.path.exists(poster_path): try: console.print("Uploading standard poster to image host....") - new_poster_url, _ = upload_screens(meta, 1, 1, 0, 1, [poster_path], {}) + new_poster_url, _ = await upload_screens(meta, 1, 1, 0, 1, [poster_path], {}) # Ensure that the new poster URL is assigned only once if len(new_poster_url) > 0: @@ -342,7 +232,7 @@ async def edit_desc(self, meta): images = meta['image_list'] discs = meta.get('discs', []) # noqa #F841 - if len(images) >= 4: + if len(images) >= 6: image_link_1 = images[0]['raw_url'] image_link_2 = images[1]['raw_url'] image_link_3 = images[2]['raw_url'] @@ -381,7 +271,7 @@ async def edit_desc(self, meta): desc_text.append("[code]\n") if meta['is_disc'] == 'BDMV': desc_text.append(f" Disc Label.........:{meta.get('bdinfo', {}).get('label', '')}\n") - desc_text.append(f" IMDb...............: [url]https://www.imdb.com/title/tt{meta.get('imdb_id')}{meta.get('imdb_rating', '')}[/url]\n") + desc_text.append(f" IMDb...............: [url]{str(meta.get('imdb_info', {}).get('imdb_url', ''))}{str(meta.get('imdb_rating', ''))}[/url]\n") desc_text.append(f" Year...............: {meta.get('year', '')}\n") desc_text.append(f" Country............: {country_name}\n") if meta['is_disc'] == 'BDMV': @@ -436,9 +326,8 @@ async def edit_desc(self, meta): if meta['is_disc'] == 'BDMV': video_info = meta.get('bdinfo', {}).get('video', []) - video_codec = video_info[0].get('codec', 'Unknown') - video_bitrate = video_info[0].get('bitrate', 'Unknown') - desc_text.append(f" Video Format.......: {video_codec} / {video_bitrate}\n") + video_resolution = video_info[0].get('resolution', 'Unknown') + desc_text.append(f" Video Format.......: {video_resolution}\n") else: desc_text.append(f" DVD Format.........: {meta.get('source', 'Unknown')}\n") desc_text.append(" Film Aspect Ratio..: [color=red]The actual aspect ratio of the content, not including the black bars[/color]\n") @@ -496,8 +385,10 @@ async def edit_desc(self, meta): console.print("[green]Keeping the original description.[/green]") # Write the final description to the file - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc_file: - desc_file.write(description) + async with aiofiles.open(f'{meta["base_dir"]}/tmp/{meta["uuid"]}/[{self.tracker}]DESCRIPTION.txt', 'w', encoding='utf-8') as desc_file: + await desc_file.write(description) + + return {'description': description} def parse_subtitles(self, disc_mi): unique_subtitles = set() # Store unique subtitle strings @@ -560,36 +451,3 @@ def country_code_to_name(self, code): 'YEM': 'Yemen', 'ZMB': 'Zambia', 'ZWE': 'Zimbabwe' } return country_mapping.get(code.upper(), 'Unknown Country') - - async def search_existing(self, meta, disctype): - dupes = [] - disctype = meta.get('disctype', None) - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')), - 'types[]': await self.get_type_id(disctype), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await 
client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/TL.py b/src/trackers/TL.py index 224528ca8..2470d09eb 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -1,211 +1,255 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord +import aiofiles import httpx import os import re import platform -from src.trackers.COMMON import COMMON +from src.bbcode import BBCODE from src.console import console -from pymediainfo import MediaInfo - - -class TL(): - CATEGORIES = { - 'Anime': 34, - 'Movie4K': 47, - 'MovieBluray': 13, - 'MovieBlurayRip': 14, - 'MovieCam': 8, - 'MovieTS': 9, - 'MovieDocumentary': 29, - 'MovieDvd': 12, - 'MovieDvdRip': 11, - 'MovieForeign': 36, - 'MovieHdRip': 43, - 'MovieWebrip': 37, - 'TvBoxsets': 27, - 'TvEpisodes': 26, - 'TvEpisodesHd': 32, - 'TvForeign': 44 - } +from src.get_desc import DescriptionBuilder +from src.trackers.COMMON import COMMON + +class TL: def __init__(self, config): self.config = config + self.common = COMMON(config) self.tracker = 'TL' self.source_flag = 'TorrentLeech.org' self.base_url = 'https://www.torrentleech.org/' self.http_upload_url = f'{self.base_url}/torrents/upload/' self.api_upload_url = f'{self.base_url}/torrents/upload/apiupload' - self.signature = """
Created by Audionut's Upload Assistant
""" - self.banned_groups = [""] + self.torrent_url = f'{self.base_url}/torrent/' + self.banned_groups = [] self.session = httpx.AsyncClient(timeout=60.0) - self.api_upload = self.config['TRACKERS'][self.tracker].get('api_upload') - self.announce_key = self.config['TRACKERS'][self.tracker]['announce_key'] - self.config['TRACKERS'][self.tracker]['announce_url'] = f"https://tracker.torrentleech.org/a/%7Bself.announce_key%7D/announce" + self.tracker_config = self.config['TRACKERS'][self.tracker] + self.api_upload = self.tracker_config.get('api_upload', False) + self.passkey = self.tracker_config.get('passkey') + self.announce_list = [ + f'https://tracker.torrentleech.org/a/%7Bself.passkey%7D/announce', + f'https://tracker.tleechreload.org/a/%7Bself.passkey%7D/announce' + ] self.session.headers.update({ - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant ({platform.system()} {platform.release()})' }) - async def login(self, meta): - if self.api_upload: + async def login(self, meta, force=False): + if self.api_upload and not force: return True - self.cookies_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/TL.txt") + cookies_file = os.path.abspath(f"{meta['base_dir']}/data/cookies/TL.txt") - cookie_path = os.path.abspath(self.cookies_file) + cookie_path = os.path.abspath(cookies_file) if not os.path.exists(cookie_path): console.print(f"[bold red]'{self.tracker}' Cookies not found at: {cookie_path}[/bold red]") return False - common = COMMON(config=self.config) - self.session.cookies.update(await common.parseCookieFile(self.cookies_file)) + self.session.cookies.update(await self.common.parseCookieFile(cookies_file)) try: - response = await self.session.get(self.http_upload_url, timeout=10) - if response.status_code == 200 and 'torrents/upload' in str(response.url): - return True + if force: + response = await self.session.get('https://www.torrentleech.org/torrents/browse/index', timeout=10) + if response.status_code == 301 and 'torrents/browse' in str(response.url): + if meta['debug']: + console.print(f"[bold green]Logged in to '{self.tracker}' with cookies.[/bold green]") + return True + elif not force: + response = await self.session.get(self.http_upload_url, timeout=10) + if response.status_code == 200 and 'torrents/upload' in str(response.url): + if meta['debug']: + console.print(f"[bold green]Logged in to '{self.tracker}' with cookies.[/bold green]") + return True + else: + console.print(f"[bold red]Login to '{self.tracker}' with cookies failed. 
Please check your cookies.[/bold red]") + return False except httpx.RequestError as e: console.print(f"[bold red]Error while validating credentials for '{self.tracker}': {e}[/bold red]") return False async def generate_description(self, meta): - base_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt" - self.final_desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - - description_parts = [] - - # MediaInfo/BDInfo - tech_info = "" - if meta.get('is_disc') != 'BDMV': - video_file = meta['filelist'][0] - mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") - if os.path.exists(mi_template): - try: - media_info = MediaInfo.parse(video_file, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) - tech_info = str(media_info) - except Exception: - console.print("[bold red]Couldn't find the MediaInfo template[/bold red]") - mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" - if os.path.exists(mi_file_path): - with open(mi_file_path, 'r', encoding='utf-8') as f: - tech_info = f.read() - else: - console.print("[bold yellow]Using normal MediaInfo for the description.[/bold yellow]") - mi_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" - if os.path.exists(mi_file_path): - with open(mi_file_path, 'r', encoding='utf-8') as f: - tech_info = f.read() - else: - bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" - if os.path.exists(bd_summary_file): - with open(bd_summary_file, 'r', encoding='utf-8') as f: - tech_info = f.read() - - if tech_info: - description_parts.append(tech_info) - - if os.path.exists(base_desc_path): - with open(base_desc_path, 'r', encoding='utf-8') as f: - manual_desc = f.read() - description_parts.append(manual_desc) - - # Add screenshots to description only if it is an anonymous upload as TL does not support anonymous upload in the screenshots section - if meta.get('anon', False) or self.api_upload: - images = meta.get('image_list', []) - - screenshots_block = "
Screenshots\n\n" - for image in images: - img_url = image['img_url'] - web_url = image['web_url'] - screenshots_block += f""" """ - screenshots_block += "\n
" - - description_parts.append(screenshots_block) + builder = DescriptionBuilder(self.config) + desc_parts = [] + process_screenshot = not self.tracker_config.get("img_rehost", True) or self.tracker_config.get("api_upload", True) + + # Custom Header + desc_parts.append(await builder.get_custom_header(self.tracker)) + + # Logo + logo, logo_size = await builder.get_logo_section(meta, self.tracker) + if logo and logo_size: + desc_parts.append(f"""
""") + + # TV + title, episode_image, episode_overview = await builder.get_tv_info(meta, self.tracker) + if episode_overview: + desc_parts.append(f'[center]{title}[/center]') + + if episode_image: + desc_parts.append(f"[center][/center]") + + desc_parts.append(f'[center]{episode_overview}[/center]') + + # File information + desc_parts.append(await builder.get_mediainfo_section(meta, self.tracker)) + desc_parts.append(await builder.get_bdinfo_section(meta)) + + # NFO + if meta.get('description_nfo_content', ''): + desc_parts.append(f"
{meta.get('description_nfo_content')}
") + + # User description + desc_parts.append(await builder.get_user_description(meta)) + + # Menus Screenshots + if process_screenshot: + # Disc menus screenshots header + menu_images = meta.get("menu_images", []) + if menu_images: + desc_parts.append(await builder.menu_screenshot_header(meta, self.tracker)) + + # Disc menus screenshots + menu_screenshots_block = "" + for i, image in enumerate(menu_images): + menu_img_url = image.get("img_url") + menu_web_url = image.get("web_url") + if menu_img_url and menu_web_url: + menu_screenshots_block += f""" """ + if (i + 1) % 2 == 0: + menu_screenshots_block += "
<br><br>" + if menu_screenshots_block: + desc_parts.append("<center>" + menu_screenshots_block + "</center>
") + + # Tonemapped Header + desc_parts.append(await builder.get_tonemapped_header(meta, self.tracker)) + + # Screenshots Section + if process_screenshot: + images = meta.get("image_list", []) + if images: + # Screenshot Header + desc_parts.append(await builder.screenshot_header(self.tracker)) + # Screenshots + screenshots_block = "" + for i, image in enumerate(images): + img_url = image.get("img_url") + web_url = image.get("web_url") + if img_url and web_url: + screenshots_block += ( + f""" """ + ) + if (i + 1) % 2 == 0: + screenshots_block += "
<br><br>" + if screenshots_block: + desc_parts.append("<center>" + screenshots_block + "</center>
") + + # Signature + desc_parts.append( + f"""""" + ) + + description = '\n\n'.join(part for part in desc_parts if part.strip()) - if self.signature: - description_parts.append(self.signature) - - final_description = "\n\n".join(filter(None, description_parts)) - from src.bbcode import BBCODE bbcode = BBCODE() - desc = final_description - desc = desc.replace("[center]", "
").replace("[/center]", "
") - desc = re.sub(r'\[spoiler=.*?\]', '[spoiler]', desc, flags=re.IGNORECASE) - desc = re.sub(r'\[\*\]', '\n[*]', desc, flags=re.IGNORECASE) - desc = re.sub(r'\[list=.*?\]', '[list]', desc, flags=re.IGNORECASE) - desc = re.sub(r'\[c\](.*?)\[/c\]', r'[code]\1[/code]', desc, flags=re.IGNORECASE | re.DOTALL) - desc = re.sub(r'\[hr\]', '---', desc, flags=re.IGNORECASE) - desc = re.sub(r'\[img=[\d"x]+\]', '[img]', desc, flags=re.IGNORECASE) - desc = bbcode.convert_comparison_to_centered(desc, 1000) - - with open(self.final_desc_path, 'w', encoding='utf-8') as f: - f.write(desc) - - return desc - - async def get_cat_id(self, common, meta): + description = description.replace("[center]", "
").replace("[/center]", "
") + description = re.sub(r'\[\*\]', '\n[*]', description, flags=re.IGNORECASE) + description = re.sub(r'\[c\](.*?)\[/c\]', r'[code]\1[/code]', description, flags=re.IGNORECASE | re.DOTALL) + description = re.sub(r'\[hr\]', '---', description, flags=re.IGNORECASE) + description = re.sub(r'\[img=[\d"x]+\]', '[img]', description, flags=re.IGNORECASE) + description = description.replace('[*] ', '• ').replace('[*]', '• ') + description = bbcode.remove_list(description) + description = bbcode.convert_comparison_to_centered(description, 1000) + description = bbcode.remove_spoiler(description) + description = re.sub(r'\n{3,}', '\n\n', description) + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as description_file: + await description_file.write(description) + + return description + + def get_category(self, meta): + categories = { + 'Anime': 34, + 'Movie4K': 47, + 'MovieBluray': 13, + 'MovieBlurayRip': 14, + 'MovieCam': 8, + 'MovieTS': 9, + 'MovieDocumentary': 29, + 'MovieDvd': 12, + 'MovieDvdRip': 11, + 'MovieForeign': 36, + 'MovieHdRip': 43, + 'MovieWebrip': 37, + 'TvBoxsets': 27, + 'TvEpisodes': 26, + 'TvEpisodesHd': 32, + 'TvForeign': 44 + } + if meta.get('anime', 0): - return self.CATEGORIES['Anime'] + return categories['Anime'] if meta['category'] == 'MOVIE': if meta['original_language'] != 'en': - return self.CATEGORIES['MovieForeign'] + return categories['MovieForeign'] elif 'Documentary' in meta['genres']: - return self.CATEGORIES['MovieDocumentary'] + return categories['MovieDocumentary'] elif meta['resolution'] == '2160p': - return self.CATEGORIES['Movie4K'] + return categories['Movie4K'] elif meta['is_disc'] in ('BDMV', 'HDDVD') or (meta['type'] == 'REMUX' and meta['source'] in ('BluRay', 'HDDVD')): - return self.CATEGORIES['MovieBluray'] + return categories['MovieBluray'] elif meta['type'] == 'ENCODE' and meta['source'] in ('BluRay', 'HDDVD'): - return self.CATEGORIES['MovieBlurayRip'] + return categories['MovieBlurayRip'] elif meta['is_disc'] == 'DVD' or (meta['type'] == 'REMUX' and 'DVD' in meta['source']): - return self.CATEGORIES['MovieDvd'] + return categories['MovieDvd'] elif meta['type'] == 'ENCODE' and 'DVD' in meta['source']: - return self.CATEGORIES['MovieDvdRip'] + return categories['MovieDvdRip'] elif 'WEB' in meta['type']: - return self.CATEGORIES['MovieWebrip'] + return categories['MovieWebrip'] elif meta['type'] == 'HDTV': - return self.CATEGORIES['MovieHdRip'] + return categories['MovieHdRip'] elif meta['category'] == 'TV': if meta['original_language'] != 'en': - return self.CATEGORIES['TvForeign'] + return categories['TvForeign'] elif meta.get('tv_pack', 0): - return self.CATEGORIES['TvBoxsets'] + return categories['TvBoxsets'] elif meta['sd']: - return self.CATEGORIES['TvEpisodes'] + return categories['TvEpisodes'] else: - return self.CATEGORIES['TvEpisodesHd'] + return categories['TvEpisodesHd'] raise NotImplementedError('Failed to determine TL category!') def get_screens(self, meta): - screenshot_urls = [ - image.get('raw_url') - for image in meta.get('image_list', []) - if image.get('raw_url') - ] + urls = [] + for image in meta.get('menu_images', []) + meta.get('image_list', []): + if image.get('raw_url'): + urls.append(image['raw_url']) - return screenshot_urls + return urls def get_name(self, meta): is_scene = bool(meta.get('scene_name')) if is_scene: name = meta['scene_name'] else: - name = meta['name'] + name = meta['name'].replace(meta['aka'], '') return name async def 
search_existing(self, meta, disctype): - await self.login(meta) - cat_id = await self.get_cat_id(self, meta) - - dupes = [] + login = await self.login(meta, force=True) + if not login: + meta['skipping'] = "TL" + if meta['debug']: + console.print(f"[bold red]Skipping upload to '{self.tracker}' as login failed.[/bold red]") + return + cat_id = self.get_category(meta) - if self.api_upload: - console.print(f"[bold yellow]Cannot search for duplicates on {self.tracker} when using API upload.[/bold yellow]") - return dupes + results = [] search_name = meta["title"] resolution = meta["resolution"] @@ -242,122 +286,173 @@ async def search_existing(self, meta, disctype): torrents = data.get("torrentList", []) for torrent in torrents: - name = torrent.get("name") - size = torrent.get("size") - if name or size: - dupes.append({'name': name, 'size': size}) + name = torrent.get('name') + link = f"{self.torrent_url}{torrent.get('fid')}" + size = torrent.get('size') + if name: + results.append({ + 'name': name, + 'size': size, + 'link': link + }) except Exception as e: console.print(f"[bold red]Error searching for duplicates on {self.tracker} ({url}): {e}[/bold red]") - return dupes + return results + + async def get_anilist_id(self, meta): + url = 'https://graphql.anilist.co/' + query = ''' + query ($idMal: Int) { + Media(idMal: $idMal, type: ANIME) { + id + } + } + ''' + variables = {'idMal': meta.get('mal_id')} + + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(url, json={'query': query, 'variables': variables}) + response.raise_for_status() + data = response.json() + + media = data.get('data', {}).get('Media') + return media['id'] if media else None async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(common, meta) + await self.common.edit_torrent(meta, self.tracker, self.source_flag) if self.api_upload: - await self.upload_api(meta, cat_id) + await self.upload_api(meta) else: - await self.upload_http(meta, cat_id) + await self.cookie_upload(meta) - async def upload_api(self, meta, cat_id): - desc_content = await self.generate_description(meta) + async def upload_api(self, meta): torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" with open(torrent_path, 'rb') as open_torrent: files = { 'torrent': (self.get_name(meta) + '.torrent', open_torrent, 'application/x-bittorrent') } + data = { - 'announcekey': self.announce_key, - 'category': cat_id, - 'nfo': desc_content + 'announcekey': self.passkey, + 'category': self.get_category(meta), + 'description': await self.generate_description(meta), + 'name': self.get_name(meta), + 'nonscene': 'on' if not meta.get('scene') else 'off', } + if meta.get('anime', False): + anilist_id = await self.get_anilist_id(meta) + if anilist_id: + data.update({'animeid': f"https://anilist.co/anime/%7Banilist_id%7D"}) + + else: + if meta['category'] == 'MOVIE': + data.update({'imdb': meta.get('imdb_info', {}).get('imdbID', '')}) + + if meta['category'] == 'TV': + data.update({ + 'tvmazeid': meta.get('tvmaze_id', ''), + 'tvmazetype': meta.get('tv_pack', ''), + }) + + anon = not (meta['anon'] == 0 and not self.tracker_config.get('anon', False)) + if anon: + data.update({'is_anonymous_upload': 'on'}) + if meta['debug'] is False: response = await self.session.post( url=self.api_upload_url, files=files, data=data ) - if not response.text.isnumeric(): - 
meta['tracker_status'][self.tracker]['status_message'] = response.text - else: - console.print("[cyan]Request Data:") - console.print(data) - async def upload_http(self, meta, cat_id): - if not await self.login(meta): - meta['tracker_status'][self.tracker]['status_message'] = "Login with cookies failed." - return + if not response.text.isnumeric(): + meta['tracker_status'][self.tracker]['status_message'] = 'data error: ' + response.text - await self.generate_description(meta) + if response.text.isnumeric(): + torrent_id = str(response.text) + meta['tracker_status'][self.tracker]['status_message'] = 'Torrent uploaded successfully.' + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + await self.common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.announce_list, self.torrent_url + torrent_id) - imdbURL = '' - if meta.get('category') == 'MOVIE' and meta.get('imdb_info', {}).get('imdbID', ''): - imdbURL = f"https://www.imdb.com/title/%7Bmeta.get('imdb_info', {}).get('imdbID', '')}" + else: + console.print(data) + async def get_cookie_upload_data(self, meta): tvMazeURL = '' if meta.get('category') == 'TV' and meta.get("tvmaze_id"): tvMazeURL = f"https://www.tvmaze.com/shows/%7Bmeta.get('tvmaze_id')}" - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - torrent_file = f"[{self.tracker}].torrent" - description_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - - with open(torrent_path, 'rb') as torrent_fh, open(description_path, 'rb') as nfo: + data = { + 'name': self.get_name(meta), + 'category': self.get_category(meta), + 'nonscene': 'on' if not meta.get("scene") else 'off', + 'imdbURL': str(meta.get('imdb_info', {}).get('imdb_url', '')), + 'tvMazeURL': tvMazeURL, + 'igdbURL': '', + 'torrentNFO': '0', + 'torrentDesc': '1', + 'nfotextbox': '', + 'torrentComment': '0', + 'uploaderComments': '', + 'is_anonymous_upload': 'off', + 'screenshots[]': self.get_screens(meta) if self.tracker_config.get('img_rehost', True) else '', + } + + anon = not (meta['anon'] == 0 and not self.tracker_config.get('anon', False)) + if anon: + data.update({'is_anonymous_upload': 'on'}) + + return data + + async def cookie_upload(self, meta): + await self.generate_description(meta) + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8') as f: + description_content = await f.read() + login = await self.login(meta) + if not login: + meta['tracker_status'][self.tracker]['status_message'] = "data error: Login with cookies failed." 
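For context on the API branch above: the TL upload endpoint answers with a bare numeric torrent id on success and a plain error message otherwise, which is why the code keys off response.text.isnumeric(). A minimal, self-contained sketch of that convention, assuming only that behaviour (the helper name is illustrative, not part of the tracker module):

def classify_tl_api_response(body: str) -> tuple[bool, str]:
    """Return (success, detail) for a TorrentLeech API upload response body."""
    body = body.strip()
    if body.isnumeric():
        # Success: the body is the id of the newly created torrent.
        return True, body
    # Failure: the body is a human-readable error message.
    return False, f"data error: {body}"

# classify_tl_api_response("123456")           -> (True, "123456")
# classify_tl_api_response("Invalid announce") -> (False, "data error: Invalid announce")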
+ return - files = { - 'torrent': (torrent_file, torrent_fh, 'application/x-bittorrent'), - 'nfo': (f"[{self.tracker}]DESCRIPTION.txt", nfo, 'text/plain') - } + data = await self.get_cookie_upload_data(meta) - data = { - 'name': self.get_name(meta), - 'category': cat_id, - 'nonscene': 'on' if not meta.get("scene") else 'off', - 'imdbURL': imdbURL, - 'tvMazeURL': tvMazeURL, - 'igdbURL': '', - 'torrentNFO': '0', - 'torrentDesc': '1', - 'nfotextbox': '', - 'torrentComment': '0', - 'uploaderComments': '', - 'is_anonymous_upload': 'on' if meta.get('anon', False) else 'off', - 'screenshots[]': '' if meta.get('anon', False) else self.get_screens(meta), # It is not possible to upload screenshots anonymously - } - - if meta['debug'] is False: - try: - response = await self.session.post( - url=self.http_upload_url, - files=files, - data=data - ) - - if response.status_code == 302 and 'location' in response.headers: - torrent_id = response.headers['location'].replace('/successfulupload?torrentID=', '') - torrent_url = f"{self.base_url}/torrent/{torrent_id}" - meta['tracker_status'][self.tracker]['status_message'] = torrent_url - - announce_url = self.config['TRACKERS'][self.tracker].get('announce_url') - common = COMMON(config=self.config) - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, announce_url, torrent_url) - - else: - console.print("[bold red]Upload failed: No success redirect found.[/bold red]") - failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload.html" - with open(failure_path, "w", encoding="utf-8") as f: - f.write(f"Status Code: {response.status_code}\n") - f.write(f"Headers: {response.headers}\n") - f.write(response.text) - console.print(f"[yellow]The response was saved at: '{failure_path}'[/yellow]") - - except httpx.RequestError as e: - console.print(f"[bold red]Error during upload to '{self.tracker}': {e}[/bold red]") - meta['tracker_status'][self.tracker]['status_message'] = str(e) - else: - console.print(data) + if meta['debug']: + console.print(data) + else: + try: + status_message = '' + + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') as f: + torrent_bytes = await f.read() + files = { + 'torrent': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent'), + 'nfo': ('description.txt', description_content, 'text/plain'), + } + + response = await self.session.post(url=self.http_upload_url, files=files, data=data) + + if response.status_code == 302 and 'location' in response.headers: + torrent_id = response.headers['location'].replace('/successfulupload?torrentID=', '') + torrent_url = f"{self.base_url}/torrent/{torrent_id}" + status_message = 'Torrent uploaded successfully.' + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + + await self.common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.announce_list, torrent_url) + + else: + status_message = 'data error - Upload failed: No success redirect found.' 
+ failure_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]FailedUpload.html" + async with aiofiles.open(failure_path, "w", encoding="utf-8") as failure_file: + await failure_file.write(f"Status Code: {response.status_code}\n") + await failure_file.write(f"Headers: {response.headers}\n") + await failure_file.write(response.text) + console.print(f"[yellow]The response was saved at: '{failure_path}'[/yellow]") + + except httpx.RequestError as e: + status_message = f'data error - {str(e)}' + + meta['tracker_status'][self.tracker]['status_message'] = status_message diff --git a/src/trackers/TLZ.py b/src/trackers/TLZ.py new file mode 100644 index 000000000..b5361849f --- /dev/null +++ b/src/trackers/TLZ.py @@ -0,0 +1,45 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +# import discord +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D + + +class TLZ(UNIT3D): + def __init__(self, config): + super().__init__(config, tracker_name='TLZ') + self.config = config + self.common = COMMON(config) + self.tracker = 'TLZ' + self.source_flag = 'TLZ' + self.base_url = 'https://tlzdigital.com/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [""] + pass + + async def get_category_id(self, meta): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(meta['category'], '0') + return {'category_id': category_id} + + async def get_type_id(self, meta): + type_id = { + 'FILM': '1', + 'EPISODE': '3', + 'PACK': '4', + }.get(meta.get('type'), '0') + + if meta.get('tv_pack'): + type_id = '4' + elif type_id != '4': + type_id = '3' + + if meta['category'] == 'MOVIE': + type_id = '1' + + return {'type_id': type_id} diff --git a/src/trackers/TOCA.py b/src/trackers/TOCA.py deleted file mode 100644 index f1cb8fde0..000000000 --- a/src/trackers/TOCA.py +++ /dev/null @@ -1,174 +0,0 @@ -# -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx -from src.trackers.COMMON import COMMON -from src.console import console - - -class TOCA(): - - def __init__(self, config): - self.config = config - self.tracker = 'TOCA' - self.source_flag = 'TOCA' - self.upload_url = 'https://tocashare.biz/api/torrents/upload' - self.search_url = 'https://tocashare.biz/api/torrents/filter' - self.torrent_url = 'https://tocashare.biz/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] - pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9', - 'Other': '10', - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await 
self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - if meta.get('region') == 'EUR': - region_id = 0 - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 05887d069..0cbcf1373 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import pickle from bs4 import BeautifulSoup import requests diff --git a/src/trackers/TTR.py b/src/trackers/TTR.py new file mode 100644 index 000000000..b28f1f73f --- /dev/null +++ b/src/trackers/TTR.py @@ -0,0 +1,139 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +from src.console import console +from src.languages import process_desc_language +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D + + +class TTR(UNIT3D): + def __init__(self, config): + super().__init__(config, tracker_name='TTR') + self.config = config + self.common = COMMON(config) + self.tracker = 'TTR' + self.source_flag = 'TTR' + self.base_url = 'https://torrenteros.org/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [] + pass + + async def get_name(self, meta): + try: + name = TTR.ttr_name + except AttributeError: + name = await self.build_name(meta) + + return {'name': name} + + async def build_name(self, meta): + name = meta['name_notag'] + + async def ask_spanish_type(kind): + console.print(f"{self.tracker}: [green]Found Spanish {kind} track.[/green] [yellow]Is it Castellano or Latino?[/yellow]") + console.print("1 = Castellano") + console.print("2 = Latino") + console.print("3 = Castellano Latino") + return await self.common.async_input() + + def get_spanish_type(lang_code): + if not lang_code: + return None + lang_code = lang_code.lower() + if lang_code in ('es-es', 'es', 'spa'): + return 'Castellano' + if lang_code.startswith('es-'): + return 'Latino' + return None + + if meta.get('is_disc') == 'BDMV': + spanish_audio = "Spanish" in meta.get('audio_languages', []) + spanish_subtitle = "Spanish" in meta.get('subtitle_languages', []) + unattended = meta.get('unattended', False) + confirm = meta.get('unattended_confirm', False) + + if 
spanish_audio: + if unattended or confirm: + suffix = 'Castellano' + else: + user_choice = await ask_spanish_type("audio") + suffix = {'1': 'Castellano', '2': 'Latino', '3': 'Castellano Latino'}.get(user_choice, 'Castellano') + name += f" {suffix}" + + elif spanish_subtitle: + if unattended or confirm: + suffix = 'Castellano Subs' + else: + user_choice = await ask_spanish_type("subtitle") + suffix = {'1': 'Castellano Subs', '2': 'Latino Subs', '3': 'Castellano Latino Subs'}.get(user_choice, 'Castellano Subs') + + name += f" {suffix}" + + else: + tracks = meta.get('mediainfo', {}).get('media', {}).get('track', []) + spanish_audio_type = None + spanish_subs_type = None + + for track in tracks: + if track.get('@type') == 'Audio': + lang = track.get('Language', '') + if isinstance(lang, dict): + lang = '' + spanish_audio_type = get_spanish_type(str(lang).strip()) + if spanish_audio_type: + break + + for track in tracks: + if track.get('@type') == 'Text': + lang = track.get('Language', '') + if isinstance(lang, dict): + lang = '' + spanish_subs_type = get_spanish_type(str(lang).strip()) + if spanish_subs_type: + break + + if spanish_audio_type: + name += f" {spanish_audio_type}" + elif spanish_subs_type: + name += f" {spanish_subs_type} Subs" + + tag = meta.get('tag', "") + if tag: + name += tag + + TTR.ttr_name = name + + return name + + async def get_additional_data(self, meta): + data = { + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), + } + + return data + + async def get_additional_checks(self, meta): + if not meta.get("language_checked", False): + await process_desc_language(meta, desc=None, tracker=self.tracker) + + if "Spanish" not in meta.get('audio_languages', []): + if "Spanish" not in meta.get('subtitle_languages', []): + console.print( + "[bold red]TTR requires at least one Spanish audio or subtitle track." + ) + return False + else: + if meta.get('unattended'): + if not meta.get('unattended_confirm', False): + return False + else: + console.print(f"{self.tracker}: [yellow]No Spanish audio track found, but Spanish subtitles are present.[/yellow]") + console.print(f"{self.tracker}: [yellow]Do you want to upload anyway? 
y/N[/yellow]") + user_choice = await self.common.async_input() + if user_choice.lower() != 'y': + return False + + return True diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py index 12dce40bc..510ee72b7 100644 --- a/src/trackers/TVC.py +++ b/src/trackers/TVC.py @@ -1,27 +1,23 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord import asyncio -import requests import traceback import cli_ui import os +import re +from urllib.parse import urlparse import tmdbsimple as tmdb from src.bbcode import BBCODE import json import httpx +import requests from src.trackers.COMMON import COMMON from src.console import console +from src.rehostimages import check_hosts +from datetime import datetime class TVC(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - def __init__(self, config): self.config = config self.tracker = 'TVC' @@ -32,31 +28,62 @@ def __init__(self, config): self.signature = "" self.banned_groups = [] tmdb.API_KEY = config['DEFAULT']['tmdb_api'] - self.images = { - "imdb_75": 'https://i.imgur.com/Mux5ObG.png', - "tmdb_75": 'https://i.imgur.com/r3QzUbk.png', - "tvdb_75": 'https://i.imgur.com/UWtUme4.png', - "tvmaze_75": 'https://i.imgur.com/ZHEF5nE.png', - "mal_75": 'https://i.imgur.com/PBfdP3M.png' - } - pass + # TV type mapping as a dict for clarity and maintainability + self.tv_type_map = { + "comedy": "29", + "current affairs": "45", + "documentary": "5", + "drama": "11", + "entertainment": "14", + "factual": "19", + "foreign": "43", + "kids": "32", + "movies": "44", + "news": "54", + "reality": "52", + "soaps": "30", + "sci-fi": "33", + "sport": "42", + "holding bin": "53", + } + + def format_date_ddmmyyyy(self, date_str): + """ + Convert a date string from 'YYYY-MM-DD' to 'DD-MM-YYYY'. + + Args: + date_str (str): Input date string. + + Returns: + str: Reformatted date string, or the original if parsing fails. + """ + try: + return datetime.strptime(date_str, "%Y-%m-%d").strftime("%d-%m-%Y") + except (ValueError, TypeError): + return date_str - async def get_cat_id(self, genres): - # Note sections are based on Genre not type, source, resolution etc.. - self.tv_types = ["comedy", "documentary", "drama", "entertainment", "factual", "foreign", "kids", "movies", "News", "radio", "reality", "soaps", "sci-fi", "sport", "holding bin"] - self.tv_types_ids = ["29", "5", "11", "14", "19", "42", "32", "44", "45", "51", "52", "30", "33", "42", "53"] + async def get_cat_id(self, genres: str) -> str: + """ + Determine TVC category ID based on genre list. - genres = genres.split(', ') - if len(genres) >= 1: - for i in genres: - g = i.lower().replace(',', '') - for s in self.tv_types: - if s.__contains__(g): - return self.tv_types_ids[self.tv_types.index(s)] + Args: + genres (str): Comma-separated genre names (e.g. "Drama, Comedy"). - # returning 14 as that is holding bin/misc - return self.tv_types_ids[14] + Returns: + str: Category ID string from tv_type_map. Defaults to "holding bin" if no match. + """ + # Note sections are based on Genre not type, source, resolution etc.. 
+ # Uses tv_type_map dict for genre → category ID mapping + if not genres: + return self.tv_type_map["holding bin"] + for g in genres.split(', '): + g = g.lower().replace(",", "").strip() + if g and g in self.tv_type_map: + return self.tv_type_map[g] + + # fallback to holding bin/misc id + return self.tv_type_map["holding bin"] async def get_res_id(self, tv_pack, resolution): if tv_pack: @@ -82,107 +109,208 @@ async def get_res_id(self, tv_pack, resolution): '540': 'SD', '480p': 'SD', '480i': 'SD' - }.get(resolution, 'SD') + }.get(resolution, 'SD') return resolution_id + async def append_country_code(self, meta, name): + """ + Append ISO country code suffix to release name based on origin_country_code. + + Args: + meta (dict): Metadata containing 'origin_country_code' list. + name (str): Base release name. + + Returns: + str: Release name with appended country code (e.g. "Show Title [IRL]"). + """ + country_map = { + "AT": "AUT", + "AU": "AUS", + "BE": "BEL", + "CA": "CAN", + "CH": "CHE", + "CZ": "CZE", + "DE": "GER", + "DK": "DNK", + "EE": "EST", + "ES": "SPA", + "FI": "FIN", + "FR": "FRA", + "IE": "IRL", + "IS": "ISL", + "IT": "ITA", + "NL": "NLD", + "NO": "NOR", + "NZ": "NZL", + "PL": "POL", + "PT": "POR", + "RU": "RUS", + "SE": "SWE", + } + + if 'origin_country_code' in meta: + for code in meta['origin_country_code']: + if code in country_map: + name += f" [{country_map[code]}]" + break # append only the first match + + return name + + async def read_file(self, path: str, encoding: str = "utf-8") -> str: + """ + Async helper to read a text file safely. + Uses a with-block to ensure the file handle is closed. + """ + def _read(): + with open(path, "r", encoding=encoding) as f: + return f.read() + return await asyncio.to_thread(_read) + + async def check_image_hosts(self, meta): + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "imgbox.com": "imgbox", + "pixhost.to": "pixhost", + "imagebam.com": "bam", + "onlyimage.org": "onlyimage", + } + + approved_image_hosts = ['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'bam', 'onlyimage'] + await check_hosts( + meta, + self.tracker, + url_host_mapping=url_host_mapping, + img_host_index=1, + approved_image_hosts=approved_image_hosts + ) + return + async def upload(self, meta, disctype): common = COMMON(config=self.config) + + image_list = meta.get('TVC_images_key', meta.get('image_list', [])) + if not isinstance(image_list, (list, tuple)): + image_list = [] + await common.edit_torrent(meta, self.tracker, self.source_flag) await self.get_tmdb_data(meta) - if meta['category'] == 'TV': - cat_id = await self.get_cat_id(meta['genres']) - else: - cat_id = 44 - # type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['tv_pack'] if 'tv_pack' in meta else 0, meta['resolution']) - await self.unit3d_edit_desc(meta, self.tracker, self.signature) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + # load MediaInfo.json + try: + content = await self.read_file(f"{meta['base_dir']}/tmp/{meta['uuid']}/MediaInfo.json") + mi = json.loads(content) + except (FileNotFoundError, json.JSONDecodeError) as e: + console.print(f"[yellow]Warning: Could not load MediaInfo.json: {e}") + mi = {} + + cat_id = await self.get_cat_id(meta.get('genres', '')) if meta.get('category', '') == 'TV' else '44' + meta['language_checked'] = True + + # Foreign category check based on TMDB original_language only + original_lang = meta.get("original_language", "") + if 
original_lang and not original_lang.startswith("en") and original_lang not in ["ga", "gd", "cy"]: + cat_id = self.tv_type_map["foreign"] + elif not original_lang: + # Fallback: inspect audio languages from MediaInfo if TMDB data is missing + audio_langs = self.get_audio_languages(mi) + if audio_langs and "English" not in audio_langs: + cat_id = self.tv_type_map["foreign"] + + resolution_id = await self.get_res_id(meta.get('tv_pack', 0), meta['resolution']) + + anon = 0 if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False) else 1 if meta['bdinfo'] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = await self.read_file(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt") else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = await self.read_file(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt") bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - if meta['type'] == "ENCODE" and (str(meta['path']).lower().__contains__("bluray") or str(meta['path']).lower().__contains__("brrip") or str(meta['path']).lower().__contains__("bdrip")): + # build description and capture return instead of reopening file + descfile_path = os.path.join(meta['base_dir'], "tmp", meta['uuid'], f"[{self.tracker}]DESCRIPTION.txt") + desc = await self.unit3d_edit_desc(meta, self.tracker, self.signature, image_list) + if not desc: + console.print(f"[yellow]Warning: DESCRIPTION file not found at {descfile_path}") + desc = "" + + # Naming logic + if meta['type'] == "ENCODE" and ("bluray" in str(meta['path']).lower() or + "brrip" in str(meta['path']).lower() or + "bdrip" in str(meta['path']).lower()): type = "BRRip" else: type = meta['type'].replace('WEBDL', 'WEB-DL') - # Naming as per TVC rules. Site has unusual naming conventions. if meta['category'] == "MOVIE": tvc_name = f"{meta['title']} ({meta['year']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" - else: - if meta['search_year'] != "": - year = meta['year'] - else: - year = "" - if meta.get('no_season', False) is True: - season = '' - if meta.get('no_year', False) is True: + elif meta['category'] == "TV": + # Use safe lookups to avoid KeyError if 'search_year' is missing + search_year = meta.get('search_year', '') + # If search_year is empty, fall back to year + year = search_year if search_year else meta.get('year', '') + if meta.get('no_year', False): year = '' - - if meta['category'] == "TV": - if meta['tv_pack']: - # seasons called series here. 
- tvc_name = f"{meta['title']} ({meta['year'] if 'season_air_first_date' and len(meta['season_air_first_date']) >= 4 else meta['season_air_first_date'][:4]}) Series {meta['season_int']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') + year_str = f" ({year})" if year else "" + + if meta['tv_pack']: + season_first = (meta.get('season_air_first_date') or "")[:4] + season_year = season_first or year + tvc_name = ( + f"{meta['title']} - Series {meta['season_int']} ({season_year}) " + f"[{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" + ) + else: + if 'episode_airdate' in meta: + formatted_date = self.format_date_ddmmyyyy(meta['episode_airdate']) + tvc_name = ( + f"{meta['title']}{year_str} {meta['season']}{meta['episode']} " + f"({formatted_date}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" + ) else: - if 'episode_airdate' in meta: - tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} ({meta['episode_airdate']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') - else: - tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') + tvc_name = ( + f"{meta['title']}{year_str} {meta['season']}{meta['episode']} " + f"[{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" + ) + else: + # Defensive guard for unsupported categories + raise ValueError(f"Unsupported category for TVC: {meta.get('category')}") - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + # Add original language title if foreign + if cat_id == self.tv_type_map["foreign"]: + if meta.get('original_title') and meta['original_title'] != meta['title']: + tvc_name = tvc_name.replace(meta['title'], f"{meta['title']} ({meta['original_title']})") if not meta['is_disc']: + # Pass the full MediaInfo dict; get_subs_info handles missing/invalid data internally self.get_subs_info(meta, mi) if meta['video_codec'] == 'HEVC': tvc_name = tvc_name.replace(']', ' HEVC]') - - if 'eng_subs' in meta and meta['eng_subs']: + if meta.get('eng_subs'): tvc_name = tvc_name.replace(']', ' SUBS]') - if 'sdh_subs' in meta and meta['eng_subs']: - if 'eng_subs' in meta and meta['eng_subs']: + if meta.get('sdh_subs'): + if meta.get('eng_subs'): tvc_name = tvc_name.replace(' SUBS]', ' (ENG + SDH SUBS)]') else: tvc_name = tvc_name.replace(']', ' (SDH SUBS)]') - if 'origin_country_code' in meta: - if "IE" in meta['origin_country_code']: - tvc_name += " [IRL]" - elif "AU" in meta['origin_country_code']: - tvc_name += " [AUS]" - elif "NZ" in meta['origin_country_code']: - tvc_name += " [NZ]" - elif "CA" in meta['origin_country_code']: - tvc_name += " [CA]" + tvc_name = await self.append_country_code(meta, tvc_name) if meta.get('unattended', False) is False: upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) - if not upload_to_tvc: tvc_name = cli_ui.ask_string("Please enter New Name:") upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) data = { 'name': tvc_name, - # newline does not seem to work on this site for some reason. if you edit and save it again they will but not if pushed by api 'description': desc.replace('\n', '
<br>').replace('\r', '<br>
'), 'mediainfo': mi_dump, 'bdinfo': bd_dump, 'category_id': cat_id, 'type': resolution_id, - # 'resolution_id': resolution_id, 'tmdb': meta['tmdb'], 'imdb': meta['imdb'], 'tvdb': meta['tvdb_id'], @@ -199,105 +327,202 @@ async def upload(self, meta, disctype): 'doubleup': 0, 'sticky': 0, } - if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + if 'upload_to_tvc' in locals() and upload_to_tvc is False: return + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + response = None try: - # some reason this does not return json instead it returns something like below. - # b'application/x-bittorrent\n{"success":true,"data":"https:\\/\\/tvchaosuk.com\\/torrent\\/download\\/164633.REDACTED","message":"Torrent uploaded successfully."}' - # so you need to convert text to json. - json_data = json.loads(response.text.strip('application/x-bittorrent\n')) + async with httpx.AsyncClient(timeout=30.0) as client: + with open(torrent_path, "rb") as open_torrent: + files = {'torrent': open_torrent} + response = await client.post( + self.upload_url, + files=files, + data=data, + headers={'User-Agent': 'Mozilla/5.0'}, + params={'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()} + ) + + if response.status_code != 200: + if response.status_code == 403: + meta['tracker_status'][self.tracker]['status_message'] = ( + "data error: Forbidden (403). This may indicate that you do not have upload permission." + ) + elif response.status_code in (301, 302, 303, 307, 308): + meta['tracker_status'][self.tracker]['status_message'] = ( + f"data error: Redirect ({response.status_code}). Please verify that your API key is valid." 
+ ) + else: + meta['tracker_status'][self.tracker]['status_message'] = ( + f"data error: HTTP {response.status_code} - {response.text}" + ) + return + # TVC returns "application/x-bittorrent\n{json}" so strip the prefix + json_data = json.loads(response.text.split('\n', 1)[-1]) meta['tracker_status'][self.tracker]['status_message'] = json_data - # adding torrent link to comment of torrent file - t_id = json_data['data'].split(".")[1].split("/")[3] + + # Extract torrent ID robustly from returned URL + data_str = json_data.get('data') + if not isinstance(data_str, str): + raise ValueError(f"Invalid TVC response: 'data' missing or not a string: {data_str}") + + parsed = urlparse(data_str) + segments = [seg for seg in parsed.path.split("/") if seg] + if not segments: + raise ValueError(f"Invalid TVC response format: no path segments in {data_str}") + + # Use last segment as torrent ID + t_id = segments[-1] meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, - self.config['TRACKERS'][self.tracker].get('announce_url'), - "https://tvchaosuk.com/torrents/" + t_id) - - except Exception: - console.print(traceback.print_exc()) - console.print("[yellow]It may have uploaded, go check") - console.print(response.text.strip('application/x-bittorrent\n')) + + if meta['debug']: + console.print(f"[cyan]Extracted torrent ID {t_id} from {data_str}") + + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config['TRACKERS'][self.tracker].get('announce_url'), + f"https://tvchaosuk.com/torrents/%7Bt_id%7D" + ) + + except httpx.TimeoutException: + meta['tracker_status'][self.tracker]['status_message'] = 'data error: Request timed out after 30 seconds' + except httpx.RequestError as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: Unable to upload. Error: {e}.\nResponse: {(response.text) if response else "No response"}' + except Exception as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: It may have uploaded, go check. Error: {e}.\nResponse: {(response.text) if response else "No response"}' return + else: - console.print("[cyan]Request Data:") + console.print("[cyan]TVC Request Data:") console.print(data) meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - # why the fuck is this even a thing..... 
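The TVC response handling above depends on two quirks: the JSON payload is prefixed with an "application/x-bittorrent" line, and the torrent id is taken from the last path segment of the returned download URL. A standalone sketch of that parsing, assuming a payload shaped like the sample quoted in the removed comment (the function name is illustrative):

import json
from urllib.parse import urlparse

def parse_tvc_upload_response(text: str) -> str:
    """Extract the torrent id from a TVC upload response body."""
    # Keep only the JSON part after the "application/x-bittorrent" prefix line.
    payload = json.loads(text.split('\n', 1)[-1])
    data_str = payload.get('data')
    if not isinstance(data_str, str):
        raise ValueError(f"Invalid TVC response: {payload}")
    # The download URL ends with the torrent id, e.g. .../torrent/download/164633.XXXX
    segments = [seg for seg in urlparse(data_str).path.split('/') if seg]
    if not segments:
        raise ValueError(f"No path segments in {data_str}")
    return segments[-1]

# parse_tvc_upload_response('application/x-bittorrent\n{"success":true,"data":"https://tvchaosuk.com/torrent/download/164633.XXXX"}')
# -> '164633.XXXX'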
- async def get_tmdb_data(self, meta): - import tmdbsimple as tmdb - if meta['category'] == "MOVIE": - movie = tmdb.Movies(meta['tmdb']) - response = movie.info() - else: - tv = tmdb.TV(meta['tmdb']) - response = tv.info() - - # TVC stuff - if meta['category'] == "TV": - if hasattr(tv, 'release_dates'): - meta['release_dates'] = tv.release_dates() - if hasattr(tv, 'networks') and len(tv.networks) != 0 and 'name' in tv.networks[0]: - meta['networks'] = tv.networks[0]['name'] - - try: - if 'tv_pack' in meta and not meta['tv_pack']: - episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info() - - meta['episode_airdate'] = episode_info['air_date'] - meta['episode_name'] = episode_info['name'] - meta['episode_overview'] = episode_info['overview'] - if 'tv_pack' in meta and meta['tv_pack']: - season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info() - meta['season_air_first_date'] = season_info['air_date'] - - if hasattr(tv, 'first_air_date'): - meta['first_air_date'] = tv.first_air_date - except Exception: - console.print(traceback.print_exc()) - console.print(f"Unable to get episode information, Make sure episode {meta['season']}{meta['episode']} exists in TMDB. \nhttps://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}/season/{meta['season_int']}") - meta['season_air_first_date'] = str({meta["year"]}) + "-N/A-N/A" - meta['first_air_date'] = str({meta["year"]}) + "-N/A-N/A" + def get_audio_languages(self, mi): + """ + Parse MediaInfo object and return a list of normalized audio languages. + + Args: + mi (dict): MediaInfo JSON object. + + Returns: + list[str]: Sorted list of audio language names (e.g. ["English", "French"]). + """ + audio_langs = set() + for track in mi.get("media", {}).get("track", []): + if track.get("@type") != "Audio": + continue + lang_val = ( + track.get("Language/String") + or track.get("Language/String1") + or track.get("Language/String2") + or track.get("Language") + ) + lang = str(lang_val).strip() if lang_val else "" + if not lang: + continue + lowered = lang.lower() + if lowered in {"en", "eng", "en-us", "en-gb", "en-ie", "en-au"}: + audio_langs.add("English") + else: + audio_langs.add(lang.title()) + return sorted(audio_langs) if audio_langs else [] + async def get_tmdb_data(self, meta): + # Origin country codes (shared for both movies and TV) meta['origin_country_code'] = [] - if 'origin_country' in response: - if isinstance(response['origin_country'], list): - for i in response['origin_country']: - meta['origin_country_code'].append(i) + if meta.get('origin_country'): + if isinstance(meta['origin_country'], list): + meta['origin_country_code'].extend(meta['origin_country']) else: - meta['origin_country_code'].append(response['origin_country']) - print(type(response['origin_country'])) - - elif len(response['production_countries']): - for i in response['production_countries']: + meta['origin_country_code'].append(meta['origin_country']) + elif len(meta.get('production_countries', [])): + for i in meta['production_countries']: if 'iso_3166_1' in i: meta['origin_country_code'].append(i['iso_3166_1']) - elif len(response['production_companies']): - meta['origin_country_code'].append(response['production_companies'][0]['origin_country']) + elif len(meta.get('production_companies', [])): + meta['origin_country_code'].append(meta['production_companies'][0].get('origin_country', '')) - async def search_existing(self, meta, disctype): - # Search on TVCUK has been DISABLED due to issues - # leaving code here for 
future use when it is re-enabled - console.print("[red]Cannot search for dupes as search api is not working...") - console.print("[red]Please make sure you are not uploading duplicates.") - # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108 + if meta['category'] == "MOVIE": + # Everything movie-specific is already handled + if meta['debug']: + console.print("[yellow]Fetching TMDb movie details[/yellow]") + movie = tmdb.Movies(meta['tmdb']) + response = movie.info() + console.print(f"[cyan]DEBUG: Movie data: {response}[/cyan]") + return + + elif meta['category'] == "TV": + # TVC-specific extras + if meta.get('networks') and len(meta['networks']) != 0 and 'name' in meta['networks'][0]: + meta['networks'] = meta['networks'][0]['name'] + + try: + if not meta['tv_pack']: + if 'tmdb_episode_data' not in meta or not meta['tmdb_episode_data']: + episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info() + meta['episode_airdate'] = episode_info.get('air_date', '') + meta['episode_name'] = episode_info.get('name', '') + meta['episode_overview'] = episode_info.get('overview', '') + else: + episode_info = meta['tmdb_episode_data'] + meta['episode_airdate'] = episode_info.get('air_date', '') + meta['episode_name'] = episode_info.get('name', '') + meta['episode_overview'] = episode_info.get('overview', '') + else: + if 'tmdb_season_data' not in meta or not meta['tmdb_season_data']: + season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info() + air_date = season_info.get('air_date') or "" + meta['season_air_first_date'] = air_date + meta['season_name'] = season_info.get('name', f"Season {meta['season_int']}") + meta['episodes'] = [] + for ep in season_info.get('episodes', []): + code = f"S{str(ep.get('season_number', 0)).zfill(2)}E{str(ep.get('episode_number', 0)).zfill(2)}" + meta['episodes'].append({ + "code": code, + "title": (ep.get("name") or "").strip(), + "airdate": ep.get("air_date") or "", + "overview": (ep.get("overview") or "").strip() + }) + else: + season_info = meta['tmdb_season_data'] + air_date = season_info.get('air_date') or "" + meta['season_air_first_date'] = air_date + meta['season_name'] = season_info.get('name', f"Season {meta['season_int']}") + meta['episodes'] = [] + for ep in season_info.get('episodes', []): + code = f"S{str(ep.get('season_number', 0)).zfill(2)}E{str(ep.get('episode_number', 0)).zfill(2)}" + meta['episodes'].append({ + "code": code, + "title": (ep.get("name") or "").strip(), + "airdate": ep.get("air_date") or "", + "overview": (ep.get("overview") or "").strip() + }) + + except (requests.exceptions.RequestException, KeyError, TypeError) as e: + console.print(f"[yellow]Expected error while fetching TV episode/season info: {e}") + console.print(traceback.format_exc()) + + console.print( + f"Unable to get episode information, Make sure episode {meta['season']}{meta['episode']} exists in TMDB.\n" + f"https://www.themoviedb.org/tv/%7Bmeta['tmdb']}/season/{meta['season_int']}" + ) + meta.setdefault('season_air_first_date', f"{meta['year']}-N/A-N/A") + meta.setdefault('first_air_date', f"{meta['year']}-N/A-N/A") + + else: + raise ValueError(f"Unsupported category for TVC: {meta.get('category')}") + async def search_existing(self, meta, _disctype=None): + # Search on TVCUK has been DISABLED due to issues, but we can still skip uploads based on criteria dupes = [] # UHD, Discs, remux and non-1080p HEVC are not allowed on TVC. 
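The comment above drives the early-skip path in search_existing: TVC takes no UHD, discs, remuxes, or HEVC encodes below 1080p, so such releases are skipped rather than searched. A rough sketch of that kind of gate, assuming the meta fields used elsewhere in this file ('resolution', 'is_disc', 'type', 'video_codec'); the exact rule sits in unchanged lines outside this hunk and may differ in detail:

def tvc_upload_allowed(meta: dict) -> bool:
    # Illustrative only: mirrors the restrictions named in the comment above.
    if meta.get('resolution') == '2160p':
        return False  # no UHD
    if meta.get('is_disc') or meta.get('type') == 'REMUX':
        return False  # no discs or remuxes
    if meta.get('video_codec') == 'HEVC' and meta.get('resolution') != '1080p':
        return False  # HEVC only accepted at 1080p
    return True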
@@ -306,148 +531,272 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "TVC" return [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb': meta['tmdb'], - 'name': "" - } - - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - # 404 catch when their api is down - if data['data'] != '404': - for each in data['data']: - print(each[0]['attributes']['name']) - result = each[0]['attributes']['name'] - dupes.append(result) - else: - console.print("Search API is down, please check manually") - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) + console.print("[red]Cannot search for dupes on TVC at this time.[/red]") + console.print("[red]Please make sure you are not uploading duplicates.") + await asyncio.sleep(2) return dupes - async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w') as descfile: - bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") - descfile.write("\n") - if len(discs) >= 2: - for each in discs[1:]: - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") - descfile.write("\n") - if each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") - descfile.write("\n") - desc = "" + async def unit3d_edit_desc(self, meta, tracker, signature, image_list, comparison=False): + """ + Build and write the tracker-specific DESCRIPTION.txt file. + + Constructs BBCode-formatted description text for discs, TV packs, + episodes, or movies, including screenshots and notes. Always writes + a non-empty description file to tmp//[TVC]DESCRIPTION.txt. - # release info - rd_info = "" - # getting movie release info - if meta['category'] != "TV" and 'release_dates' in meta: + Args: + meta (dict): Metadata dictionary for the release. + tracker (str): Tracker name (e.g. "TVC"). + signature (str): Optional signature string to append. + image_list (list): List of screenshot image dicts. + comparison (bool): Whether to include comparison collapse blocks. + + Returns: + str: The final BBCode description string (also written to file). 
+ """ + try: + base = await self.read_file(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt") + except FileNotFoundError: + base = "" + # Ensure tmp/ directory exists + desc_dir = os.path.join(meta['base_dir'], "tmp", meta['uuid']) + os.makedirs(desc_dir, exist_ok=True) + descfile_path = os.path.join(desc_dir, f"[{tracker}]DESCRIPTION.txt") + bbcode = BBCODE() + desc = "" + + # Discs + if meta.get('discs', []): + discs = meta['discs'] + if discs[0]['type'] == "DVD": + desc += f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n\n" + for each in discs[1:]: + if each['type'] == "BDMV": + desc += f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n" + if each['type'] == "DVD": + desc += f"{each['name']}:\n" + desc += ( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] " + f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n" + ) + + # Release info for movies + rd_info = "" + if meta['category'] == "MOVIE": + if 'release_dates' in meta: for cc in meta['release_dates']['results']: for rd in cc['release_dates']: if rd['type'] == 6: channel = str(rd['note']) if str(rd['note']) != "" else "N/A Channel" - rd_info += "[color=orange][size=15]" + cc['iso_3166_1'] + " TV Release info [/size][/color]" + "\n" + str(rd['release_date'])[:10] + " on " + channel + "\n" - # movie release info adding - if rd_info != "": - desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" - desc += rd_info + "\n\n" - # getting season release info. need to fix so it gets season info instead of first episode info. - elif meta['category'] == "TV" and meta['tv_pack'] == 1 and 'first_air_date' in meta: - channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A" - desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" - desc += f"[color=orange][size=15]First episode of this season aired {meta['season_air_first_date']} on channel {channel}[/size][/color]" + "\n\n" - elif meta['category'] == "TV" and meta['tv_pack'] != 1 and 'episode_airdate' in meta: - channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A" - desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" - desc += f"[color=orange][size=15]Episode aired on channel {channel} on {meta['episode_airdate']}[/size][/color]" + "\n\n" + rd_info += ( + f"[color=orange][size=15]{cc['iso_3166_1']} TV Release info [/size][/color]\n" + f"{str(rd['release_date'])[:10]} on {channel}\n" + ) else: - desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" - desc += "[color=orange][size=15]TMDB has No TV release info for this[/size][/color]" + "\n\n" - - if meta['category'] == 'TV' and meta['tv_pack'] != 1 and 'episode_overview' in meta: - desc += "\n\n" + "[color=green][size=25]PLOT[/size][/color]\n" + "Episode Name: " + str(meta['episode_name']) + "\n" + str(meta['episode_overview'] + "\n\n") + rd_info = meta.get('release_date', '') + if rd_info: + desc += f"[center]{rd_info}[/center]\n\n" + + # TV pack layout + if meta['category'] == "TV" and meta.get('tv_pack') == 1 and 'season_air_first_date' in meta: + channel = meta.get('networks', 'N/A') + airdate = self.format_date_ddmmyyyy(meta.get('season_air_first_date') or "") + + desc += "[center]\n" + if meta.get("logo"): + desc += f"[img={self.config['DEFAULT'].get('logo_size', '300')}]" + desc += f"{meta['logo']}[/img]\n\n" + + # UK terminology: Series not Season + desc += f"[b]Series Title:[/b] 
{meta.get('season_name', 'Unknown Series')}\n\n" + desc += f"[b]This series premiered on:[/b] {channel} on {airdate}\n" + + # Episode list + if meta.get('episodes'): + desc += "\n\n[b]Episode List[/b]\n\n" + for ep in meta['episodes']: + ep_num = ep.get('code', '') + ep_title = ep.get('title', '').strip() + ep_date = ep.get('airdate', '') + ep_overview = ep.get('overview', '').strip() + + desc += f"[b]{ep_num}[/b]" + if ep_title: + desc += f" - {ep_title}" + if ep_date: + formatted_date = self.format_date_ddmmyyyy(ep_date) + desc += f" ({formatted_date})" + desc += "\n" + + if ep_overview: + desc += f"{ep_overview}\n\n" + + desc += self.get_links(meta) + + screens_count = int(meta.get('screens', 0) or 0) + if image_list and screens_count >= self.config['TRACKERS'][self.tracker].get('image_count', 2): + desc += "\n\n[b]Screenshots[/b]\n\n" + for each in image_list[:self.config['TRACKERS'][self.tracker]['image_count']]: + web_url = each['web_url'] + img_url = each['img_url'] + desc += f"[url={web_url}][img=350]{img_url}[/img][/url]" + + desc += "[/center]\n\n" + + # Episode layout + elif meta['category'] == "TV" and meta.get('tv_pack') != 1 and 'episode_overview' in meta: + desc += "[center]\n" + if meta.get("logo"): + desc += f"[img={self.config['DEFAULT'].get('logo_size', '300')}]" + desc += f"{meta['logo']}[/img]\n\n" + episode_name = str(meta.get('episode_name', '')).strip() + overview = str(meta.get('episode_overview', '')).strip() + # Note: regex may mis-split on abbreviations (e.g. "Dr. Smith") or ellipses ("..."). + # This is a heuristic; fallback is to treat the whole overview as one block. + sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', overview) if s.strip()] + if not sentences and overview: + sentences = [overview] + + if episode_name: + desc += f"[b]Episode Title:[/b] {episode_name}\n\n" + for s in sentences: + desc += s.rstrip() + "\n" + if 'episode_airdate' in meta: + channel = meta.get('networks', 'N/A') + formatted_date = self.format_date_ddmmyyyy(meta['episode_airdate']) + desc += f"\n[b]Broadcast on:[/b] {channel} on {formatted_date}\n" + + desc += self.get_links(meta) + + screens_count = int(meta.get('screens', 0) or 0) + if image_list and screens_count >= self.config['TRACKERS'][self.tracker].get('image_count', 2): + desc += "\n\n[b]Screenshots[/b]\n\n" + for each in image_list[:self.config['TRACKERS'][self.tracker]['image_count']]: + web_url = each['web_url'] + img_url = each['img_url'] + desc += f"[url={web_url}][img=350]{img_url}[/img][/url]" + desc += "[/center]\n\n" + + # Movie / fallback overview + else: + # Fallback path: for non‑movie categories with only a generic overview available. 
+ overview = str(meta.get('overview', '')).strip() + desc += "[center]\n" + if meta['category'].upper() == "MOVIE" and meta.get("logo"): + desc += f"[img={self.config['DEFAULT'].get('logo_size', '300')}]" + desc += f"{meta['logo']}[/img]\n\n" + + if meta['category'].upper() == "MOVIE": + desc += f"[b]Movie Title:[/b] {meta.get('title', 'Unknown Movie')}\n\n" + desc += overview + "\n" + if 'release_date' in meta: + formatted_date = self.format_date_ddmmyyyy(meta['release_date']) + desc += f"\n[b]Released on:[/b] {formatted_date}\n" + desc += self.get_links(meta) + + # Screenshots block for movies + screens_count = int(meta.get('screens', 0) or 0) + if image_list and screens_count >= self.config['TRACKERS'][self.tracker].get('image_count', 2): + desc += "\n\n[b]Screenshots[/b]\n\n" + for each in image_list[:self.config['TRACKERS'][self.tracker]['image_count']]: + web_url = each['web_url'] + img_url = each['img_url'] + desc += f"[url={web_url}][img=350]{img_url}[/img][/url]" + + desc += "[/center]\n\n" else: - desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + str(meta['overview'] + "\n\n") - # Max two screenshots as per rules - if len(base) > 2 and meta['description'] != "PTP": - desc += "[color=green][size=25]Notes/Extra Info[/size][/color]" + " \n \n" + str(base) + " \n \n " - desc += self.get_links(meta, "[color=green][size=25]", "[/size][/COLOR]") - desc = bbcode.convert_pre_to_code(desc) - desc = bbcode.convert_hide_to_spoiler(desc) - if comparison is False: - desc = bbcode.convert_comparison_to_collapse(desc, 1000) - descfile.write(desc) - images = meta['image_list'] - # only adding 2 screens as that is mentioned in rules. - if len(images) > 0 and int(meta['screens']) >= 2: - descfile.write("[color=green][size=25]Screenshots[/size][/color]\n\n[center]") - for each in range(len(images[:2])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] - descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") - descfile.write("[/center]") - - if signature is not None: - descfile.write(signature) - descfile.close() - return + desc += overview + "\n[/center]\n\n" + + # Notes/Extra Info + notes_content = base.strip() + if notes_content and notes_content.lower() != "ptp": + desc += f"[center][b]Notes / Extra Info[/b]\n\n{notes_content}\n\n[/center]\n\n" + + # BBCode conversions + desc = bbcode.convert_pre_to_code(desc) + desc = bbcode.convert_hide_to_spoiler(desc) + if not comparison: + desc = bbcode.convert_comparison_to_collapse(desc, 1000) + + # Ensure fallback content if description is empty + if not desc.strip(): + desc = "[center][i]No description available[/i][/center]\n" + + # Append signature if provided + if signature: + desc += f"\n{signature}\n" + + # Write description asynchronously + def _write(): + with open(descfile_path, "w", encoding="utf-8") as f: + f.write(desc) + + try: + await asyncio.to_thread(_write) + if meta['debug']: + console.print(f"[cyan]Wrote DESCRIPTION file to {descfile_path} ({len(desc)} chars)") + except Exception as e: + console.print(f"[bold red]Failed to write DESCRIPTION file: {e}") + + return desc + + def get_links(self, meta): + """ + Returns a BBCode string with an 'External Info Sources' heading and icon links. + No [center] tags are included; callers control layout. 
+ """ + parts = [] + + parts.append("\n[b]External Info Sources:[/b]\n\n") + + if meta.get('imdb_id', 0): + parts.append(f"[URL={meta.get('imdb_info', {}).get('imdb_url', '')}][img]{self.config['IMAGES']['imdb_75']}[/img][/URL]") + + if meta.get('tmdb_id', 0): + parts.append(f"[URL=https://www.themoviedb.org/{meta.get('category', '').lower()}/{meta['tmdb_id']}][img]{self.config['IMAGES']['tmdb_75']}[/img][/URL]") + + if meta.get('tvdb_id', 0): + parts.append(f"[URL=https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series][img]{self.config['IMAGES']['tvdb_75']}[/img][/URL]") - def get_links(self, movie, subheading, heading_end): - description = "" - description += "\n\n" + subheading + "Links" + heading_end + "\n" - if movie['imdb_id'] != "0": - description += f"[URL=https://www.imdb.com/title/tt{movie['imdb']}][img]{self.images['imdb_75']}[/img][/URL]" - if movie['tmdb'] != "0": - description += f" [URL=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.images['tmdb_75']}[/img][/URL]" - if movie['tvdb_id'] != 0: - description += f" [URL=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.images['tvdb_75']}[/img][/URL]" - if movie['tvmaze_id'] != 0: - description += f" [URL=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.images['tvmaze_75']}[/img][/URL]" - if movie['mal_id'] != 0: - description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.images['mal_75']}[/img][/URL]" - return description + " \n \n " + if meta.get('tvmaze_id', 0): + parts.append(f"[URL=https://www.tvmaze.com/shows/{meta['tvmaze_id']}][img]{self.config['IMAGES']['tvmaze_75']}[/img][/URL]") + + if meta.get('mal_id', 0): + parts.append(f"[URL=https://myanimelist.net/anime/{meta['mal_id']}][img]{self.config['IMAGES']['mal_75']}[/img][/URL]") + + return " ".join(parts) # get subs function # used in naming conventions - def get_subs_info(self, meta, mi): + + def get_subs_info(self, meta, mi) -> None: subs = "" subs_num = 0 - for s in mi.get("media").get("track"): - if s["@type"] == "Text": - subs_num = subs_num + 1 - if subs_num >= 1: - meta['has_subs'] = 1 - else: - meta['has_subs'] = 0 - for s in mi.get("media").get("track"): - if s["@type"] == "Text": - if "Language" in s: - if not subs_num <= 0: - subs = subs + s["Language"] + ", " - # checking if it has english subs as for data scene. 
- if str(s["Language"]).lower().__contains__("en"): + media = mi.get("media") or {} + tracks = media.get("track") or [] + + # Count subtitle tracks + for s in tracks: + if s.get("@type") == "Text": + subs_num += 1 + + meta['has_subs'] = 1 if subs_num > 0 else 0 + # Reset flags to avoid stale values + meta.pop('eng_subs', None) + meta.pop('sdh_subs', None) + + # Collect languages and flags + for s in tracks: + if s.get("@type") == "Text": + lang = s.get("Language") + if lang and subs_num > 0: + lang_str = str(lang).strip() + if lang_str: + subs += lang_str + ", " + lowered = lang_str.lower() + if lowered in {"en", "eng", "en-us", "en-gb", "en-ie", "en-au", "english"}: meta['eng_subs'] = 1 - if str(s).lower().__contains__("sdh"): - meta['sdh_subs'] = 1 - - return - # get subs function^^^^ + # crude SDH detection + if "sdh" in str(s).lower(): + meta['sdh_subs'] = 1 diff --git a/src/trackers/UHD.py b/src/trackers/UHD.py deleted file mode 100644 index 7bb2376af..000000000 --- a/src/trackers/UHD.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import httpx -from src.trackers.COMMON import COMMON -from src.console import console - - -class UHD(): - def __init__(self, config): - self.config = config - self.tracker = 'UHD' - self.source_flag = 'UHD' - self.upload_url = 'https://uhdshare.com/api/torrents/upload' - self.search_url = 'https://uhdshare.com/api/torrents/filter' - self.torrent_url = 'https://uhdshare.com/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = [""] - pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 
'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 5507cffcc..63e47f81f 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -1,288 +1,117 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx +import aiofiles import cli_ui -from src.trackers.COMMON import COMMON +import re from src.console import console +from src.get_desc import DescriptionBuilder from src.languages import process_desc_language, has_english_language +from src.trackers.UNIT3D import UNIT3D -class ULCX(): - +class ULCX(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='ULCX') self.config = config self.tracker = 'ULCX' self.source_flag = 'ULCX' - self.upload_url = 'https://upload.cx/api/torrents/upload' - self.search_url = 'https://upload.cx/api/torrents/filter' - self.torrent_url = 'https://upload.cx/torrents/' - self.id_url = 'https://upload.cx/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://upload.cx/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [ '4K4U', 'AROMA', 'd3g', ['EDGE2020', 'Encodes'], 'EMBER', 'FGT', 'FnP', 'FRDS', 'Grym', 'Hi10', 'iAHD', 'INFINITY', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'NAHOM', 'Niblets', 'nikt0', ['NuBz', 'Encodes'], 'OFT', 'QxR', ['Ralphy', 'Encodes'], 'RARBG', 'Sicario', 'SM737', 'SPDVD', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', - 'TSPxL', 'VXT', 'Vyndros', 'Will1869', 'x0r', 'YIFY', 'Alcaide_Kira' + 'TSPxL', 'VXT', 'Vyndros', 'Will1869', 'x0r', 'YIFY', 'Alcaide_Kira', 'PHOCiS', 'HDT', 'SPx', 'seedpool' ] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution, type): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - modq = await self.get_flag(meta, 'modq') - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution'], meta['type']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - should_skip = 
meta['tracker_status'][self.tracker].get('skip_upload', False) - if should_skip: - meta['tracker_status'][self.tracker]['status_message'] = "data error: ulcx_no_language" - return - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - name, region_id, distributor_id = await self.edit_name(meta, region_id, distributor_id) - if region_id == "SKIPPED" or distributor_id == "SKIPPED": - console.print("Region or Distributor ID not found; skipping ULCX upload.") - return - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" - open_torrent = open(torrent_file_path, 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'mod_queue_opt_in': modq, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) - if meta['is_disc'] == "BDMV": - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://upload.cx/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go 
check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def edit_name(self, meta, region_id, distributor_id): - common = COMMON(config=self.config) - ulcx_name = meta['name'] - imdb_name = meta.get('imdb_info', {}).get('title', "") - imdb_year = str(meta.get('imdb_info', {}).get('year', "")) - year = str(meta.get('year', "")) - ulcx_name = ulcx_name.replace(f"{meta['title']}", imdb_name, 1) - if not meta.get('category') == "TV": - ulcx_name = ulcx_name.replace(f"{year}", imdb_year, 1) - if meta.get('mal_id', 0) != 0 and meta.get('aka', "") != "": - ulcx_name = ulcx_name.replace(f"{meta['aka']}", "", 1) - if meta.get('is_disc') == "BDMV": - if not region_id: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - region_name = cli_ui.ask_string("ULCX: Region code not found for disc. Please enter it manually (UPPERCASE): ") - region_id = await common.unit3d_region_ids(region_name) - if not meta.get('edition', ""): - ulcx_name = ulcx_name.replace(f"{meta['resolution']}", f"{meta['resolution']} {region_name}", 1) - else: - ulcx_name = ulcx_name.replace(f"{meta['resolution']} {meta['edition']}", f"{meta['resolution']} {meta['edition']} {region_name}", 1) - else: - region_id = "SKIPPED" - if not distributor_id: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - distributor_name = cli_ui.ask_string("ULCX: Distributor code not found for disc. Please enter it manually (UPPERCASE): ") - distributor_id = await common.unit3d_distributor_ids(distributor_name) - else: - distributor_id = "SKIPPED" - - return ulcx_name, region_id, distributor_id - - async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 - - async def search_existing(self, meta, disctype): + async def get_additional_checks(self, meta): + should_continue = True if 'concert' in meta['keywords']: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - console.print('[bold red]Concerts not allowed at ULCX.') + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]Concerts not allowed at {self.tracker}.[/bold red]') if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): pass else: - meta['skipping'] = "ULCX" - return + return False else: - meta['skipping'] = "ULCX" - return + return False if meta['video_codec'] == "HEVC" and meta['resolution'] != "2160p" and 'animation' not in meta['keywords'] and meta.get('anime', False) is not True: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - console.print('[bold red]This content might not fit HEVC rules for ULCX.') + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False)): + console.print(f'[bold red]This content might not fit HEVC rules for {self.tracker}.[/bold red]') if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): pass else: - meta['skipping'] = "ULCX" - return + return False else: - meta['skipping'] = "ULCX" - return + return False if meta['type'] == "ENCODE" and meta['resolution'] not in ['8640p', '4320p', '2160p', '1440p', '1080p', '1080i', '720p']: if not 
meta['unattended']: - console.print('[bold red]Encodes must be at least 720p resolution for ULCX.') - meta['skipping'] = "ULCX" - return + console.print(f'[bold red]Encodes must be at least 720p resolution for {self.tracker}.[/bold red]') + return False if meta['bloated'] is True: - console.print("[bold red]Non-English dub not allowed at ULCX[/bold red]") - meta['skipping'] = "ULCX" - return [] + console.print(f"[bold red]Non-English dub not allowed at {self.tracker}[/bold red]") + return False if not meta['is_disc'] == "BDMV": - if not meta.get('audio_languages') or not meta.get('subtitle_languages'): + if not meta.get('language_checked', False): await process_desc_language(meta, desc=None, tracker=self.tracker) if not await has_english_language(meta.get('audio_languages')) and not await has_english_language(meta.get('subtitle_languages')): if not meta['unattended']: - console.print('[bold red]ULCX requires at least one English audio or subtitle track.') - meta['skipping'] = "ULCX" - return + console.print(f'[bold red]{self.tracker} requires at least one English audio or subtitle track.') + return False + + if not meta['valid_mi_settings']: + console.print(f"[bold red]No encoding settings in mediainfo, skipping {self.tracker} upload.[/bold red]") + return False + + return should_continue - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution'], meta['type']), - 'name': "" + async def get_additional_data(self, meta): + data = { + 'mod_queue_opt_in': await self.get_flag(meta, 'modq'), } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - attributes = each['attributes'] - result = { - 'name': attributes['name'], - 'size': attributes['size'] - } - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - return dupes + return data + + async def get_description(self, meta): + desc = await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker, comparison=True) + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + pattern = r'(\[center\](?:\s*\[url=[^\]]+\]\[img(?:=[0-9]+)?\][^\]]+\[/img\]\[/url\]\s*)+\[/center\])' + + def wrap_in_spoiler(match): + center_block = match.group(1) + return f'[center][spoiler=Screenshots]{center_block}[/spoiler][/center]' + + desc = re.sub(pattern, wrap_in_spoiler, desc, flags=re.DOTALL) + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as f: + await f.write(desc) + + return {'description': desc} + + async def get_name(self, meta): + ulcx_name = meta['name'] + imdb_name = meta.get('imdb_info', {}).get('title', "") + imdb_year = str(meta.get('imdb_info', {}).get('year', "")) + imdb_aka = meta.get('imdb_info', {}).get('aka', "") + year = str(meta.get('year', "")) + aka = meta.get('aka', "") + if imdb_name and imdb_name.strip(): + if aka: + ulcx_name = ulcx_name.replace(f"{aka} ", "", 1) + ulcx_name = ulcx_name.replace(f"{meta['title']}", imdb_name, 1) + if imdb_aka and imdb_aka.strip() and imdb_aka != imdb_name and not meta.get('no_aka', False) and not meta.get('anime', False): + ulcx_name = ulcx_name.replace(f"{imdb_name}", f"{imdb_name} AKA {imdb_aka}", 1) + if "Hybrid" in ulcx_name: + ulcx_name = ulcx_name.replace("Hybrid ", "", 1) + if not meta.get('category') == "TV" and imdb_year and imdb_year.strip() and year and year.strip() and imdb_year != year: + ulcx_name = ulcx_name.replace(f"{year}", imdb_year, 1) + + return {'name': ulcx_name} diff --git a/src/trackers/UNIT3D.py b/src/trackers/UNIT3D.py new file mode 100644 index 000000000..9f84236d1 --- /dev/null +++ b/src/trackers/UNIT3D.py @@ -0,0 +1,430 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +# import discord +import aiofiles +import asyncio +import glob +import httpx +import os +import platform +import re +from src.console import console +from src.get_desc import DescriptionBuilder +from src.trackers.COMMON import COMMON + + +class UNIT3D: + def __init__(self, config, tracker_name): + self.config = config + self.tracker = tracker_name + self.common = COMMON(config) + tracker_config = self.config['TRACKERS'].get(self.tracker, {}) + self.announce_url = tracker_config.get('announce_url', '') + self.api_key = tracker_config.get('api_key', '') + pass + + async def get_additional_checks(self, meta): + should_continue = True + return should_continue + + async def search_existing(self, meta, disctype): + if not self.api_key: + if not meta['debug']: + console.print(f'[bold red]{self.tracker}: Missing API key in config file. 
Skipping upload...[/bold red]') + meta['skipping'] = f'{self.tracker}' + return + + should_continue = await self.get_additional_checks(meta) + if not should_continue: + meta['skipping'] = f'{self.tracker}' + return + + dupes = [] + params = { + 'api_token': self.api_key, + 'tmdbId': meta['tmdb'], + 'categories[]': (await self.get_category_id(meta))['category_id'], + 'resolutions[]': (await self.get_resolution_id(meta))['resolution_id'], + 'name': '' + } + if self.tracker not in ['SP']: + params['types[]'] = (await self.get_type_id(meta))['type_id'] + if meta['category'] == 'TV': + params['name'] = params['name'] + f" {meta.get('season', '')}" + + try: + async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client: + response = await client.get(url=self.search_url, params=params) + response.raise_for_status() + if response.status_code == 200: + data = response.json() + for each in data['data']: + attributes = each.get('attributes', {}) + if not meta['is_disc']: + result = { + 'name': attributes['name'], + 'size': attributes['size'], + 'files': [file['name'] for file in attributes.get('files', []) if isinstance(file, dict) and 'name' in file], + 'file_count': len(attributes.get('files', [])) if isinstance(attributes.get('files'), list) else 0, + 'trumpable': attributes.get('trumpable', False), + 'link': attributes.get('details_link', None), + 'download': attributes.get('download_link', None) + } + else: + result = { + 'name': attributes['name'], + 'size': attributes['size'], + 'files': [], + 'file_count': len(attributes.get('files', [])) if isinstance(attributes.get('files'), list) else 0, + 'trumpable': attributes.get('trumpable', False), + 'link': attributes.get('details_link', None), + 'download': attributes.get('download_link', None) + } + dupes.append(result) + else: + console.print(f'[bold red]Failed to search torrents. HTTP Status: {response.status_code}') + except httpx.HTTPStatusError as e: + if e.response.status_code == 302: + meta['tracker_status'][self.tracker]['status_message'] = ( + "data error: Redirect (302). This may indicate a problem with authentication. Please verify that your API key is valid." 
+ ) + else: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: HTTP {e.response.status_code} - {e.response.text}' + except httpx.TimeoutException: + console.print('[bold red]Request timed out after 10 seconds') + except httpx.RequestError as e: + console.print(f'[bold red]Unable to search for existing torrents: {e}') + except Exception as e: + console.print(f'[bold red]Unexpected error: {e}') + await asyncio.sleep(5) + + return dupes + + async def get_name(self, meta): + return {'name': meta['name']} + + async def get_description(self, meta): + return {'description': await DescriptionBuilder(self.config).unit3d_edit_desc(meta, self.tracker, comparison=True)} + + async def get_mediainfo(self, meta): + if meta['bdinfo'] is not None: + mediainfo = None + else: + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as f: + mediainfo = await f.read() + return {'mediainfo': mediainfo} + + async def get_bdinfo(self, meta): + if meta['bdinfo'] is not None: + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as f: + bdinfo = await f.read() + else: + bdinfo = None + return {'bdinfo': bdinfo} + + async def get_category_id(self, meta, category=None, reverse=False, mapping_only=False): + category_id = { + 'MOVIE': '1', + 'TV': '2', + } + if mapping_only: + return category_id + elif reverse: + return {v: k for k, v in category_id.items()} + elif category is not None: + return {'category_id': category_id.get(category, '0')} + else: + meta_category = meta.get('category', '') + resolved_id = category_id.get(meta_category, '0') + return {'category_id': resolved_id} + + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3', + 'DVDRIP': '3', + } + if mapping_only: + return type_id + elif reverse: + return {v: k for k, v in type_id.items()} + elif type is not None: + return {'type_id': type_id.get(type, '0')} + else: + meta_type = meta.get('type', '') + resolved_id = type_id.get(meta_type, '0') + return {'type_id': resolved_id} + + async def get_resolution_id(self, meta, resolution=None, reverse=False, mapping_only=False): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + } + if mapping_only: + return resolution_id + elif reverse: + return {v: k for k, v in resolution_id.items()} + elif resolution is not None: + return {'resolution_id': resolution_id.get(resolution, '10')} + else: + meta_resolution = meta.get('resolution', '') + resolved_id = resolution_id.get(meta_resolution, '10') + return {'resolution_id': resolved_id} + + async def get_anonymous(self, meta): + if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): + anonymous = 0 + else: + anonymous = 1 + return {'anonymous': anonymous} + + async def get_additional_data(self, meta): + # Used to add additional data if needed + ''' + data = { + 'modq': await self.get_flag(meta, 'modq'), + 'draft': await self.get_flag(meta, 'draft'), + } + ''' + data = {} + + return data + + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if meta.get(flag_name, False): + return 1 + else: + if config_flag is not None: + return 1 if config_flag else 0 + else: + return 0 
+ + async def get_distributor_id(self, meta): + distributor_id = await self.common.unit3d_distributor_ids(meta.get('distributor')) + if distributor_id != 0: + return {'distributor_id': distributor_id} + + return {} + + async def get_region_id(self, meta): + region_id = await self.common.unit3d_region_ids(meta.get('region')) + if region_id != 0: + return {'region_id': region_id} + + return {} + + async def get_tmdb(self, meta): + return {'tmdb': meta['tmdb']} + + async def get_imdb(self, meta): + return {'imdb': meta['imdb']} + + async def get_tvdb(self, meta): + tvdb = meta.get('tvdb_id', 0) if meta['category'] == 'TV' else 0 + return {'tvdb': tvdb} + + async def get_mal(self, meta): + return {'mal': meta['mal_id']} + + async def get_igdb(self, meta): + return {'igdb': 0} + + async def get_stream(self, meta): + return {'stream': meta['stream']} + + async def get_sd(self, meta): + return {'sd': meta['sd']} + + async def get_keywords(self, meta): + return {'keywords': meta.get('keywords', '')} + + async def get_personal_release(self, meta): + personal_release = int(meta.get('personalrelease', False)) + return {'personal_release': personal_release} + + async def get_internal(self, meta): + internal = 0 + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != '' and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + internal = 1 + + return {'internal': internal} + + async def get_season_number(self, meta): + data = {} + if meta.get('category') == 'TV': + data = {'season_number': meta.get('season_int', '0')} + + return data + + async def get_episode_number(self, meta): + data = {} + if meta.get('category') == 'TV': + data = {'episode_number': meta.get('episode_int', '0')} + + return data + + async def get_featured(self, meta): + return {'featured': 0} + + async def get_free(self, meta): + free = 0 + if meta.get('freeleech', 0) != 0: + free = meta.get('freeleech', 0) + + return {'free': free} + + async def get_doubleup(self, meta): + return {'doubleup': 0} + + async def get_sticky(self, meta): + return {'sticky': 0} + + async def get_data(self, meta): + results = await asyncio.gather( + self.get_name(meta), + self.get_description(meta), + self.get_mediainfo(meta), + self.get_bdinfo(meta), + self.get_category_id(meta), + self.get_type_id(meta), + self.get_resolution_id(meta), + self.get_tmdb(meta), + self.get_imdb(meta), + self.get_tvdb(meta), + self.get_mal(meta), + self.get_igdb(meta), + self.get_anonymous(meta), + self.get_stream(meta), + self.get_sd(meta), + self.get_keywords(meta), + self.get_personal_release(meta), + self.get_internal(meta), + self.get_season_number(meta), + self.get_episode_number(meta), + self.get_featured(meta), + self.get_free(meta), + self.get_doubleup(meta), + self.get_sticky(meta), + self.get_additional_data(meta), + self.get_region_id(meta), + self.get_distributor_id(meta), + ) + + merged = {} + for r in results: + if not isinstance(r, dict): + raise TypeError(f'Expected dict, got {type(r)}: {r}') + merged.update(r) + + return merged + + async def get_additional_files(self, meta): + files = {} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, 'tmp', uuid, '*.nfo') + nfo_files = glob.glob(specified_dir_path) + + if nfo_files: + async with aiofiles.open(nfo_files[0], 'rb') as f: + nfo_bytes = await f.read() + files['nfo'] = ("nfo_file.nfo", nfo_bytes, "text/plain") + + return files + + async def upload(self, meta, disctype): + data = await 
self.get_data(meta) + await self.common.edit_torrent(meta, self.tracker, self.source_flag) + + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() + files = {'torrent': ('torrent.torrent', torrent_bytes, 'application/x-bittorrent')} + files.update(await self.get_additional_files(meta)) + headers = {'User-Agent': f'{meta["ua_name"]} {meta.get("current_version", "")} ({platform.system()} {platform.release()})'} + params = {'api_token': self.api_key} + + if meta['debug'] is False: + response_data = {} + try: + async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client: + response = await client.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + response.raise_for_status() + + response_data = response.json() + meta['tracker_status'][self.tracker]['status_message'] = await self.process_response_data(response_data) + torrent_id = await self.get_torrent_id(response_data) + + meta['tracker_status'][self.tracker]['torrent_id'] = torrent_id + await self.common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.announce_url, + self.torrent_url + torrent_id, + headers=headers, + params=params, + downurl=response_data['data'] + ) + + except httpx.HTTPStatusError as e: + if e.response.status_code == 403: + meta['tracker_status'][self.tracker]['status_message'] = ( + "data error: Forbidden (403). This may indicate that you do not have upload permission." + ) + elif e.response.status_code == 302: + meta['tracker_status'][self.tracker]['status_message'] = ( + "data error: Redirect (302). This may indicate a problem with authentication. Please verify that your API key is valid." + ) + else: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: HTTP {e.response.status_code} - {e.response.text}' + except httpx.TimeoutException: + meta['tracker_status'][self.tracker]['status_message'] = 'data error: Request timed out after 10 seconds' + except httpx.RequestError as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: Unable to upload. Error: {e}.\nResponse: {response_data}' + except Exception as e: + meta['tracker_status'][self.tracker]['status_message'] = f'data error: It may have uploaded, go check. Error: {e}.\nResponse: {response_data}' + return + else: + console.print(f'[cyan]{self.tracker} Request Data:') + console.print(data) + meta['tracker_status'][self.tracker]['status_message'] = f'Debug mode enabled, not uploading: {self.tracker}.' 
+ + async def get_torrent_id(self, response_data): + """Matches /12345.abcde and returns 12345""" + torrent_id = '' + try: + match = re.search(r'/(\d+)\.', response_data['data']) + if match: + torrent_id = match.group(1) + except (IndexError, KeyError): + print('Could not parse torrent_id from response data.') + return torrent_id + + async def process_response_data(self, response_data): + """Returns only the success message from the response data if the upload is successful; otherwise, returns the complete response data.""" + status_message = '' + try: + if response_data['success'] is True: + status_message = response_data['message'] + else: + status_message = response_data + except Exception: + pass + + return status_message diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 842c7ce5d..c3ae09627 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -1,47 +1,40 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- # import discord -import asyncio -import requests -import platform -import httpx from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class UNIT3D_TEMPLATE(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - - ############################################################### - ######## EDIT ME ######## noqa E266 - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - +class UNIT3D_TEMPLATE(UNIT3D): # EDIT 'UNIT3D_TEMPLATE' AS ABBREVIATED TRACKER NAME def __init__(self, config): + super().__init__(config, tracker_name='UNIT3D_TEMPLATE') # EDIT 'UNIT3D_TEMPLATE' AS ABBREVIATED TRACKER NAME self.config = config - self.tracker = 'Abbreviated' + self.common = COMMON(config) + self.tracker = 'Abbreviated Tracker Name' self.source_flag = 'Source flag for .torrent' - self.upload_url = 'https://domain.tld/api/torrents/upload' - self.search_url = 'https://domain.tld/api/torrents/filter' - self.torrent_url = 'https://domain.tld/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://domain.tld/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.requests_url = f'{self.base_url}/api/requests/filter' # If the site supports requests via API, otherwise remove this line + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [""] pass - async def get_cat_id(self, category_name): + # The section below can be deleted if no changes are needed, as everything else is handled in UNIT3D.py + # If advanced changes are required, copy the necessary functions from UNIT3D.py here + # For example, if you need to modify the description, copy and paste the 'get_description' function and adjust it accordingly + + # If default UNIT3D categories, remove this function + async def get_category_id(self, meta): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') - return category_id + }.get(meta['category'], '0') + return {'category_id': category_id} - async def get_type_id(self, type): + # If default UNIT3D types, remove this function + async def get_type_id(self, meta): type_id = { 'DISC': '1', 'REMUX': '2', @@ 
-49,10 +42,11 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - return type_id + }.get(meta['type'], '0') + return {'type_id': type_id} - async def get_res_id(self, resolution): + # If default UNIT3D resolutions, remove this function + async def get_resolution_id(self, meta): resolution_id = { '8640p': '10', '4320p': '1', @@ -65,127 +59,26 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') - return resolution_id - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### noqa E266 - ############################################################### + }.get(meta['resolution'], '10') + return {'resolution_id': resolution_id} - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 + # If there are tracker specific checks to be done before upload, add them here + # Is it a movie only tracker? Are concerts banned? Etc. + # If no checks are necessary, remove this function + async def get_additional_checks(self, meta): + should_continue = True + return should_continue - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} + # If the tracker has modq in the api, otherwise remove this function + # If no additional data is required, remove this function + async def get_additional_data(self, meta): data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if meta.get('freeleech', 0) != 0: - data['free'] = meta.get('freeleech', 0) - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - 
headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'modq': await self.get_flag(meta, 'modq'), } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) + return data - return dupes + # If the tracker has specific naming conventions, add them here; otherwise, remove this function + async def get_name(self, meta): + UNIT3D_TEMPLATE_name = meta['name'] + return {'name': UNIT3D_TEMPLATE_name} diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 75bf15991..1652a9419 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -1,127 +1,27 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class UTP(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ +class UTP(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='UTP') self.config = config + self.common = COMMON(config) self.tracker = 'UTP' self.source_flag = 'UTOPIA' - self.search_url = 'https://utp.to/api/torrents/filter' - self.torrent_url = 'https://utp.to/torrents/' - self.upload_url = 'https://utp.to/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.base_url = 'https://utp.to/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' self.banned_groups = [] pass - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] 
= ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://utp.to/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
- open_torrent.close() - - async def get_cat_id(self, category_name, edition): + async def get_category_id(self, meta): + category_name = meta['category'] + edition = meta.get('edition', '') category_id = { 'MOVIE': '1', 'TV': '2', @@ -129,58 +29,13 @@ async def get_cat_id(self, category_name, edition): }.get(category_name, '0') if category_name == 'MOVIE' and 'FANRES' in edition: category_id = '3' - return category_id + return {'category_id': category_id} - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): + async def get_resolution_id(self, meta): resolution_id = { '4320p': '1', '2160p': '2', '1080p': '3', '1080i': '4' - }.get(resolution, '1') - return resolution_id - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + }.get(meta['resolution'], '1') + return {'resolution_id': resolution_id} diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index e06dfccd7..d0f260815 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -1,191 +1,21 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 # -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import platform -import os -import glob -import httpx from src.trackers.COMMON import COMMON -from src.console import console +from src.trackers.UNIT3D import UNIT3D -class YOINK(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class YOINK(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='YOINK') self.config = config + self.common = COMMON(config) self.tracker = 'YOINK' self.source_flag = 'YOiNKED' - self.upload_url = 'https://yoinked.org/api/torrents/upload' - self.search_url = 'https://yoinked.org/api/torrents/filter' - self.torrent_url = 'https://yoinked.org/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = ["YTS,YiFY,LAMA,MeGUSTA,NAHOM,GalaxyRG,RARBG"] + self.base_url = 'https://yoinked.org/' + self.id_url = 
f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.requests_url = f'{self.base_url}/api/requests/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = ['YTS', 'YiFY', 'LAMA', 'MeGUSTA', 'NAHOM', 'GalaxyRG', 'RARBG', 'INFINITY'] pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - base_dir = meta['base_dir'] - uuid = meta['uuid'] - specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - nfo_files = glob.glob(specified_dir_path) - nfo_file = None - if nfo_files: - nfo_file = open(nfo_files[0], 'rb') - if nfo_file: - files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': 
f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://yoinked.org/torrents/" + t_id) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/YUS.py b/src/trackers/YUS.py index 3ec9d8334..ac1ecb2bf 100644 --- a/src/trackers/YUS.py +++ b/src/trackers/YUS.py @@ -1,41 +1,48 @@ -# import discord -import asyncio -import requests -import platform -import httpx -from src.trackers.COMMON import COMMON +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# -*- coding: utf-8 -*- +import cli_ui +import re from src.console import console +from src.trackers.COMMON import COMMON +from src.trackers.UNIT3D import UNIT3D -class YUS(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - +class YUS(UNIT3D): def __init__(self, config): + super().__init__(config, tracker_name='YUS') self.config = config + self.common = COMMON(config) self.tracker = 'YUS' self.source_flag = 'YuScene' - self.upload_url = 'https://yu-scene.net/api/torrents/upload' - self.search_url = 'https://yu-scene.net/api/torrents/filter' - self.torrent_url = 'https://yu-scene.net/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" - self.banned_groups = ['KiNGDOM', 'Lama', 'MeGusta', 'MezRips', 'mHD', 'mRS', 'msd', 'NeXus', 'NhaNc3', 'nHD', 'RARBG', 'Radarr', - 'RCDiVX', 'RDN', 'SANTi', 'VXT', 'Will1869', 'x0r', 'XS', 'YIFY', 'YTS', 'ZKBL', 'ZmN', 'ZMNT'] + self.base_url = 'https://yu-scene.net/' + self.id_url = f'{self.base_url}/api/torrents/' + self.upload_url = f'{self.base_url}/api/torrents/upload' + self.search_url = f'{self.base_url}/api/torrents/filter' + self.torrent_url = f'{self.base_url}/torrents/' + self.banned_groups = [ + 'KiNGDOM', 'Lama', 'MeGusta', 'MezRips', 'mHD', 'mRS', 'msd', 'NeXus', + 'NhaNc3', 'nHD', 'RARBG', 'Radarr', 'RCDiVX', 'RDN', 'SANTi', 'VXT', 'Will1869', 'x0r', + 'XS', 'YIFY', 'YTS', 'ZKBL', 'ZmN', 'ZMNT', 'D3US', 'B3LLUM', 'FGT', 'd3g'] pass - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id + async def get_additional_checks(self, meta): + should_continue = True + + genres = f"{meta.get('keywords', '')} {meta.get('combined_genres', '')}" + adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy'] + if any(re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE) for keyword in adult_keywords): + if (not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False))): + console.print('[bold red]Porn/xxx is not allowed at YUS.') + if cli_ui.ask_yes_no("Do you want to upload anyway?", default=False): + pass + else: + return False + else: + return False + + return should_continue - async def get_type_id(self, type): + async def get_type_id(self, meta, type=None, reverse=False, mapping_only=False): type_id = { 'DISC': '17', 'REMUX': '2', @@ -43,143 +50,14 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - 
'1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and not self.config['TRACKERS'][self.tracker].get('anon', False): - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb'], - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - meta['tracker_status'][self.tracker]['status_message'] = response.json() - # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - meta['tracker_status'][self.tracker]['torrent_id'] = t_id - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) - except Exception: - console.print("It may have uploaded, go check") - return + if mapping_only: + return type_id + elif reverse: + return {v: k for k, v in type_id.items()} + elif type is not None: + return {'type_id': type_id.get(type, '0')} else: - console.print("[cyan]Request Data:") - console.print(data) - meta['tracker_status'][self.tracker]['status_message'] = "Debug mode enabled, not uploading." 
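The get_additional_checks() gate added above matches adult keywords only as whole comma-separated entries in the combined keyword/genre string, rather than as bare substrings the way the old search_existing() check did. A quick standalone illustration of that regex; the sample strings are made up:

import re

adult_keywords = ['xxx', 'erotic', 'porn', 'adult', 'orgy']

def flagged(genres: str) -> bool:
    # Same pattern as the added code: keyword must be a full entry,
    # bounded by start/end of string or commas.
    return any(
        re.search(rf'(^|,\s*){re.escape(keyword)}(\s*,|$)', genres, re.IGNORECASE)
        for keyword in adult_keywords
    )

print(flagged("Drama, Erotic"))        # True  - matches a full comma-separated entry
print(flagged("Documentary, Sports"))  # False - no keyword present
print(flagged("Adulthood, Comedy"))    # False - 'adult' only matches as a whole entry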
- open_torrent.close() - - async def search_existing(self, meta, disctype): - disallowed_keywords = {'XXX', 'Erotic', 'Porn', 'Hentai', 'softcore'} - if any(keyword.lower() in disallowed_keywords for keyword in map(str.lower, meta['keywords'])): - console.print('[bold red]Adult animation not allowed at YUS.') - meta['skipping'] = "YUS" - return [] - dupes = [] - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get(url=self.search_url, params=params) - if response.status_code == 200: - data = response.json() - for each in data['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - else: - console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") - except httpx.TimeoutException: - console.print("[bold red]Request timed out after 5 seconds") - except httpx.RequestError as e: - console.print(f"[bold red]Unable to search for existing torrents: {e}") - except Exception as e: - console.print(f"[bold red]Unexpected error: {e}") - await asyncio.sleep(5) - - return dupes + meta_type = meta.get('type', '') + resolved_id = type_id.get(meta_type, '0') + return {'type_id': resolved_id} diff --git a/src/trackersetup.py b/src/trackersetup.py index a4940c6ed..622b9df0f 100644 --- a/src/trackersetup.py +++ b/src/trackersetup.py @@ -1,24 +1,47 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles +import asyncio +import cli_ui +import httpx +import json +import os +import re +import sys + +from data.config import config +from datetime import datetime, timedelta +from src.cleanup import cleanup, reset_terminal +from src.console import console +from src.trackers.COMMON import COMMON + from src.trackers.ACM import ACM from src.trackers.AITHER import AITHER from src.trackers.AL import AL from src.trackers.ANT import ANT from src.trackers.AR import AR from src.trackers.ASC import ASC +from src.trackers.AZ import AZ from src.trackers.BHD import BHD from src.trackers.BHDTV import BHDTV +from src.trackers.BJS import BJS from src.trackers.BLU import BLU from src.trackers.BT import BT from src.trackers.CBR import CBR +from src.trackers.CZ import CZ from src.trackers.DC import DC from src.trackers.DP import DP +from src.trackers.FF import FF from src.trackers.FL import FL from src.trackers.FNP import FNP from src.trackers.FRIKI import FRIKI +from src.trackers.GPW import GPW from src.trackers.HDB import HDB from src.trackers.HDS import HDS from src.trackers.HDT import HDT from src.trackers.HHD import HHD from src.trackers.HUNO import HUNO +from src.trackers.IHD import IHD +from src.trackers.IS import IS from src.trackers.ITT import ITT from src.trackers.LCD import LCD from src.trackers.LDU import LDU @@ -28,10 +51,11 @@ from src.trackers.NBL import NBL from src.trackers.OE import OE from src.trackers.OTW import OTW -from src.trackers.PSS import PSS +from src.trackers.PHD import PHD from src.trackers.PT import PT from src.trackers.PTER import PTER from src.trackers.PTP import PTP +from src.trackers.PTS import 
PTS from src.trackers.PTT import PTT from src.trackers.R4E import R4E from src.trackers.RAS import RAS @@ -46,21 +70,15 @@ from src.trackers.THR import THR from src.trackers.TIK import TIK from src.trackers.TL import TL -from src.trackers.TOCA import TOCA +from src.trackers.TLZ import TLZ from src.trackers.TTG import TTG +from src.trackers.TTR import TTR from src.trackers.TVC import TVC -from src.trackers.UHD import UHD from src.trackers.ULCX import ULCX from src.trackers.UTP import UTP from src.trackers.YOINK import YOINK from src.trackers.YUS import YUS -from src.console import console -import httpx -import os -import json -import cli_ui -from datetime import datetime, timedelta -import asyncio +from src.trackers.EMUW import EMUW class TRACKER_SETUP: @@ -70,8 +88,6 @@ def __init__(self, config): pass def trackers_enabled(self, meta): - from data.config import config - if meta.get('trackers') is not None: trackers = meta['trackers'] else: @@ -96,20 +112,17 @@ def trackers_enabled(self, meta): async def get_banned_groups(self, meta, tracker): file_path = os.path.join(meta['base_dir'], 'data', 'banned', f'{tracker}_banned_groups.json') + tracker_class = tracker_class_map.get(tracker.upper()) + tracker_instance = tracker_class(self.config) + try: + banned_url = tracker_instance.banned_url + except AttributeError: + return None + # Check if we need to update if not await self.should_update(file_path): return file_path - url = None - if tracker.upper() == "AITHER": - url = f'https://{tracker}.cc/api/blacklists/releasegroups' - elif tracker.upper() == "LST": - url = f"https://{tracker}.gg/api/bannedReleaseGroups" - - if not url: - console.print(f"Error: Tracker '{tracker}' is not supported.") - return None - headers = { 'Authorization': f"Bearer {self.config['TRACKERS'][tracker]['api_key'].strip()}", 'Content-Type': 'application/json', @@ -124,7 +137,7 @@ async def get_banned_groups(self, meta, tracker): try: # Add query parameters for pagination params = {'cursor': next_cursor, 'per_page': 100} if next_cursor else {'per_page': 100} - response = await client.get(url, headers=headers, params=params) + response = await client.get(url=banned_url, headers=headers, params=params) if response.status_code == 200: response_json = response.json() @@ -226,7 +239,11 @@ async def check_banned_group(self, tracker, banned_group_list, meta): if not meta['tag']: return False - if tracker.upper() in ("AITHER", "LST"): + group_tags = meta['tag'][1:].lower() + if 'taoe' in group_tags: + group_tags = 'taoe' + + if tracker.upper() in ("AITHER", "LST", "SPD"): file_path = await self.get_banned_groups(meta, tracker) if file_path == "empty": console.print(f"[bold red]No banned groups found for '{tracker}'.") @@ -252,21 +269,27 @@ async def check_banned_group(self, tracker, banned_group_list, meta): for tag in banned_group_list: if isinstance(tag, list): - if meta['tag'][1:].lower() == tag[0].lower(): + if group_tags == tag[0].lower(): console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}") await asyncio.sleep(5) result = True else: - if meta['tag'][1:].lower() == tag.lower(): + if group_tags == tag.lower(): console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") await asyncio.sleep(5) result = True if result: - if not meta['unattended'] or 
meta.get('unattended-confirm', False): - if cli_ui.ask_yes_no(cli_ui.red, "Do you want to continue anyway?", default=False): - return False + if not meta['unattended'] or meta.get('unattended_confirm', False): + try: + if cli_ui.ask_yes_no(cli_ui.red, "Do you want to continue anyway?", default=False): + return False + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) return True return True @@ -318,12 +341,17 @@ async def write_internal_claims_to_file(self, file_path, data, debug=False): async def get_torrent_claims(self, meta, tracker): file_path = os.path.join(meta['base_dir'], 'data', 'banned', f'{tracker}_claimed_releases.json') + tracker_class = tracker_class_map.get(tracker.upper()) + tracker_instance = tracker_class(self.config) + try: + claims_url = tracker_instance.claims_url + except AttributeError: + return None # Check if we need to update if not await self.should_update(file_path): return await self.check_tracker_claims(meta, tracker) - url = f'https://{tracker}.cc/api/internals/claim' headers = { 'Authorization': f"Bearer {self.config['TRACKERS'][tracker]['api_key'].strip()}", 'Content-Type': 'application/json', @@ -338,7 +366,7 @@ async def get_torrent_claims(self, meta, tracker): try: # Add query parameters for pagination params = {'cursor': next_cursor, 'per_page': 100} if next_cursor else {'per_page': 100} - response = await client.get(url, headers=headers, params=params) + response = await client.get(url=claims_url, headers=headers, params=params) if response.status_code == 200: response_json = response.json() @@ -395,23 +423,18 @@ async def process_single_tracker(tracker_name): return False tracker_instance = tracker_class(self.config) - all_types = await tracker_instance.get_type_id() - type_names = meta.get('type', []) - if isinstance(type_names, str): - type_names = [type_names] - - type_ids = [all_types.get(type_name) for type_name in type_names] + # Get name-to-ID mappings directly + type_mapping = await tracker_instance.get_type_id(meta, mapping_only=True) + type_name = meta.get('type', '') + type_ids = [type_mapping.get(type_name)] if type_name else [] if None in type_ids: - console.print("[yellow]Warning: Some types in meta not found in tracker type mapping.[/yellow]") + console.print("[yellow]Warning: Type in meta not found in tracker type mapping.[/yellow]") - all_resolutions = await tracker_instance.get_res_id() - resolution_names = meta.get('resolution', []) - if isinstance(resolution_names, str): - resolution_names = [resolution_names] - - resolution_ids = [all_resolutions.get(res_name) for res_name in resolution_names] + resolution_mapping = await tracker_instance.get_resolution_id(meta, mapping_only=True) + resolution_name = meta.get('resolution', '') + resolution_ids = [resolution_mapping.get(resolution_name)] if resolution_name else [] if None in resolution_ids: - console.print("[yellow]Warning: Some resolutions in meta not found in tracker resolution mapping.[/yellow]") + console.print("[yellow]Warning: Resolution in meta not found in tracker resolution mapping.[/yellow]") tmdb_id = meta.get('tmdb', []) if isinstance(tmdb_id, int): @@ -464,24 +487,441 @@ async def process_single_tracker(tracker_name): return match_found + async def get_tracker_requests(self, meta, tracker, url): + if meta['debug']: + console.print(f"[bold green]Searching for existing requests on {tracker}[/bold green]") + requests = [] + headers = { + 'Authorization': f"Bearer 
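The ID helpers are now called in three modes instead of through separate mapping functions. A sketch of the three call shapes, based on the YUS.get_type_id() signature and the trackersetup call sites above; the wrapper function itself is only illustrative and assumes the UNIT3D base supports the same keyword arguments:

# Illustrative wrapper - 'tracker_instance' and 'meta' are whatever the caller
# already has; only the helper calls themselves are taken from this patch.
async def lookup_type_ids(tracker_instance, meta):
    # 1. Upload payload: resolve this release's type to {'type_id': ...}
    payload_fragment = await tracker_instance.get_type_id(meta)

    # 2. Dupe/request checking: fetch the whole name -> id mapping
    type_mapping = await tracker_instance.get_type_id(meta, mapping_only=True)
    type_ids = [type_mapping.get(meta.get('type', ''))] if meta.get('type') else []

    # 3. Reverse lookup: id -> name (presumably for reading API results back)
    id_to_name = await tracker_instance.get_type_id(meta, reverse=True)
    return payload_fragment, type_ids, id_to_name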
{self.config['TRACKERS'][tracker]['api_key'].strip()}", + 'Accept': 'application/json' + } + params = { + 'tmdb': meta['tmdb'], + } + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(url=url, headers=headers, params=params) + if response.status_code == 200: + data = response.json() + if 'data' in data and isinstance(data['data'], list): + results_list = data['data'] + elif 'results' in data and isinstance(data['results'], list): + results_list = data['results'] + else: + console.print(f"[bold red]Unexpected response format: {type(data)}[/bold red]") + return requests + + try: + for each in results_list: + attributes = each + result = { + 'id': attributes.get('id'), + 'name': attributes.get('name'), + 'description': attributes.get('description'), + 'category': attributes.get('category_id'), + 'type': attributes.get('type_id'), + 'resolution': attributes.get('resolution_id'), + 'bounty': attributes.get('bounty'), + 'status': attributes.get('status'), + 'claimed': attributes.get('claimed'), + 'season': attributes.get('season_number'), + 'episode': attributes.get('episode_number'), + } + requests.append(result) + except Exception as e: + console.print(f"[bold red]Error processing response data: {e}[/bold red]") + return requests + else: + console.print(f"[bold red]Failed to search torrents on {tracker}. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + + return requests + + async def bhd_request_check(self, meta, tracker, url): + if 'BHD' not in self.config['TRACKERS'] or not self.config['TRACKERS']['BHD'].get('api_key'): + console.print("[red]BHD API key not configured. Skipping BHD request check.[/red]") + return False + if meta['debug']: + console.print(f"[bold green]Searching for existing requests on {tracker}[/bold green]") + requests = [] + params = { + 'action': 'search', + 'tmdb_id': f"{meta['category'].lower()}/{meta['tmdb_id']}", + } + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(url=url, params=params) + if response.status_code == 200: + data = response.json() + if 'data' in data and isinstance(data['data'], list): + results_list = data['data'] + elif 'results' in data and isinstance(data['results'], list): + results_list = data['results'] + else: + console.print(f"[bold red]Unexpected response format: {type(data)}[/bold red]") + console.print(f"[bold red]Full response: {data}[/bold red]") + return requests + + try: + for each in results_list: + attributes = each + result = { + 'id': attributes.get('id'), + 'name': attributes.get('name'), + 'type': attributes.get('source'), + 'resolution': attributes.get('type'), + 'dv': attributes.get('dv'), + 'hdr': attributes.get('hdr'), + 'bounty': attributes.get('bounty'), + 'status': attributes.get('status'), + 'internal': attributes.get('internal'), + 'url': attributes.get('url'), + } + requests.append(result) + except Exception as e: + console.print(f"[bold red]Error processing response data: {e}[/bold red]") + console.print(f"[bold red]Response data: {data}[/bold red]") + return requests + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + # console.print(f"Debug: BHD requests found: {requests}") + return requests + + async def tracker_request(self, meta, tracker): + if isinstance(tracker, str): + trackers = [tracker.strip().upper()] + elif isinstance(tracker, list): + trackers = [s.upper() for s in tracker] + else: + console.print("[red]Invalid trackers input format.[/red]") + return False + + async def process_single_tracker(tracker): + tracker_class = tracker_class_map.get(tracker) + if not tracker_class: + console.print(f"[red]Tracker {tracker} is not registered in tracker_class_map[/red]") + return False + + tracker_instance = tracker_class(self.config) + try: + url = tracker_instance.requests_url + except AttributeError: + if tracker.upper() in ('ASC', 'BJS', 'FF', 'HDS', 'AZ', 'CZ', 'PHD'): + pass + else: + # tracker without requests url not supported + return + if tracker.upper() == "BHD": + requests = await self.bhd_request_check(meta, tracker, url) + elif tracker.upper() in ('ASC', 'BJS', 'FF', 'HDS', 'AZ', 'CZ', 'PHD'): + requests = await tracker_instance.get_requests(meta) + return + else: + requests = await self.get_tracker_requests(meta, tracker, url) + type_mapping = await tracker_instance.get_type_id(meta, mapping_only=True) + type_name = meta.get('type', '') + type_ids = [type_mapping.get(type_name)] if type_name else [] + if None in type_ids: + console.print("[yellow]Warning: Type in meta not found in tracker type mapping.[/yellow]") + + resolution_mapping = await tracker_instance.get_resolution_id(meta, mapping_only=True) + resolution_name = meta.get('resolution', '') + resolution_ids = [resolution_mapping.get(resolution_name)] if resolution_name else [] + if None in resolution_ids: + console.print("[yellow]Warning: Resolution in meta not found in tracker resolution mapping.[/yellow]") + + category_mapping = await tracker_instance.get_category_id(meta, mapping_only=True) + category_name = meta.get('category', '') + category_ids = [category_mapping.get(category_name)] if category_name else [] + if None in category_ids: + console.print("[yellow]Warning: Some categories in meta not found in tracker category mapping.[/yellow]") + + tmdb_id = meta.get('tmdb', []) + if isinstance(tmdb_id, int): + tmdb_id = [tmdb_id] + elif isinstance(tmdb_id, str): + tmdb_id = [int(tmdb_id)] + elif isinstance(tmdb_id, list): + tmdb_id = [int(id) for id in tmdb_id] + else: + console.print(f"[red]Invalid TMDB ID format in meta: {tmdb_id}[/red]") + return False + + # Initialize request log for this tracker + common = COMMON(config) + log_path = f"{meta['base_dir']}/tmp/{tracker}_request_results.json" + if not await common.path_exists(log_path): + await common.makedirs(os.path.dirname(log_path)) + + request_data = [] + try: + async with aiofiles.open(log_path, 'r', encoding='utf-8') as f: + content = await f.read() + request_data = json.loads(content) if content.strip() else [] + except Exception: + request_data = [] + + existing_uuids = {entry.get('uuid') for entry in request_data if isinstance(entry, dict)} + + for each in requests: + type_name = False + resolution = False + season = False + episode = False + double_check = False + api_id = each.get('id') + api_category = each.get('category') + api_name = 
each.get('name') + api_type = each.get('type') + api_bounty = each.get('bounty') + api_status = each.get('status') + if "BHD" not in tracker: + if str(api_type) in [str(tid) for tid in type_ids]: + type_name = True + elif api_type is None: + type_name = True + double_check = True + api_resolution = each.get('resolution') + if str(api_resolution) in [str(rid) for rid in resolution_ids]: + resolution = True + elif api_resolution is None: + resolution = True + double_check = True + api_claimed = each.get('claimed') + api_description = each.get('description') + if meta['category'] == "TV": + api_season = int(each.get('season')) if each.get('season') is not None else 0 + if api_season and meta.get('season_int') and api_season == meta.get('season_int'): + season = True + api_episode = int(each.get('episode')) if each.get('episode') is not None else 0 + if api_episode and meta.get('episode_int') and api_episode == meta.get('episode_int'): + episode = True + if str(api_category) in [str(cid) for cid in category_ids]: + new_url = re.sub(r'/api/requests/filter$', f'/requests/{api_id}', url) + if meta.get('category') == "MOVIE" and type_name and resolution and not api_claimed: + console.print(f"[bold blue]Found exact request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{api_status}[/bold yellow][/bold blue]") + console.print(f"[bold blue]Claimed status:[/bold blue] [bold yellow]{api_claimed}[/bold yellow]") + console.print(f"[bold green]{api_name}:[/bold green] {new_url}") + console.print() + if double_check: + console.print("[bold red]Type and/or resolution was set to ANY, double check any description requirements:[/bold red]") + console.print(f"[bold yellow]Request desc:[/bold yellow] {api_description[:100]}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': new_url, + 'name': api_name, + 'bounty': api_bounty, + 'description': api_description, + 'claimed': api_claimed + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + elif meta.get('category') == "TV" and season and episode and type_name and resolution and not api_claimed: + console.print(f"[bold blue]Found exact request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{api_status}[/bold yellow][/bold blue]") + console.print(f"[bold blue]Claimed status:[/bold blue] [bold yellow]{api_claimed}[/bold yellow]") + console.print(f"[bold yellow]{api_name}[/bold yellow] - [bold yellow]S{api_season:02d} E{api_episode:02d}:[/bold yellow] {new_url}") + console.print() + if double_check: + console.print("[bold red]Type and/or resolution was set to ANY, double check any description requirements:[/bold red]") + console.print(f"[bold yellow]Request desc:[/bold yellow] {api_description[:100]}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': new_url, + 'name': api_name, + 'bounty': api_bounty, + 'description': api_description, + 'claimed': api_claimed + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + else: + console.print(f"[bold blue]Found request on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{api_status}[/bold yellow][/bold blue]") + console.print(f"[bold 
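Matching, unclaimed requests are appended to a per-run JSON log at tmp/{tracker}_request_results.json, de-duplicated by the release uuid. A hypothetical reader for that log, not part of the patch, with the field names taken from the request_entry dicts above and 'AITHER' used only as a sample tracker name:

import json
import os

def load_request_matches(base_dir: str, tracker: str):
    # Mirrors the log path used in tracker_request(); returns unclaimed entries.
    log_path = os.path.join(base_dir, "tmp", f"{tracker}_request_results.json")
    if not os.path.exists(log_path):
        return []
    with open(log_path, "r", encoding="utf-8") as f:
        content = f.read()
    entries = json.loads(content) if content.strip() else []
    # Each entry carries: uuid, path, url, name, bounty, description, claimed,
    # and optionally match_type == 'partial'.
    return [entry for entry in entries if not entry.get("claimed")]

# matches = load_request_matches(meta['base_dir'], "AITHER")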
blue]Claimed status:[/bold blue] [bold yellow]{api_claimed}[/bold yellow]") + if meta.get('category') == "MOVIE": + console.print(f"[bold yellow]{api_name}:[/bold yellow] {new_url}") + else: + console.print(f"[bold yellow]{api_name}[/bold yellow] - [bold yellow]S{api_season:02d} E{api_episode:02d}:[/bold yellow] {new_url}") + console.print(f"[bold green]Request desc: {api_description[:100]}[/bold green]") + console.print() + + if not api_claimed and meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': new_url, + 'name': api_name, + 'bounty': api_bounty, + 'description': api_description, + 'claimed': api_claimed, + 'match_type': 'partial' + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + else: + unclaimed = each.get('status') == 1 + internal = each.get('internal') == 1 + claimed_status = "" + if each.get('status') == 1: + claimed_status = "Unfilled" + elif each.get('status') == 2: + claimed_status = "Claimed" + elif each.get('status') == 3: + claimed_status = "Pending" + dv = False + hdr = False + season = False + meta_hdr = meta.get('HDR', '') + is_season = re.search(r'S\d{2}', api_name) + if is_season and is_season == meta.get('season'): + season = True + if each.get('dv') and meta_hdr == "DV": + dv = True + if each.get('hdr') and meta_hdr in ("HDR10", "HDR10+", "HDR"): + hdr = True + if not each.get('dv') and "DV" not in meta_hdr: + dv = True + if not each.get('hdr') and meta_hdr not in ("HDR10", "HDR10+", "HDR"): + hdr = True + if 'remux' in each.get('resolution', '').lower(): + if 'uhd' in each.get('resolution', '').lower() and meta.get('resolution') == "2160p" and meta.get('type') == "REMUX": + resolution = True + type_name = True + elif 'uhd' not in each.get('resolution', '').lower() and meta.get('resolution') == "1080p" and meta.get('type') == "REMUX": + resolution = True + type_name = True + elif 'remux' not in each.get('resolution', '').lower() and meta.get('is_disc') == "BDMV": + if 'uhd' in each.get('resolution', '').lower() and meta.get('resolution') == "2160p": + resolution = True + type_name = True + elif 'uhd' not in each.get('resolution', '').lower() and meta.get('resolution') == "1080p": + resolution = True + type_name = True + elif each.get('resolution') == meta.get('resolution'): + resolution = True + if 'Blu-ray' in each.get('type') and meta.get('type') == "ENCODE": + type_name = True + elif 'WEB' in each.get('type') and 'WEB' in meta.get('type'): + type_name = True + if meta.get('category') == "MOVIE" and type_name and resolution and unclaimed and not internal and dv and hdr: + console.print(f"[bold blue]Found exact request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{claimed_status}[/bold yellow][/bold blue]") + console.print(f"[bold green]{api_name}:[/bold green] {each.get('url')}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': each.get('url', ''), + 'name': api_name, + 'bounty': api_bounty, + 'claimed': claimed_status + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + if meta.get('category') == "MOVIE" and type_name and resolution and unclaimed and not internal and not dv and not hdr and 'uhd' in each.get('resolution').lower(): + console.print(f"[bold blue]Found request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold 
yellow]{api_bounty}[/bold yellow] with mismatched HDR or DV[/bold blue]") + console.print(f"[bold green]{api_name}:[/bold green] {each.get('url')}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': each.get('url', ''), + 'name': api_name, + 'bounty': api_bounty, + 'claimed': claimed_status + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + if meta.get('category') == "TV" and season and type_name and resolution and unclaimed and not internal and dv and hdr: + console.print(f"[bold blue]Found exact request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{claimed_status}[/bold yellow][/bold blue]") + console.print(f"[bold yellow]{api_name}[/bold yellow] - [bold yellow]{meta.get('season')}:[/bold yellow] {each.get('url')}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': each.get('url', ''), + 'name': api_name, + 'bounty': api_bounty, + 'claimed': claimed_status + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + if meta.get('category') == "TV" and season and type_name and resolution and unclaimed and not internal and not dv and not hdr: + console.print(f"[bold blue]Found request match on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] with mismatched HDR or DV[/bold blue]") + console.print(f"[bold yellow]{api_name}[/bold yellow] - [bold yellow]{meta.get('season')}:[/bold yellow] {each.get('url')}") + console.print() + + if meta.get('uuid') not in existing_uuids: + request_entry = { + 'uuid': meta.get('uuid'), + 'path': meta.get('path', ''), + 'url': each.get('url', ''), + 'name': api_name, + 'bounty': api_bounty, + 'claimed': claimed_status + } + request_data.append(request_entry) + existing_uuids.add(meta.get('uuid')) + else: + console.print(f"[bold blue]Found request on [bold yellow]{tracker}[/bold yellow] with bounty [bold yellow]{api_bounty}[/bold yellow] and with status [bold yellow]{claimed_status}[/bold yellow][/bold blue]") + if internal: + console.print("[bold red]Request is internal only[/bold red]") + console.print(f"[bold yellow]{api_name}[/bold yellow] - {each.get('url')}") + console.print() + + # Save all logged requests to file + if request_data: + async with aiofiles.open(log_path, 'w', encoding='utf-8') as f: + await f.write(json.dumps(request_data, indent=4)) + + return requests + + results = await asyncio.gather(*[process_single_tracker(tracker) for tracker in trackers]) + match_found = any(results) + + return match_found + tracker_class_map = { - 'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'AR': AR, 'ASC': ASC, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'BT': BT, 'CBR': CBR, - 'DC': DC, 'DP': DP, 'FNP': FNP, 'FL': FL, 'FRIKI': FRIKI, 'HDB': HDB, 'HDS': HDS, 'HDT': HDT, 'HHD': HHD, 'HUNO': HUNO, 'ITT': ITT, - 'LCD': LCD, 'LDU': LDU, 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PT': PT, 'PTP': PTP, 'PTER': PTER, 'PTT': PTT, + 'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'AR': AR, 'ASC': ASC, 'AZ': AZ, 'BHD': BHD, 'BHDTV': BHDTV, 'BJS': BJS, 'BLU': BLU, 'BT': BT, 'CBR': CBR, + 'CZ': CZ, 'DC': DC, 'DP': DP, 'EMUW': EMUW, 'FNP': FNP, 'FF': FF, 'FL': FL, 'FRIKI': FRIKI, 'GPW': GPW, 'HDB': HDB, 'HDS': HDS, 'HDT': HDT, 'HHD': HHD, 'HUNO': HUNO, 
'ITT': ITT, + 'IHD': IHD, 'IS': IS, 'LCD': LCD, 'LDU': LDU, 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PHD': PHD, 'PT': PT, 'PTP': PTP, 'PTER': PTER, 'PTS': PTS, 'PTT': PTT, 'R4E': R4E, 'RAS': RAS, 'RF': RF, 'RTF': RTF, 'SAM': SAM, 'SHRI': SHRI, 'SN': SN, 'SP': SP, 'SPD': SPD, 'STC': STC, 'THR': THR, - 'TIK': TIK, 'TL': TL, 'TOCA': TOCA, 'TVC': TVC, 'TTG': TTG, 'UHD': UHD, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, 'YUS': YUS + 'TIK': TIK, 'TL': TL, 'TLZ': TLZ, 'TVC': TVC, 'TTG': TTG, 'TTR': TTR, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, 'YUS': YUS } api_trackers = { - 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'DP', 'FNP', 'FRIKI', 'HHD', 'HUNO', 'ITT', 'LCD', 'LDU', 'LST', 'LT', - 'OE', 'OTW', 'PSS', 'PT', 'PTT', 'RAS', 'RF', 'R4E', 'SAM', 'SHRI', 'SP', 'STC', 'TIK', 'TOCA', 'UHD', 'ULCX', 'UTP', 'YOINK', 'YUS' + 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'DP', 'EMUW', 'FNP', 'FRIKI', 'HHD', 'HUNO', 'IHD', 'ITT', 'LCD', 'LDU', 'LST', 'LT', + 'OE', 'OTW', 'PT', 'PTT', 'RAS', 'RF', 'R4E', 'SAM', 'SHRI', 'SP', 'STC', 'TIK', 'TLZ', 'TTR', 'ULCX', 'UTP', 'YOINK', 'YUS' } other_api_trackers = { - 'ANT', 'BHDTV', 'DC', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' + 'ANT', 'BHDTV', 'DC', 'GPW', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' } http_trackers = { - 'AR', 'ASC', 'BT', 'FL', 'HDB', 'HDS', 'HDT', 'MTV', 'PTER', 'TTG' + 'AR', 'ASC', 'AZ', 'BJS', 'BT', 'CZ', 'FF', 'FL', 'HDB', 'HDS', 'HDT', 'IS', 'MTV', 'PHD', 'PTER', 'PTS', 'TTG' } diff --git a/src/trackerstatus.py b/src/trackerstatus.py index 8051fc0e4..87e7cd760 100644 --- a/src/trackerstatus.py +++ b/src/trackerstatus.py @@ -1,17 +1,22 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import asyncio +import cli_ui +import copy import os +import sys + from torf import Torrent -from src.trackers.PTP import PTP -from src.trackersetup import TRACKER_SETUP, tracker_class_map, http_trackers -from src.console import console + from data.config import config +from src.cleanup import cleanup, reset_terminal from src.clients import Clients -from src.uphelper import UploadHelper -from src.torrentcreate import create_base_from_existing_torrent +from src.console import console from src.dupe_checking import filter_dupes from src.imdb import get_imdb_info_api -import cli_ui -import copy +from src.torrentcreate import create_base_from_existing_torrent +from src.trackers.PTP import PTP +from src.trackersetup import TRACKER_SETUP, tracker_class_map +from src.uphelper import UploadHelper async def process_all_trackers(meta): @@ -30,7 +35,7 @@ async def process_all_trackers(meta): async def process_single_tracker(tracker_name, shared_meta): nonlocal successful_trackers local_meta = copy.deepcopy(shared_meta) # Ensure each task gets its own copy of meta - local_tracker_status = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False} + local_tracker_status = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False, 'other': False} disctype = local_meta.get('disctype', None) if local_meta['name'].endswith('DUPE?'): @@ -42,8 +47,6 @@ async def process_single_tracker(tracker_name, shared_meta): if tracker_name in tracker_class_map: tracker_class = tracker_class_map[tracker_name](config=config) - if tracker_name in http_trackers: - await tracker_class.validate_credentials(meta) if tracker_name in {"THR", "PTP"}: if local_meta.get('imdb_id', 0) == 0: while True: @@ -51,10 +54,15 @@ async def process_single_tracker(tracker_name, shared_meta): local_meta['imdb_id'] = 0 local_tracker_status['skipped'] 
= True break - - imdb_id = cli_ui.ask_string( - f"Unable to find IMDB id, please enter e.g.(tt1234567) or press Enter to skip uploading to {tracker_name}:" - ) + try: + imdb_id = cli_ui.ask_string( + f"Unable to find IMDB id, please enter e.g.(tt1234567) or press Enter to skip uploading to {tracker_name}:" + ) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) if imdb_id is None or imdb_id.strip() == "": local_meta['imdb_id'] = 0 @@ -77,18 +85,20 @@ async def process_single_tracker(tracker_name, shared_meta): if local_meta['tracker_status'][tracker_name].get('skip_upload'): local_tracker_status['skipped'] = True - elif 'skipped' not in local_meta or local_meta['skipped'] is None: + elif 'skipped' not in local_meta and local_tracker_status['skipped'] is None: local_tracker_status['skipped'] = False if not local_tracker_status['banned'] and not local_tracker_status['skipped']: - if tracker_name == "AITHER": - if await tracker_setup.get_torrent_claims(local_meta, tracker_name): - local_tracker_status['skipped'] = True - else: - local_tracker_status['skipped'] = False + claimed = await tracker_setup.get_torrent_claims(local_meta, tracker_name) + if claimed: + local_tracker_status['skipped'] = True + else: + local_tracker_status['skipped'] = False if tracker_name not in {"PTP"} and not local_tracker_status['skipped']: dupes = await tracker_class.search_existing(local_meta, disctype) + if local_meta['tracker_status'][tracker_name].get('other', False): + local_tracker_status['other'] = True elif tracker_name == "PTP": ptp = PTP(config=config) groupID = await ptp.get_group_by_imdb(local_meta['imdb']) @@ -101,9 +111,17 @@ async def process_single_tracker(tracker_name, shared_meta): if ('skipping' not in local_meta or local_meta['skipping'] is None) and not local_tracker_status['skipped']: dupes = await filter_dupes(dupes, local_meta, tracker_name) - local_meta, is_dupe = await helper.dupe_check(dupes, local_meta, tracker_name) + meta['we_asked'] = False + is_dupe = await helper.dupe_check(dupes, local_meta, tracker_name) if is_dupe: local_tracker_status['dupe'] = True + + if tracker_name == "AITHER" and 'aither_trumpable' in local_meta: + meta['aither_trumpable'] = local_meta['aither_trumpable'] + + if f'{tracker_name}_cross_seed' in local_meta: + meta[f'{tracker_name}_cross_seed'] = local_meta[f'{tracker_name}_cross_seed'] + elif 'skipping' in local_meta: local_tracker_status['skipped'] = True @@ -135,14 +153,37 @@ async def process_single_tracker(tracker_name, shared_meta): console.print(f"[bold yellow]Tracker '{tracker_name}' passed all checks.") if ( not local_meta['unattended'] - or (local_meta['unattended'] and local_meta.get('unattended-confirm', False)) + or (local_meta['unattended'] and local_meta.get('unattended_confirm', False)) ) and not we_already_asked: - edit_choice = "y" if local_meta['unattended'] else input("Enter 'y' to upload, or press enter to skip uploading:") - if edit_choice.lower() == 'y': - local_tracker_status['upload'] = True - successful_trackers += 1 - else: - local_tracker_status['upload'] = False + try: + tracker_rename = await tracker_class.get_name(meta) + except Exception: + try: + tracker_rename = await tracker_class.edit_name(meta) + except Exception: + tracker_rename = None + + display_name = None + if tracker_rename is not None: + if isinstance(tracker_rename, dict) and 'name' in tracker_rename: + display_name = tracker_rename['name'] + elif isinstance(tracker_rename, str): + 
display_name = tracker_rename + + if display_name is not None and display_name != "" and display_name != meta['name'] and not meta.get('cross_seeding', False): + console.print(f"[bold yellow]{tracker_name} applies a naming change for this release: [green]{display_name}[/green][/bold yellow]") + try: + edit_choice = "y" if local_meta['unattended'] else input("Enter 'y' to upload, or press enter to skip uploading:") + if edit_choice.lower() == 'y': + local_tracker_status['upload'] = True + successful_trackers += 1 + else: + local_tracker_status['upload'] = False + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) else: local_tracker_status['upload'] = True successful_trackers += 1 diff --git a/src/tvdb.py b/src/tvdb.py index 23299b0e4..10f12866e 100644 --- a/src/tvdb.py +++ b/src/tvdb.py @@ -1,662 +1,398 @@ -import httpx +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# Restricted-use credential — permitted only under UAPL v1.0 and associated service provider terms +import asyncio +import base64 import re -from src.console import console -from data.config import config - -config = config - - -async def get_tvdb_episode_data(base_dir, token, tvdb_id, season, episode, api_key=None, retry_attempted=False, debug=False): - if debug: - console.print(f"[cyan]Fetching TVDb episode data for S{season}E{episode}...[/cyan]") - - url = f"https://api4.thetvdb.com/v4/series/%7Btvdb_id%7D/episodes/default" - params = { - "page": 1, - "season": season, - "episodeNumber": episode - } - headers = { - "accept": "application/json", - "Authorization": f"Bearer {token}" - } - - try: - async with httpx.AsyncClient() as client: - response = await client.get(url, params=params, headers=headers, timeout=30.0) - - # Handle unauthorized responses - if response.status_code == 401: - # Only attempt a retry once to prevent infinite loops - if api_key and not retry_attempted: - console.print("[yellow]Unauthorized access. Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - # Retry the request with the new token - return await get_tvdb_episode_data( - base_dir, new_token, tvdb_id, season, episode, api_key, True - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None - else: - console.print("[red]Unauthorized access to TVDb API[/red]") - return None - - response.raise_for_status() - data = response.json() - - # Check for "Unauthorized" message in response body - if data.get("message") == "Unauthorized": - if api_key and not retry_attempted: - console.print("[yellow]Token invalid or expired. 
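The upload prompt above accepts either helper shape: a get_name() returning a dict with a 'name' key, as the UNIT3D trackers now do, or a legacy edit_name() returning a bare string. A tiny sketch of both return shapes; the renaming rule itself is made up:

# Both shapes satisfy the display_name handling above; the ':' replacement is
# only an example of a tracker-specific naming rule.
async def get_name(self, meta):
    # New-style helper: dict with a 'name' key
    return {'name': meta['name'].replace(':', ' -')}

async def edit_name(self, meta):
    # Legacy helper: a bare string is also accepted
    return meta['name'].replace(':', ' -')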
Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - return await get_tvdb_episode_data( - base_dir, new_token, tvdb_id, season, episode, api_key, True - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None - else: - console.print("[red]Unauthorized response from TVDb API[/red]") - return None - - if data.get("status") == "success" and data.get("data") and data["data"].get("episodes"): - episode_data = data["data"]["episodes"][0] - series_data = data["data"].get("series", {}) - - result = { - "episode_name": episode_data.get("name", ""), - "overview": episode_data.get("overview", ""), - "season_number": episode_data.get("seasonNumber", season), - "episode_number": episode_data.get("number", episode), - "air_date": episode_data.get("aired", ""), - "season_name": episode_data.get("seasonName", ""), - "series_name": series_data.get("name", ""), - "series_overview": series_data.get("overview", ""), - 'series_year': series_data.get("year", ""), - } - - if debug: - console.print(f"[green]Found episode: {result['season_name']} - S{result['season_number']}E{result['episode_number']} - {result['episode_name']}[/green] - {result['air_date']}") - console.print(f"[yellow]Overview: {result['overview']}") - console.print(f"[yellow]Series: {result['series_name']} - {result['series_overview']}[/yellow]") - return result - else: - console.print(f"[yellow]No TVDB episode data found for S{season}E{episode}[/yellow]") - return None +from tvdb_v4_official import TVDB - except httpx.HTTPStatusError as e: - console.print(f"[red]HTTP error occurred: {e.response.status_code} - {e.response.text}[/red]") - return None - except httpx.RequestError as e: - console.print(f"[red]Request error occurred: {e}[/red]") - return None - except Exception as e: - console.print(f"[red]Error fetching TVDb episode data: {e}[/red]") - return None +from src.console import console -async def get_tvdb_token(api_key, base_dir): - console.print("[cyan]Authenticating with TVDb API...[/cyan]") +def _get_tvdb_k() -> str: + k = ( + b"MDEwMTEwMDEwMDExMDAxMDAxMDExMDAxMDExMTEwMDAwMTAwMTExMDAxMTAxMTAxMDEwMTAwMDEwMDExMDEwMD" + b"AxMDAxMTAxMDExMDEwMTAwMTAxMDAwMTAxMTEwMTAwMDEwMTEwMDEwMDExMDAxMDAxMDAxMDAxMDAxMTAwMTEw" + b"MTAwMTEwMTAxMDEwMDExMDAxMTAwMDAwMDExMDAwMDAxMDAxMTEwMDExMDEwMTAwMTEwMTAwMDAxMTAxMTAwMD" + b"EwMDExMDAwMTAxMDExMTAxMDAwMTAxMDAxMTAwMTAwMTAxMTAwMTAxMDEwMTAwMDEwMDAxMDEwMTExMDEwMDAx" + b"MDAxMTEwMDExMTEwMTAwMTAwMDAxMDAxMTAxMDEwMDEwMTEwMTAwMTAwMDExMTAxMDEwMDAxMDExMTEwMDAwMT" + b"AxMTAwMTAxMDEwMTExMDEwMTAwMTAwMTEwMTAxMDAxMDAxMTEwMDEwMTAxMTEwMTAxMTAxMDAxMTAxMDEw" + ) + binary_bytes = base64.b64decode(k) + b64_bytes = bytes( + int(binary_bytes[i: i + 8], 2) for i in range(0, len(binary_bytes), 8) + ) + return base64.b64decode(b64_bytes).decode() - url = "https://api4.thetvdb.com/v4/login" - headers = { - "accept": "application/json", - "Content-Type": "application/json" - } - payload = { - "apikey": api_key, - "pin": "string" # Default value as specified in the example - } - try: - async with httpx.AsyncClient() as client: - response = await client.post(url, json=payload, headers=headers, timeout=30.0) - response.raise_for_status() - data = response.json() +tvdb = TVDB(_get_tvdb_k()) - if data.get("status") == "success" and data.get("data") and data["data"].get("token"): - token = data["data"]["token"] - console.print("[green]Successfully authenticated with TVDb[/green]") - console.print(f"[bold yellow]New TVDb token: {token[:10]}...[/bold 
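_get_tvdb_k() above reverses a double encoding: the stored blob is base64 of an ASCII bit string, and that bit string encodes the base64 of the credential. A toy round-trip with a made-up value (not the real key) showing the same steps in both directions:

import base64

secret = "not-a-real-key"                                    # made-up value
b64 = base64.b64encode(secret.encode()).decode()             # plain base64
bits = "".join(f"{byte:08b}" for byte in b64.encode())       # ASCII -> bit string
k = base64.b64encode(bits.encode())                          # what gets stored

# _get_tvdb_k() walks the same steps backwards:
binary_bytes = base64.b64decode(k)
b64_bytes = bytes(int(binary_bytes[i:i + 8], 2) for i in range(0, len(binary_bytes), 8))
assert base64.b64decode(b64_bytes).decode() == secret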
yellow]") - # Update the token in the in-memory configuration - config['DEFAULT']['tvdb_token'] = f'"{token}"' +class tvdb_data: + def __init__(self, config): + self.config = config + pass - # Save the updated config to disk - try: - # Get the config file path - config_path = f"{base_dir}/data/config.py" - - # Read the current config file - with open(config_path, 'r', encoding='utf-8') as file: - config_data = file.read() - - token_pattern = '"tvdb_token":' - if token_pattern in config_data: - # Find the line with tvdb_token - lines = config_data.splitlines() - for i, line in enumerate(lines): - if token_pattern in line: - # Split the line at the colon and keep everything before it - prefix = line.split(':', 1)[0] - # Create a new line with the updated token - lines[i] = f'{prefix}: "{token}",' + async def search_tvdb_series(self, filename, year=None, debug=False): + if debug: + console.print(f"filename for TVDB search: {filename} year: {year}") + results = tvdb.search({filename}, year=year, type="series", lang="eng") + await asyncio.sleep(0.1) + try: + if results and len(results) > 0: + # Try to find the best match based on year + best_match = None + search_year = str(year) if year else '' + + if search_year: + # First, try to find exact year match + for result in results: + if result.get('year') == search_year: + best_match = result + break + + # If no exact match, check aliases for year-based names + if not best_match and search_year: + for result in results: + aliases = result.get('aliases', []) + if aliases: + # Check if any alias contains the year in parentheses + for alias in aliases: + alias_name = alias.get('name', '') if isinstance(alias, dict) else alias + if f"({search_year})" in alias_name: + best_match = result + break + if best_match: break - # Rejoin the lines and write back to the file - new_config_data = '\n'.join(lines) - with open(config_path, 'w', encoding='utf-8') as file: - file.write(new_config_data) + # If still no match, use first result + if not best_match: + best_match = results[0] + + series_id = best_match['tvdb_id'] + if debug: + console.print(f"[blue]TVDB series ID: {series_id}[/blue]") + return results, series_id + else: + console.print("[yellow]No TVDB results found[/yellow]") + return None, None + except Exception as e: + console.print(f"[red]Error: {e}[/red]") + return None, None + + async def get_tvdb_episodes(self, series_id, debug=False): + try: + # Get all episodes for the series with pagination + all_episodes = [] + page = 0 + max_pages = 20 # Safety limit to prevent infinite loops + + while page < max_pages: + if debug and page > 0: + console.print(f"[cyan]Fetching TVDB episodes page {page + 1}[/cyan]") - console.print(f"[bold green]TVDb token successfully saved to {config_path}[/bold green]") + try: + episodes_response = tvdb.get_series_episodes( + series_id, + season_type="default", + page=page, + lang="eng" + ) + + # Handle both dict response and direct episodes list + if isinstance(episodes_response, dict): + current_episodes = episodes_response.get('episodes', []) else: - console.print("[yellow]Warning: Could not find tvdb_token in configuration file[/yellow]") - console.print("[yellow]The token will be used for this session only.[/yellow]") + # Fallback for direct list response + current_episodes = episodes_response if isinstance(episodes_response, list) else [] - except Exception as e: - console.print(f"[yellow]Warning: Could not update TVDb token in configuration file: {e}[/yellow]") - console.print("[yellow]The token will be used for this 
session only.[/yellow]") + if not current_episodes: + if debug: + console.print(f"[yellow]No episodes found on page {page + 1}, stopping pagination[/yellow]") + break - return token - else: - console.print("[red]Failed to get TVDb token: Invalid response format[/red]") - return None - - except httpx.HTTPStatusError as e: - console.print(f"[red]HTTP error occurred during TVDb authentication: {e.response.status_code} - {e.response.text}[/red]") - return None - except httpx.RequestError as e: - console.print(f"[red]Request error occurred during TVDb authentication: {e}[/red]") - return None - except Exception as e: - console.print(f"[red]Error authenticating with TVDb: {e}[/red]") - return None + all_episodes.extend(current_episodes) + if debug: + console.print(f"[cyan]Retrieved {len(current_episodes)} episodes from page {page + 1} (total: {len(all_episodes)})[/cyan]") -async def get_tvdb_series_episodes(base_dir, token, tvdb_id, season, episode, api_key=None, retry_attempted=False, debug=False): - if debug: - console.print(f"[cyan]Fetching episode list for series ID {tvdb_id}...[/cyan]") - - url = f"https://api4.thetvdb.com/v4/series/%7Btvdb_id%7D/extended?meta=episodes&short=false" - headers = { - "accept": "application/json", - "Authorization": f"Bearer {token}" - } - - all_episodes = [] - - try: - async with httpx.AsyncClient() as client: - response = await client.get(url, headers=headers, timeout=30.0) - - # Handle unauthorized responses - if response.status_code == 401: - # Only attempt a retry once to prevent infinite loops - if api_key and not retry_attempted: - console.print("[yellow]Unauthorized access. Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - # Retry the request with the new token - return await get_tvdb_series_episodes( - base_dir, new_token, tvdb_id, season, episode, api_key, True - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return (season, episode) - else: - console.print("[red]Unauthorized access to TVDb API[/red]") - return (season, episode) - - response.raise_for_status() - data = response.json() - - # Check for "Unauthorized" message in response body - if data.get("message") == "Unauthorized": - if api_key and not retry_attempted: - console.print("[yellow]Token invalid or expired. 
Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - return await get_tvdb_series_episodes( - base_dir, new_token, tvdb_id, season, episode, api_key, True - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return (season, episode) - else: - console.print("[red]Unauthorized response from TVDb API[/red]") - return (season, episode) + # If we got fewer than 500 results, we've reached the end + if len(current_episodes) < 500: + if debug: + console.print(f"[cyan]Page {page + 1} returned {len(current_episodes)} episodes (< 500), pagination complete[/cyan]") + break - if data.get("status") == "success" and data.get("data"): - episodes = data["data"].get("episodes", []) - all_episodes = episodes + page += 1 + await asyncio.sleep(0.1) # Rate limiting - if not all_episodes: - console.print(f"[yellow]No episodes found for TVDB series ID {tvdb_id}[/yellow]") - return (season, episode) + except Exception as page_error: + if debug: + console.print(f"[yellow]Error fetching page {page + 1}: {page_error}[/yellow]") + # If first page fails, re-raise; otherwise, stop pagination + if page == 0: + raise page_error + else: + break if debug: - console.print(f"[cyan]Looking for season {season} episode {episode} in series {tvdb_id}[/cyan]") - - # Process and organize episode data - episodes_by_season = {} - absolute_mapping = {} # Map absolute numbers to season/episode - - # Sort by aired date first (if available) - def get_aired_date(ep): - aired = ep.get("aired") - # Return default value if aired is None or not present - if aired is None: - return "9999-99-99" - return aired - - all_episodes.sort(key=get_aired_date) - - for ep in all_episodes: - season_number = ep.get("seasonNumber") - episode_number = ep.get("number") - absolute_episode_count = ep.get("absoluteNumber") - - # Ensure season_number is valid and convert to int if needed - if season_number is not None: - try: - season_number = int(season_number) - except (ValueError, TypeError): - console.print(f"[yellow]Invalid season number: {season_number}, skipping episode[/yellow]") - continue - else: - console.print(f"[yellow]Missing season number for episode {ep.get('name', 'Unknown')}, skipping[/yellow]") - continue - - # Ensure episode_number is valid - if episode_number is not None: - try: - episode_number = int(episode_number) - except (ValueError, TypeError): - console.print(f"[yellow]Invalid episode number: {episode_number}, skipping episode[/yellow]") - continue - - # Handle special seasons (e.g., season 0) - is_special = season_number == 0 - - if not is_special: - # Store mapping of absolute number to season/episode - absolute_mapping[absolute_episode_count] = { - "season": season_number, - "episode": episode_number, - "episode_data": ep - } - - episode_data = { - "id": ep.get("id"), - "name": ep.get("name", ""), - "overview": ep.get("overview", ""), - "seasonNumber": season_number, - "episodeNumber": episode_number, - "absoluteNumber": absolute_episode_count if not is_special else None, - "aired": ep.get("aired"), - "runtime": ep.get("runtime"), - "imageUrl": ep.get("image"), - "thumbUrl": ep.get("thumbnail"), - "isMovie": ep.get("isMovie", False), - "airsAfterSeason": ep.get("airsAfterSeason"), - "airsBeforeSeason": ep.get("airsBeforeSeason"), - "airsBeforeEpisode": ep.get("airsBeforeEpisode"), - "productionCode": ep.get("productionCode", ""), - "finaleType": ep.get("finaleType", ""), - "year": ep.get("year") - } - - # Create a season entry if it doesn't exist - if 
season_number not in episodes_by_season: - episodes_by_season[season_number] = [] - - # Add the episode to its season - episodes_by_season[season_number].append(episode_data) - - # Sort episodes within each season by episode number - for s in episodes_by_season: - valid_episodes = [ep for ep in episodes_by_season[s] if ep["episodeNumber"] is not None] - episodes_by_season[s] = sorted(valid_episodes, key=lambda ep: ep["episodeNumber"]) - - # If season and episode were provided, try to find the matching episode - if season is not None and episode is not None: - found_episode = None - - # Ensure season is an integer - try: - season = int(season) - except (ValueError, TypeError): + console.print(f"[green]Total episodes retrieved: {len(all_episodes)} across {page + 1} page(s)[/green]") + + # Create the response structure + episodes_data = { + 'episodes': all_episodes, + 'aliases': [] # Will be populated if available from first response + } + + # Try to get aliases from series info (may need separate call) + try: + if all_episodes: + # Get series details for aliases + series_info = tvdb.get_series_extended(series_id) + if 'aliases' in series_info: + episodes_data['aliases'] = series_info['aliases'] + except Exception as alias_error: + if debug: + console.print(f"[yellow]Could not retrieve series aliases: {alias_error}[/yellow]") + + # Extract specific English alias only if it contains a year (e.g., "Cats eye (2025)") + specific_alias = None + if 'aliases' in episodes_data and episodes_data['aliases']: + # Pattern to match a 4-digit year in parentheses + year_pattern = re.compile(r'\((\d{4})\)') + eng_aliases = [ + alias['name'] for alias in episodes_data['aliases'] + if alias.get('language') == 'eng' and year_pattern.search(alias['name']) + ] + if eng_aliases: + # Get the last English alias with year (usually the most specific one) + specific_alias = eng_aliases[-1] if debug: - console.print(f"[yellow]Invalid season number provided: {season}, using as-is[/yellow]") + console.print(f"[blue]English alias with year: {specific_alias}[/blue]") + + return episodes_data, specific_alias + + except Exception as e: + console.print(f"[red]Error getting episodes: {e}[/red]") + return None, None + + async def get_tvdb_by_external_id(self, imdb, tmdb, debug=False, tv_movie=False): + # Try IMDB first if available + if imdb: + try: + if isinstance(imdb, str) and imdb.startswith('tt'): + imdb_formatted = imdb + elif isinstance(imdb, str) and imdb.isdigit(): + imdb_formatted = f"tt{int(imdb):07d}" + elif isinstance(imdb, int): + imdb_formatted = f"tt{imdb:07d}" + else: + imdb_formatted = str(imdb) if debug: - console.print(f"[cyan]Looking for season {season} (type: {type(season)}) in episodes_by_season keys: {sorted(episodes_by_season.keys())} (types: {[type(s) for s in episodes_by_season.keys()]})[/cyan]") + console.print(f"[cyan]Trying TVDB lookup with IMDB ID: {imdb_formatted}[/cyan]") - # First try to find the episode in the specified season - if season in episodes_by_season: - if debug: - console.print(f"[green]Found season {season} in episodes_by_season[/green]") + results = tvdb.search_by_remote_id(imdb_formatted) + await asyncio.sleep(0.1) - # Convert episode to int if not already - try: - episode = int(episode) - except (ValueError, TypeError): - if debug: - console.print(f"[yellow]Invalid episode number provided: {episode}, using as-is[/yellow]") + if results and len(results) > 0: + if debug: + console.print(f"[blue]results: {results}[/blue]") - max_episode_in_season = max([ep["episodeNumber"] or 0 for 
ep in episodes_by_season[season]]) + # Look for series results first + for result in results: + if 'series' in result: + series_id = result['series']['id'] + if debug: + console.print(f"[blue]TVDB series ID from IMDB: {series_id}[/blue]") + return series_id + + # If tv_movie is True, check for episode with seriesId first, then movie + if tv_movie: + # Check if any result has an episode with a seriesId + for result in results: + if 'episode' in result and result['episode'].get('seriesId'): + series_id = result['episode']['seriesId'] + if debug: + console.print(f"[blue]TVDB series ID from episode entry (tv_movie): {series_id}[/blue]") + return series_id - if episode <= max_episode_in_season: - # Episode exists in this season normally - for ep in episodes_by_season[season]: - if ep["episodeNumber"] == episode: - found_episode = ep + # If no episode with seriesId, accept movie results + for result in results: + if 'movie' in result: + movie_id = result['movie']['id'] if debug: - console.print(f"[green]Found episode S{season}E{episode} directly: {ep['name']}[/green]") - # Since we found it directly, return the original season and episode - return (season, episode) - else: - # Episode number is greater than max in this season, so try absolute numbering - if debug: - console.print(f"[yellow]Episode {episode} is greater than max episode ({max_episode_in_season}) in season {season}[/yellow]") - console.print("[yellow]Trying to find by absolute episode number...[/yellow]") - - # Calculate absolute episode number - absolute_number = episode - for s in range(1, season): - if s in episodes_by_season: - absolute_number += len(episodes_by_season[s]) - - if absolute_number in absolute_mapping: - actual_season = absolute_mapping[absolute_number]["season"] - actual_episode = absolute_mapping[absolute_number]["episode"] - - # Find the episode in the seasons data - for ep in episodes_by_season[actual_season]: - if ep["episodeNumber"] == actual_episode: - found_episode = ep - if debug: - console.print(f"[green]Found by absolute number {absolute_number}: S{actual_season}E{actual_episode} - {ep['name']}[/green]") - console.print(f"[bold yellow]Note: S{season}E{episode} maps to S{actual_season}E{actual_episode} using absolute numbering[/bold yellow]") - # Return the absolute-based season and episode since that's what corresponds to the actual content - return (actual_season, actual_episode) - else: - if debug: - console.print(f"[red]Could not find episode with absolute number {absolute_number}[/red]") - # Return original values if absolute mapping failed - return (season, episode) - else: - if debug: - console.print(f"[red]Season {season} not found in series[/red]") - # Return original values if season wasn't found - return (season, episode) + console.print(f"[blue]TVDB movie ID from IMDB (tv_movie): {movie_id}[/blue]") + return movie_id - # If we get here and haven't returned yet, return the original values - if not found_episode: if debug: - console.print(f"[yellow]No matching episode found, keeping original S{season}E{episode}[/yellow]") - return (season, episode) - - # If we get here, no specific episode was requested or processing, so return the original values - return (season, episode) - - except httpx.HTTPStatusError as e: - console.print(f"[red]HTTP error occurred: {e.response.status_code} - {e.response.text}[/red]") - return (season, episode) - except httpx.RequestError as e: - console.print(f"[red]Request error occurred: {e}[/red]") - return (season, episode) - except Exception as e: - 
console.print(f"[red]Error fetching TVDb episode list: {str(e)}[/red]") - import traceback - console.print(f"[dim]{traceback.format_exc()}[/dim]") - return (season, episode) - - -async def get_tvdb_series_data(base_dir, token, tvdb_id, api_key=None, retry_attempted=False, debug=False): - url = f"https://api4.thetvdb.com/v4/series/%7Btvdb_id%7D" - headers = { - "accept": "application/json", - "Authorization": f"Bearer {token}" - } - - try: - async with httpx.AsyncClient() as client: - response = await client.get(url, headers=headers, timeout=30.0) - - if response.status_code == 401: - if api_key and not retry_attempted: - console.print("[yellow]Unauthorized access. Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - return await get_tvdb_series_data( - base_dir, new_token, tvdb_id, api_key, True, debug - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None - else: - console.print("[red]Unauthorized access to TVDb API[/red]") - return None - - response.raise_for_status() - data = response.json() - - if data.get("message") == "Unauthorized": - if api_key and not retry_attempted: - console.print("[yellow]Token invalid or expired. Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(api_key, base_dir) - if new_token: - return await get_tvdb_series_data( - base_dir, new_token, tvdb_id, api_key, True, debug - ) - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None + result_types = [list(result.keys())[0] for result in results if result] + console.print(f"[yellow]IMDB search returned results but no {'series or movie' if tv_movie else 'series'} found (got: {result_types})[/yellow]") else: - console.print("[red]Unauthorized response from TVDb API[/red]") - return None - - if data.get("status") == "success" and data.get("data"): - series_data = data["data"] - series_name = series_data.get("name") + if debug: + console.print("[yellow]No TVDB series found for IMDB ID[/yellow]") + except Exception as e: if debug: - console.print(f"[bold cyan]TVDB series name: {series_name}[/bold cyan]") - return series_name - else: - console.print(f"[yellow]No TVDb series data found for {tvdb_id}[/yellow]") - return None - - except httpx.HTTPStatusError as e: - console.print(f"[red]HTTP error occurred: {e.response.status_code} - {e.response.text}[/red]") - return None - except httpx.RequestError as e: - console.print(f"[red]Request error occurred: {e}[/red]") - return None - except Exception as e: - console.print(f"[red]Error fetching TVDb series data: {e}[/red]") - return None + console.print(f"[red]Error getting TVDB by IMDB ID: {e}[/red]") + if tmdb: + try: + tmdb_str = str(tmdb) -async def get_tvdb_series(base_dir, title, year, apikey=None, token=None, debug=False): - if debug: - console.print(f"[cyan]Searching for TVDb series: {title} ({year})...[/cyan]") - - # Validate inputs - if not apikey: - console.print("[red]No TVDb API key provided[/red]") - return 0 + if debug: + console.print(f"[cyan]Trying TVDB lookup with TMDB ID: {tmdb_str}[/cyan]") - if not token: - console.print("[red]No TVDb token provided[/red]") - return 0 + results = tvdb.search_by_remote_id(tmdb_str) + await asyncio.sleep(0.1) - if not title: - console.print("[red]No title provided for TVDb search[/red]") - return 0 + if results and len(results) > 0: + if debug: + console.print(f"[blue]results: {results}[/blue]") - async def search_tvdb(search_title, search_year=None, attempt_description=""): - """Helper 
function to perform the actual search""" - url = "https://api4.thetvdb.com/v4/search" - headers = { - "accept": "application/json", - "Authorization": f"Bearer {token}" - } - params = { - "query": search_title - } + # Look for series results first + for result in results: + if 'series' in result: + series_id = result['series']['id'] + if debug: + console.print(f"[blue]TVDB series ID from TMDB: {series_id}[/blue]") + return series_id + + # If tv_movie is True, check for episode with seriesId first, then movie + if tv_movie: + # Check if any result has an episode with a seriesId + for result in results: + if 'episode' in result and result['episode'].get('seriesId'): + series_id = result['episode']['seriesId'] + if debug: + console.print(f"[blue]TVDB series ID from episode entry (tv_movie): {series_id}[/blue]") + return series_id - if search_year: - params["year"] = search_year + # If no episode with seriesId, accept movie results + for result in results: + if 'movie' in result: + movie_id = result['movie']['id'] + if debug: + console.print(f"[blue]TVDB movie ID from TMDB (tv_movie): {movie_id}[/blue]") + return movie_id - if debug: - console.print(f"[cyan]{attempt_description}Searching with query: '{search_title}'{f' (year: {search_year})' if search_year else ''}[/cyan]") - - async with httpx.AsyncClient() as client: - response = await client.get(url, params=params, headers=headers, timeout=30.0) - - if response.status_code == 401: - console.print("[yellow]Unauthorized access. Token may be expired. Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(apikey, base_dir) - if new_token: - headers["Authorization"] = f"Bearer {new_token}" - response = await client.get(url, params=params, headers=headers, timeout=30.0) - response.raise_for_status() - else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None - else: - response.raise_for_status() - - data = response.json() - - if data.get("message") == "Unauthorized": - console.print("[yellow]Token invalid or expired. 
Refreshing TVDb token...[/yellow]") - new_token = await get_tvdb_token(apikey, base_dir) - if new_token: - headers["Authorization"] = f"Bearer {new_token}" - response = await client.get(url, params=params, headers=headers, timeout=30.0) - response.raise_for_status() - data = response.json() + if debug: + result_types = [list(result.keys())[0] for result in results if result] + console.print(f"[yellow]TMDB search returned results but no {'series or movie' if tv_movie else 'series'} found (got: {result_types})[/yellow]") else: - console.print("[red]Failed to refresh TVDb token[/red]") - return None - - return data + if debug: + console.print("[yellow]No TVDB series found for TMDB ID[/yellow]") + except Exception as e: + if debug: + console.print(f"[red]Error getting TVDB by TMDB ID: {e}[/red]") - def names_match(series_name, search_title): - """Check if series name matches the search title (case-insensitive, basic cleanup)""" - if not series_name or not search_title: - return False + result_type_str = "series or movie" if tv_movie else "series" + console.print(f"[yellow]No TVDB {result_type_str} found for any available external ID[/yellow]") + return None - series_clean = series_name.lower().strip() - title_clean = search_title.lower().strip() - if series_clean == title_clean: - return True + async def get_imdb_id_from_tvdb_episode_id(self, episode_id, debug=False): + try: + episode_data = tvdb.get_episode_extended(episode_id) + if debug: + console.print(f"[yellow]Episode data retrieved for episode ID {episode_id}[/yellow]") - series_cleaned = re.sub(r'[^\w\s]', '', series_clean) - title_cleaned = re.sub(r'[^\w\s]', '', title_clean) + remote_ids = episode_data.get('remoteIds', []) + imdb_id = None - return series_cleaned == title_cleaned + if isinstance(remote_ids, list): + for remote_id in remote_ids: + if remote_id.get('type') == 2 or remote_id.get('sourceName') == 'IMDB': + imdb_id = remote_id.get('id') + break - try: - # First attempt: Search with title and year (if year provided) - data = await search_tvdb(title, year, "Initial attempt: ") + if imdb_id and debug: + console.print(f"[blue]TVDB episode ID: {episode_id} maps to IMDB ID: {imdb_id}[/blue]") + elif debug: + console.print(f"[yellow]No IMDB ID found for TVDB episode ID: {episode_id}[/yellow]") - if data and data.get("status") == "success" and data.get("data"): - all_results = data["data"] - series_list = [item for item in all_results if item.get("type") == "series"] + return imdb_id + except Exception as e: + console.print(f"[red]Error getting IMDB ID from TVDB episode ID: {e}[/red]") + return None - if debug: - console.print(f"[green]Found {len(all_results)} total results, {len(series_list)} series matches[/green]") - if series_list: - for i, series in enumerate(series_list[:3]): - name = series.get("name", "Unknown") - year_found = series.get("year", "Unknown") - tvdb_id = series.get("tvdb_id", "Unknown") - console.print(f"[cyan] {i+1}. 
{name} ({year_found}) - ID: {tvdb_id}[/cyan]") - - # Check if we found series and if the first result matches our title - if series_list: - first_series = series_list[0] - series_name = first_series.get("name", "") - - if names_match(series_name, title): - tvdb_id = first_series.get("tvdb_id") - series_year = first_series.get("year", "Unknown") + async def get_specific_episode_data(self, data, season, episode, debug=False): + if debug: + console.print("[yellow]Getting specific episode data from TVDB data[/yellow]") - if debug: - console.print(f"[green]Title match found: {series_name} ({series_year}) - ID: {tvdb_id}[/green]") - return tvdb_id + # Handle both dict (full series data) and list (episodes only) formats + if isinstance(data, dict): + episodes = data.get('episodes', []) + elif isinstance(data, list): + episodes = data + else: + console.print("[red]No episode data available or invalid format[/red]") + return None, None, None, None, None, None, None - elif year: - if debug: - console.print(f"[yellow]Series name '{series_name}' doesn't match title '{title}'. Retrying without year...[/yellow]") + if not episodes: + console.print("[red]No episodes found in data[/red]") + return None, None, None, None, None, None, None - # Second attempt: Search without year - data2 = await search_tvdb(title, None, "Retry without year: ") + # Convert season and episode to int for comparison + try: + season_int = int(season) if season is not None else None + episode_int = int(episode) if episode is not None and episode != 0 else None + except (ValueError, TypeError) as e: + console.print(f"[red]Invalid season or episode format: season={season}, episode={episode}, error={e}[/red]") + return None, None, None, None, None, None, None - if data2 and data2.get("status") == "success" and data2.get("data"): - all_results2 = data2["data"] - series_list2 = [item for item in all_results2 if item.get("type") == "series"] + if season_int is None: + console.print(f"[red]Season is None after conversion: season_int={season_int}[/red]") + return None, None, None, None, None, None, None - if debug: - console.print(f"[green]Retry found {len(all_results2)} total results, {len(series_list2)} series matches[/green]") - if series_list2: - for i, series in enumerate(series_list2[:3]): - name = series.get("name", "Unknown") - year_found = series.get("year", "Unknown") - tvdb_id = series.get("tvdb_id", "Unknown") - console.print(f"[cyan] {i+1}. {name} ({year_found}) - ID: {tvdb_id}[/cyan]") - - # Look for a better match in the new results - for series in series_list2: - series_name2 = series.get("name", "") - if names_match(series_name2, title): - tvdb_id = series.get("tvdb_id") - series_year = series.get("year", "Unknown") + if debug: + console.print(f"[blue]Total episodes retrieved from TVDB: {len(episodes)}[/blue]") + console.print(f"[blue]Looking for Season: {season_int}, Episode: {episode_int}[/blue]") - if debug: - console.print(f"[green]Better match found without year: {series_name2} ({series_year}) - ID: {tvdb_id}[/green]") - return tvdb_id - else: - if debug: - console.print(f"[yellow]No results found in retry without year for '{title}'[/yellow]") - return 0 - else: + # If episode_int is None or 0, return first episode of the season + if episode_int is None or episode_int == 0: + for ep in episodes: + if ep.get('seasonNumber') == season_int: if debug: - console.print(f"[yellow]Series name '{series_name}' doesn't match title '{title}' and no year provided. 
No further attempts will be made.[/yellow]") - return 0 - - else: + console.print(f"[green]Found first episode of season {season_int}: S{season_int:02d}E{ep.get('number'):02d} - {ep.get('name')}[/green]") + return ( + ep.get('seasonName'), + ep.get('name'), + ep.get('overview'), + ep.get('seasonNumber'), + ep.get('number'), + ep.get('year'), + ep.get('id') + ) + + # Try to find exact season/episode match + for ep in episodes: + if ep.get('seasonNumber') == season_int and ep.get('number') == episode_int: if debug: - console.print("[yellow]No series found in search results[/yellow]") - return 0 - else: - if debug: - console.print(f"[yellow]No TVDb results found for '{title}' ({year or 'no year'})[/yellow]") - if data and data.get("message"): - console.print(f"[yellow]API message: {data['message']}[/yellow]") - return 0 - - except httpx.HTTPStatusError as e: - if e.response.status_code == 401: - console.print("[red]Invalid API key or unauthorized access to TVDb[/red]") - elif e.response.status_code == 404: - console.print(f"[yellow]No results found for '{title}' ({year or 'no year'})[/yellow]") - elif e.response.status_code == 400: - console.print("[red]Bad request - check search parameters[/red]") - if debug: - console.print(f"[red]Response: {e.response.text}[/red]") - else: - console.print(f"[red]HTTP error occurred: {e.response.status_code} - {e.response.text}[/red]") - return 0 - except httpx.RequestError as e: - console.print(f"[red]Request error occurred: {e}[/red]") - return 0 - except Exception as e: - console.print(f"[red]Error searching TVDb series: {e}[/red]") - return 0 + console.print(f"[green]Found exact match: S{season_int:02d}E{episode_int:02d} - {ep.get('name')}[/green]") + return ( + ep.get('seasonName'), + ep.get('name'), + ep.get('overview'), + ep.get('seasonNumber'), + ep.get('number'), + ep.get('year'), + ep.get('id') + ) + + # Try to find an episode with this absolute number directly + console.print("[yellow]No exact match found, trying absolute number mapping...[/yellow]") + for ep in episodes: + if ep.get('absoluteNumber') == episode_int: + mapped_season = ep.get('seasonNumber') + mapped_episode = ep.get('number') + if debug: + console.print(f"[green]Mapped absolute #{episode_int} -> S{mapped_season:02d}E{mapped_episode:02d} - {ep.get('name')}[/green]") + return ( + ep.get('seasonName'), + ep.get('name'), + ep.get('overview'), + ep.get('seasonNumber'), + ep.get('number'), + ep.get('year'), + ep.get('id') + ) + + console.print(f"[red]Could not find episode for S{season_int:02d}E{episode_int:02d} or absolute #{episode_int}[/red]") + return None, None, None, None, None, None, None diff --git a/src/tvmaze.py b/src/tvmaze.py index d06ce3b25..0203e3310 100644 --- a/src/tvmaze.py +++ b/src/tvmaze.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from src.console import console import httpx import json @@ -10,7 +11,7 @@ async def search_tvmaze(filename, year, imdbID, tvdbID, manual_date=None, tvmaze - Otherwise, only returns `tvmaze_id`. 
""" if debug: - console.print(f"[cyan]Searching TVMaze for TVDB {tvdbID} or IMDB {imdbID} or {filename} ({year})[/cyan]") + console.print(f"[cyan]Searching TVMaze for TVDB {tvdbID} or IMDB {imdbID} or {filename} ({year}) and returning {return_full_tuple}.[/cyan]") # Convert TVDB ID to integer try: tvdbID = int(tvdbID) if tvdbID not in (None, '', '0') else 0 @@ -109,14 +110,10 @@ async def fetch_tvmaze_data(url, params): if debug: console.print(f"[cyan]Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmaze_id})[/cyan]") - if 'externals' in selected_show: - if 'thetvdb' in selected_show['externals'] and not tvdbID: - tvdbID = selected_show['externals']['thetvdb'] - if tvdbID: - tvdbID = int(tvdbID) - return_full_tuple = True - if debug: + if debug and return_full_tuple: console.print(f"[cyan]Returning TVmaze ID: {tvmaze_id} (type: {type(tvmaze_id).__name__}), IMDb ID: {imdbID} (type: {type(imdbID).__name__}), TVDB ID: {tvdbID} (type: {type(tvdbID).__name__})[/cyan]") + elif debug: + console.print(f"[cyan]Returning TVmaze ID: {tvmaze_id} (type: {type(tvmaze_id).__name__})[/cyan]") if tvmaze_id is None: tvmaze_id = 0 if imdbID is None: @@ -143,7 +140,7 @@ async def _make_tvmaze_request(url, params): return {} -async def get_tvmaze_episode_data(tvmaze_id, season, episode): +async def get_tvmaze_episode_data(tvmaze_id, season, episode, meta=None): url = f"https://api.tvmaze.com/shows/%7Btvmaze_id%7D/episodebynumber" params = { "season": season, @@ -185,7 +182,9 @@ async def get_tvmaze_episode_data(tvmaze_id, season, episode): "series_name": show_data.get("name", data.get("_links", {}).get("show", {}).get("name", "")), "series_overview": show_data.get("summary", "").replace("
<p>", "").replace("</p>
", "").strip(), "image": data.get("image", {}).get("original", None) if data.get("image") else None, + "image_medium": data.get("image", {}).get("medium", None) if data.get("image") else None, "series_image": show_data.get("image", {}).get("original", None) if show_data.get("image") else None, + "series_image_medium": show_data.get("image", {}).get("medium", None) if show_data.get("image") else None, } return result @@ -194,11 +193,115 @@ async def get_tvmaze_episode_data(tvmaze_id, season, episode): return None except httpx.HTTPStatusError as e: - console.print(f"[red]HTTP error occurred: {e.response.status_code} - {e.response.text}[/red]") + if e.response.status_code == 404 and meta is not None: + console.print("[yellow]Episode not found using season/episode, trying date-based lookup...[/yellow]") + + # Try to get airdate from meta data + airdate = None + + # First priority: manual_date + if meta and meta.get('manual_date'): + airdate = meta['manual_date'] + if meta.get('debug'): + console.print(f"[cyan]Using manual_date: {airdate}[/cyan]") + + # Second priority: find airdate from tvdb_episode_data using tvdb_episode_id + elif meta and meta.get('tvdb_episode_id') and meta.get('tvdb_episode_data'): + tvdb_episode_id = meta['tvdb_episode_id'] + tvdb_data = meta['tvdb_episode_data'] + + # Handle both dict and list formats + episodes = tvdb_data.get('episodes', []) if isinstance(tvdb_data, dict) else tvdb_data + + for ep in episodes: + if ep.get('id') == tvdb_episode_id: + airdate = ep.get('aired') + if airdate: + if meta.get('debug'): + console.print(f"[cyan]Found airdate from TVDB episode data: {airdate}[/cyan]") + break + + if not airdate: + if meta.get('debug'): + console.print(f"[yellow]Could not find airdate for TVDB episode ID {tvdb_episode_id}[/yellow]") + + # Try date-based lookup if we have an airdate + if airdate: + if meta.get('debug'): + console.print(f"[cyan]Attempting TVMaze lookup by date: {airdate}[/cyan]") + return await get_tvmaze_episode_data_by_date(tvmaze_id, airdate) + else: + if meta.get('debug'): + console.print("[yellow]No airdate available for fallback lookup[/yellow]") + return None + else: + return None + except httpx.RequestError as e: + console.print(f"[red]TVMaze Request error occurred: {e}[/red]") + return None + except Exception as e: + console.print(f"[red]TVMaze Error fetching TVMaze episode data: {e}[/red]") + return None + + +async def get_tvmaze_episode_data_by_date(tvmaze_id, airdate): + url = f"https://api.tvmaze.com/shows/%7Btvmaze_id%7D/episodesbydate" + params = {"date": airdate} + + try: + async with httpx.AsyncClient(follow_redirects=True) as client: + response = await client.get(url, params=params, timeout=10.0) + response.raise_for_status() + data = response.json() + + if data and len(data) > 0: + # Take the first episode from the date (in case multiple episodes aired on same date) + episode_data = data[0] + + # Get show data for additional information + show_data = {} + if "show" in episode_data.get("_links", {}) and "href" in episode_data["_links"]["show"]: + show_url = episode_data["_links"]["show"]["href"] + show_name = episode_data["_links"]["show"].get("name", "") + + show_response = await client.get(show_url, timeout=10.0) + if show_response.status_code == 200: + show_data = show_response.json() + else: + show_data = {"name": show_name} + + # Clean HTML tags from summary + summary = episode_data.get("summary", "") + if summary: + summary = summary.replace("
<p>", "").replace("</p>
", "").strip() + + # Format the response in a consistent structure + result = { + "episode_name": episode_data.get("name", ""), + "overview": summary, + "season_number": episode_data.get("season", 0), + "episode_number": episode_data.get("number", 0), + "air_date": episode_data.get("airdate", ""), + "runtime": episode_data.get("runtime", 0), + "series_name": show_data.get("name", episode_data.get("_links", {}).get("show", {}).get("name", "")), + "series_overview": show_data.get("summary", "").replace("
<p>", "").replace("</p>
", "").strip(), + "image": episode_data.get("image", {}).get("original", None) if episode_data.get("image") else None, + "image_medium": episode_data.get("image", {}).get("medium", None) if episode_data.get("image") else None, + "series_image": show_data.get("image", {}).get("original", None) if show_data.get("image") else None, + "series_image_medium": show_data.get("image", {}).get("medium", None) if show_data.get("image") else None, + } + + return result + else: + console.print(f"[yellow]No episode data found for date {airdate}[/yellow]") + return None + + except httpx.HTTPStatusError as e: + console.print(f"[red]TVMaze HTTP error occurred in episodesbydate: {e.response.status_code} - {e.response.text}[/red]") return None except httpx.RequestError as e: - console.print(f"[red]Request error occurred: {e}[/red]") + console.print(f"[red]TVMaze Request error occurred in episodesbydate: {e}[/red]") return None except Exception as e: - console.print(f"[red]Error fetching TVMaze episode data: {e}[/red]") + console.print(f"[red]TVMaze Error fetching TVMaze episode data by date: {e}[/red]") return None diff --git a/src/uphelper.py b/src/uphelper.py index afc5d915a..cbaea5b38 100644 --- a/src/uphelper.py +++ b/src/uphelper.py @@ -1,44 +1,168 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import cli_ui -from rich.console import Console -from data.config import config +import os +import json +import sys +from difflib import SequenceMatcher -console = Console() +from cogs.redaction import redact_private_info +from data.config import config +from src.cleanup import cleanup, reset_terminal +from src.console import console +from src.trackersetup import tracker_class_map class UploadHelper: async def dupe_check(self, dupes, meta, tracker_name): + # set trackers here so that they are not double checked later with cross seeding + meta.setdefault('dupe_checked_trackers', []).append(tracker_name) + if not dupes: if meta['debug']: console.print(f"[green]No dupes found at[/green] [yellow]{tracker_name}[/yellow]") - meta['upload'] = True - return meta, False + return False else: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) - console.print(f"[bold blue]Check if these are actually dupes from {tracker_name}:[/bold blue]") - console.print() - console.print(f"[bold cyan]{dupe_text}[/bold cyan]") - if meta.get('dupe', False) is False: - upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False) - meta['we_asked'] = True + tracker_class = tracker_class_map[tracker_name](config=config) + try: + tracker_rename = await tracker_class.get_name(meta) + except Exception: + try: + tracker_rename = await tracker_class.edit_name(meta) + except Exception: + tracker_rename = None + display_name = None + if tracker_rename is not None: + if isinstance(tracker_rename, dict) and 'name' in tracker_rename: + display_name = tracker_rename['name'] + elif isinstance(tracker_rename, str): + display_name = tracker_rename + + if meta.get('trumpable', False): + trumpable_dupes = [d for d in dupes if isinstance(d, dict) and d.get('trumpable')] + if trumpable_dupes: + trumpable_text = "\n".join([ + f"{d['name']} - {d['link']}" if 'link' in d else d['name'] + for d in trumpable_dupes + ]) + console.print("[bold red]Trumpable found![/bold red]") + console.print(f"[bold cyan]{trumpable_text}[/bold cyan]") + + meta['aither_trumpable'] = [ + {'name': d.get('name'), 
'link': d.get('link')} + for d in trumpable_dupes + ] + + # Remove trumpable dupes from the main list + dupes = [d for d in dupes if not (isinstance(d, dict) and d.get('trumpable'))] + if (not meta['unattended'] or (meta['unattended'] and meta.get('unattended_confirm', False))) and not meta.get('ask_dupe', False): + dupe_text = "\n".join([ + f"{d['name']} - {d['link']}" if isinstance(d, dict) and 'link' in d and d['link'] is not None else (d['name'] if isinstance(d, dict) else d) + for d in dupes + ]) + if not dupe_text and meta.get('trumpable', False): + console.print("[yellow]Please check the trumpable entries above to see if you want to upload, and report the trumpable torrent if you upload.[/yellow]") + if meta.get('dupe', False) is False: + try: + upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False) + meta['we_asked'] = True + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + else: + upload = True + meta['we_asked'] = False else: - upload = True - meta['we_asked'] = False + if meta.get('filename_match', False) and meta.get('file_count_match', False): + console.print(f'[bold red]Exact match found! - {meta["filename_match"]}[/bold red]') + try: + upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False) + meta['we_asked'] = True + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + else: + console.print(f"[bold blue]Check if these are actually dupes from {tracker_name}:[/bold blue]") + console.print() + console.print(f"[bold cyan]{dupe_text}[/bold cyan]") + if meta.get('dupe', False) is False: + try: + upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False) + meta['we_asked'] = True + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + else: + upload = True else: if meta.get('dupe', False) is False: upload = False else: upload = True + display_name = display_name if display_name is not None else meta.get('name', '') + + if tracker_name in ["BHD"]: + if meta['debug']: + console.print("[yellow]BHD cross seeding check[/yellow]") + tracker_download_link = meta.get(f'{tracker_name}_matched_download') + # Ensure display_name is a string before using 'in' operator + if display_name: + edition = meta.get('edition', '') + region = meta.get('region', '') + if edition and edition in display_name: + display_name = display_name.replace(f"{edition} ", "") + if region and region in display_name: + display_name = display_name.replace(f"{region} ", "") + for d in dupes: + if isinstance(d, dict): + similarity = SequenceMatcher(None, d.get('name', '').lower(), display_name.lower().strip()).ratio() + if similarity > 0.9 and meta.get('size_match', False) and tracker_download_link: + meta[f'{tracker_name}_cross_seed'] = tracker_download_link + if meta['debug']: + console.print(f'[bold red]Cross-seed link saved for {tracker_name}: {redact_private_info(tracker_download_link)}.[/bold red]') + break + + elif meta.get('filename_match', False) and meta.get('file_count_match', False): + if meta['debug']: + console.print(f"[yellow]{tracker_name} filename and file count cross seeding check[/yellow]") + tracker_download_link = meta.get(f'{tracker_name}_matched_download') + for d in dupes: + if isinstance(d, dict) and tracker_download_link: + meta[f'{tracker_name}_cross_seed'] = tracker_download_link + 
if meta['debug']: + console.print(f'[bold red]Cross-seed link saved for {tracker_name}: {redact_private_info(tracker_download_link)}.[/bold red]') + break + + elif meta.get('size_match', False): + if meta['debug']: + console.print(f"[yellow]{tracker_name} size cross seeding check[/yellow]") + tracker_download_link = meta.get(f'{tracker_name}_matched_download') + for d in dupes: + if isinstance(d, dict): + similarity = SequenceMatcher(None, d.get('name', '').lower(), display_name.lower().strip()).ratio() + if meta['debug']: + console.print(f"[debug] Comparing sizes with similarity {similarity:.4f}") + if similarity > 0.9 and tracker_download_link: + meta[f'{tracker_name}_cross_seed'] = tracker_download_link + if meta['debug']: + console.print(f'[bold red]Cross-seed link saved for {tracker_name}: {redact_private_info(tracker_download_link)}.[/bold red]') + break + if upload is False: - return meta, True + return True else: for each in dupes: each_name = each['name'] if isinstance(each, dict) else each if each_name == meta['name']: meta['name'] = f"{meta['name']} DUPE?" - return meta, False + return False async def get_confirmation(self, meta): if meta['debug'] is True: @@ -48,61 +172,182 @@ async def get_confirmation(self, meta): console.print("[bold yellow]Database Info[/bold yellow]") console.print(f"[bold]Title:[/bold] {meta['title']} ({meta['year']})") console.print() - console.print(f"[bold]Overview:[/bold] {meta['overview'][:100]}....") - console.print() - if meta.get('category') == 'TV' and not meta.get('tv_pack') and meta.get('auto_episode_title'): - console.print(f"[bold]Episode Title:[/bold] {meta['auto_episode_title']}") - console.print() - if meta.get('category') == 'TV' and not meta.get('tv_pack') and meta.get('overview_meta'): - console.print(f"[bold]Episode overview:[/bold] {meta['overview_meta']}") + if not meta.get('emby', False): + console.print(f"[bold]Overview:[/bold] {meta['overview'][:100]}....") console.print() - console.print(f"[bold]Genre:[/bold] {meta['genres']}") - console.print() - if str(meta.get('demographic', '')) != '': - console.print(f"[bold]Demographic:[/bold] {meta['demographic']}") + if meta.get('category') == 'TV' and not meta.get('tv_pack') and meta.get('auto_episode_title'): + console.print(f"[bold]Episode Title:[/bold] {meta['auto_episode_title']}") + console.print() + if meta.get('category') == 'TV' and not meta.get('tv_pack') and meta.get('overview_meta'): + console.print(f"[bold]Episode overview:[/bold] {meta['overview_meta']}") + console.print() + console.print(f"[bold]Genre:[/bold] {meta['genres']}") console.print() + if str(meta.get('demographic', '')) != '': + console.print(f"[bold]Demographic:[/bold] {meta['demographic']}") + console.print() console.print(f"[bold]Category:[/bold] {meta['category']}") console.print() - if int(meta.get('tmdb_id') or 0) != 0: - console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb_id']}") - if int(meta.get('imdb_id') or 0) != 0: - console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb']}") - if int(meta.get('tvdb_id') or 0) != 0: - console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") - if int(meta.get('tvmaze_id') or 0) != 0: - console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}") - if int(meta.get('mal_id') or 0) != 0: - console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}") - console.print() - if int(meta.get('freeleech', 0)) != 0: - 
console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}") - tag = "" if meta['tag'] == "" else f" / {meta['tag'][1:]}" - res = meta['source'] if meta['is_disc'] == "DVD" else meta['resolution'] - console.print(f"{res} / {meta['type']}{tag}") - if meta.get('personalrelease', False) is True: - console.print("[bold green]Personal Release![/bold green]") + if meta.get('emby_debug', False): + if int(meta.get('original_imdb', 0)) != 0: + imdb = str(meta.get('original_imdb', 0)).zfill(7) + console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{imdb}") + if int(meta.get('original_tmdb', 0)) != 0: + console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['original_tmdb']}") + if int(meta.get('original_tvdb', 0)) != 0: + console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['original_tvdb']}&tab=series") + if int(meta.get('original_tvmaze', 0)) != 0: + console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['original_tvmaze']}") + if int(meta.get('original_mal', 0)) != 0: + console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['original_mal']}") + else: + if int(meta.get('tmdb_id') or 0) != 0: + console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb_id']}") + if int(meta.get('imdb_id') or 0) != 0: + console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb']}") + if int(meta.get('tvdb_id') or 0) != 0: + console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") + if int(meta.get('tvmaze_id') or 0) != 0: + console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}") + if int(meta.get('mal_id') or 0) != 0: + console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}") console.print() - if meta.get('unattended', False) is False: - await self.get_missing(meta) - ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" - if ring_the_bell: - console.print(ring_the_bell) + if not meta.get('emby', False): + if int(meta.get('freeleech', 0)) != 0: + console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}") + + info_parts = [] + info_parts.append(meta['source'] if meta['is_disc'] == 'DVD' else meta['resolution']) + info_parts.append(meta['type']) + if meta.get('tag', ''): + info_parts.append(meta['tag'][1:]) + if meta.get('region', ''): + info_parts.append(meta['region']) + if meta.get('distributor', ''): + info_parts.append(meta['distributor']) + console.print(' / '.join(info_parts)) + + if meta.get('personalrelease', False) is True: + console.print("[bold green]Personal Release![/bold green]") + console.print() + + if meta.get('unattended', False) and not meta.get('unattended_confirm', False) and not meta.get('emby_debug', False): + if meta['debug'] is True: + console.print("[bold yellow]Unattended mode is enabled, skipping confirmation.[/bold yellow]") + return True + else: + if not meta.get('emby', False): + await self.get_missing(meta) + ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" + if ring_the_bell: + console.print(ring_the_bell) if meta.get('is disc', False) is True: meta['keep_folder'] = False if meta.get('keep_folder') and meta['isdir']: - console.print("[bold yellow]Uploading with --keep-folder[/bold yellow]") kf_confirm = console.input("[bold yellow]You specified --keep-folder. Uploading in folders might not be allowed.[/bold yellow] [green]Proceed? 
y/N: [/green]").strip().lower() if kf_confirm != 'y': console.print("[bold red]Aborting...[/bold red]") exit() - console.print(f"[bold]Name:[/bold] {meta['name']}") - confirm = console.input("[bold green]Is this correct?[/bold green] [yellow]y/N[/yellow]: ").strip().lower() == 'y' - else: - console.print(f"[bold]Name:[/bold] {meta['name']}") - confirm = True + if not meta.get('emby', False): + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm = console.input("[bold green]Is this correct?[/bold green] [yellow]y/N[/yellow]: ").strip().lower() == 'y' + elif not meta.get('emby_debug', False): + confirm = console.input("[bold green]Is this correct?[/bold green] [yellow]y/N[/yellow]: ").strip().lower() == 'y' + if meta.get('emby_debug', False): + if meta.get('original_imdb', 0) != meta.get('imdb_id', 0): + imdb = str(meta.get('imdb_id', 0)).zfill(7) + console.print(f"[bold red]IMDB ID changed from {meta['original_imdb']} to {meta['imdb_id']}[/bold red]") + console.print(f"[bold cyan]IMDB URL:[/bold cyan] [yellow]https://www.imdb.com/title/tt{imdb}[/yellow]") + if meta.get('original_tmdb', 0) != meta.get('tmdb_id', 0): + console.print(f"[bold red]TMDB ID changed from {meta['original_tmdb']} to {meta['tmdb_id']}[/bold red]") + console.print(f"[bold cyan]TMDB URL:[/bold cyan] [yellow]https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb_id']}[/yellow]") + if meta.get('original_mal', 0) != meta.get('mal_id', 0): + console.print(f"[bold red]MAL ID changed from {meta['original_mal']} to {meta['mal_id']}[/bold red]") + console.print(f"[bold cyan]MAL URL:[/bold cyan] [yellow]https://myanimelist.net/anime/{meta['mal_id']}[/yellow]") + if meta.get('original_tvmaze', 0) != meta.get('tvmaze_id', 0): + console.print(f"[bold red]TVMaze ID changed from {meta['original_tvmaze']} to {meta['tvmaze_id']}[/bold red]") + console.print(f"[bold cyan]TVMaze URL:[/bold cyan] [yellow]https://www.tvmaze.com/shows/{meta['tvmaze_id']}[/yellow]") + if meta.get('original_tvdb', 0) != meta.get('tvdb_id', 0): + console.print(f"[bold red]TVDB ID changed from {meta['original_tvdb']} to {meta['tvdb_id']}[/bold red]") + console.print(f"[bold cyan]TVDB URL:[/bold cyan] [yellow]https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series[/yellow]") + if meta.get('original_category', None) != meta.get('category', None): + console.print(f"[bold red]Category changed from {meta['original_category']} to {meta['category']}[/bold red]") + console.print(f"[bold cyan]Regex Title:[/bold cyan] [yellow]{meta.get('regex_title', 'N/A')}[/yellow], [bold cyan]Secondary Title:[/bold cyan] [yellow]{meta.get('regex_secondary_title', 'N/A')}[/yellow], [bold cyan]Year:[/bold cyan] [yellow]{meta.get('regex_year', 'N/A')}, [bold cyan]AKA:[/bold cyan] [yellow]{meta.get('aka', '')}[/yellow]") + console.print() + if meta.get('original_imdb', 0) == meta.get('imdb_id', 0) and meta.get('original_tmdb', 0) == meta.get('tmdb_id', 0) and meta.get('original_mal', 0) == meta.get('mal_id', 0) and meta.get('original_tvmaze', 0) == meta.get('tvmaze_id', 0) and meta.get('original_tvdb', 0) == meta.get('tvdb_id', 0) and meta.get('original_category', None) == meta.get('category', None): + console.print("[bold yellow]Database ID's are correct![/bold yellow]") + return True + else: + nfo_dir = os.path.join(f"{meta['base_dir']}/data") + os.makedirs(nfo_dir, exist_ok=True) + json_file_path = os.path.join(nfo_dir, "db_check.json") + + def imdb_url(https://github.com/imdb_id): + return 
f"https://www.imdb.com/title/tt%7Bstr(imdb_id).zfill(7)%7D" if imdb_id and str(imdb_id).isdigit() else None + + def tmdb_url(https://github.com/tmdb_id,%20category): + return f"https://www.themoviedb.org/%7Bstr(category).lower()%7D/%7Btmdb_id%7D" if tmdb_id and category else None + + def tvdb_url(https://github.com/tvdb_id): + return f"https://www.thetvdb.com/?id={tvdb_id}&tab=series" if tvdb_id else None + + def tvmaze_url(https://github.com/tvmaze_id): + return f"https://www.tvmaze.com/shows/%7Btvmaze_id%7D" if tvmaze_id else None + + def mal_url(https://github.com/mal_id): + return f"https://myanimelist.net/anime/%7Bmal_id%7D" if mal_id else None + + db_check_entry = { + "path": meta.get('path'), + "original": { + "imdb_id": meta.get('original_imdb', 'N/A'), + "imdb_url": imdb_url(https://github.com/meta.get('original_imdb')), + "tmdb_id": meta.get('original_tmdb', 'N/A'), + "tmdb_url": tmdb_url(https://github.com/meta.get('original_tmdb'), meta.get('original_category')), + "tvdb_id": meta.get('original_tvdb', 'N/A'), + "tvdb_url": tvdb_url(https://github.com/meta.get('original_tvdb')), + "tvmaze_id": meta.get('original_tvmaze', 'N/A'), + "tvmaze_url": tvmaze_url(https://github.com/meta.get('original_tvmaze')), + "mal_id": meta.get('original_mal', 'N/A'), + "mal_url": mal_url(https://github.com/meta.get('original_mal')), + "category": meta.get('original_category', 'N/A') + }, + "changed": { + "imdb_id": meta.get('imdb_id', 'N/A'), + "imdb_url": imdb_url(https://github.com/meta.get('imdb_id')), + "tmdb_id": meta.get('tmdb_id', 'N/A'), + "tmdb_url": tmdb_url(https://github.com/meta.get('tmdb_id'), meta.get('category')), + "tvdb_id": meta.get('tvdb_id', 'N/A'), + "tvdb_url": tvdb_url(https://github.com/meta.get('tvdb_id')), + "tvmaze_id": meta.get('tvmaze_id', 'N/A'), + "tvmaze_url": tvmaze_url(https://github.com/meta.get('tvmaze_id')), + "mal_id": meta.get('mal_id', 'N/A'), + "mal_url": mal_url(https://github.com/meta.get('mal_id')), + "category": meta.get('category', 'N/A') + }, + "tracker": meta.get('matched_tracker', 'N/A'), + } + + # Append to JSON file (as a list of entries) + if os.path.exists(json_file_path): + with open(json_file_path, 'r', encoding='utf-8') as f: + try: + db_data = json.load(f) + if not isinstance(db_data, list): + db_data = [] + except Exception: + db_data = [] + else: + db_data = [] + + db_data.append(db_check_entry) + + with open(json_file_path, 'w', encoding='utf-8') as f: + json.dump(db_data, f, indent=2, ensure_ascii=False) + return True return confirm diff --git a/src/uploadscreens.py b/src/uploadscreens.py index 4fb928ab7..38353bfff 100644 --- a/src/uploadscreens.py +++ b/src/uploadscreens.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 from src.console import console import os import pyimgbox @@ -11,6 +12,8 @@ import json from concurrent.futures import ThreadPoolExecutor import traceback +import httpx +import aiofiles try: from data.config import config @@ -21,7 +24,7 @@ exit(1) -def upload_image_task(args): +async def upload_image_task(args): image, img_host, config, meta = args try: timeout = 60 # Default timeout @@ -29,7 +32,7 @@ def upload_image_task(args): if img_host == "imgbox": try: - image_list = asyncio.run(imgbox_upload(os.getcwd(), [image], meta, return_dict={})) + image_list = await imgbox_upload(os.getcwd(), [image], meta, 
return_dict={}) if image_list and all( 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list ): @@ -48,65 +51,82 @@ def upload_image_task(args): } elif img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': config['DEFAULT']['ptpimg_api'] - } - - with open(image, 'rb') as file: - files = [('file-upload[0]', file)] - headers = {'referer': 'https://ptpimg.me/index.php'} - - try: - response = requests.post( - "https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout - ) - response.raise_for_status() # Raise an exception for HTTP errors - response_data = response.json() + try: + payload = { + 'format': 'json', + 'api_key': config['DEFAULT']['ptpimg_api'].strip() + } + except KeyError: + return {'status': 'failed', 'reason': 'Missing ptpimg API key in config'} - if not response_data or not isinstance(response_data, list) or 'code' not in response_data[0]: - return {'status': 'failed', 'reason': "Invalid JSON response from ptpimg"} + try: + async with httpx.AsyncClient() as client: + async with aiofiles.open(image, 'rb') as file: + files = {'file-upload[0]': (os.path.basename(image), await file.read())} + headers = {'referer': 'https://ptpimg.me/index.php'} - code = response_data[0]['code'] - ext = response_data[0]['ext'] - img_url = f"https://ptpimg.me/%7Bcode%7D.%7Bext%7D" - raw_url = img_url - web_url = img_url + try: + response = await client.post( + "https://ptpimg.me/upload.php", + headers=headers, + data=payload, + files=files, + timeout=timeout + ) + + response.raise_for_status() + response_data = response.json() - except requests.exceptions.Timeout: - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - return {'status': 'failed', 'reason': f"Request failed: {str(e)}"} - except json.JSONDecodeError: - return {'status': 'failed', 'reason': 'Invalid JSON response from ptpimg'} + if not response_data or not isinstance(response_data, list) or 'code' not in response_data[0]: + return {'status': 'failed', 'reason': "Invalid JSON response from ptpimg"} + + code = response_data[0]['code'] + ext = response_data[0]['ext'] + img_url = f"https://ptpimg.me/%7Bcode%7D.%7Bext%7D" + raw_url = img_url + web_url = img_url + + except httpx.TimeoutException: + console.print("[red][ptpimg] Request timed out.") + return {'status': 'failed', 'reason': 'Request timed out'} + except ValueError as e: + console.print(f"[red][ptpimg] ValueError: {str(e)}") + return {'status': 'failed', 'reason': f"Request failed: {str(e)}"} + except json.JSONDecodeError as e: + console.print(f"[red][ptpimg] JSONDecodeError: {str(e)}") + return {'status': 'failed', 'reason': 'Invalid JSON response from ptpimg'} + except Exception as e: + console.print(f"[red][ptpimg] Exception: {str(e)}") + return {'status': 'failed', 'reason': f"Error during ptpimg upload: {str(e)}"} elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" try: - with open(image, "rb") as img_file: - encoded_image = base64.b64encode(img_file.read()).decode('utf8') + async with aiofiles.open(image, "rb") as img_file: + encoded_image = base64.b64encode(await img_file.read()).decode('utf8') data = { 'key': config['DEFAULT']['imgbb_api'], 'image': encoded_image, } - response = requests.post(url, data=data, timeout=timeout) - response_data = response.json() - if response.status_code != 200 or not response_data.get('success'): - console.print("[yellow]imgbb failed,
trying next image host") - return {'status': 'failed', 'reason': 'imgbb upload failed'} + async with httpx.AsyncClient() as client: + response = await client.post(url, data=data, timeout=timeout) + response_data = response.json() + if response.status_code != 200 or not response_data.get('success'): + console.print("[yellow]imgbb failed, trying next image host") + return {'status': 'failed', 'reason': 'imgbb upload failed'} - img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] + img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - except requests.exceptions.Timeout: + except httpx.TimeoutException: console.print("[red]Request timed out. The server took too long to respond.") return {'status': 'failed', 'reason': 'Request timed out'} @@ -114,7 +134,7 @@ def upload_image_task(args): console.print(f"[red]Invalid JSON response: {e}") return {'status': 'failed', 'reason': 'Invalid JSON response'} - except requests.exceptions.RequestException as e: + except httpx.RequestError as e: console.print(f"[red]Request failed with error: {e}") return {'status': 'failed', 'reason': str(e)} @@ -159,58 +179,81 @@ def upload_image_task(args): elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" try: - files = { - 'source': ('file-upload[0]', open(image, 'rb')), - } headers = { 'X-API-Key': config['DEFAULT']['ptscreens_api'] } - response = requests.post(url, headers=headers, files=files, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') != 200: - console.print("[yellow]ptscreens failed, trying next image host") - return {'status': 'failed', 'reason': 'ptscreens upload failed'} - img_url = response_data['image']['medium']['url'] - raw_url = response_data['image']['url'] - web_url = response_data['image']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + async with httpx.AsyncClient() as client: + async with aiofiles.open(image, 'rb') as file: + files = { + 'source': ('file-upload[0]', await file.read()) + } - except requests.exceptions.Timeout: + response = await client.post(url, headers=headers, files=files, timeout=timeout) + response_data = response.json() + + if response.status_code == 400: + console.print("[yellow]ptscreens upload failed: Duplicate upload (400)") + return {'status': 'failed', 'reason': 'ptscreens duplicate'} + + if response_data.get('status_code') != 200: + console.print("[yellow]ptscreens failed") + return {'status': 'failed', 'reason': 'ptscreens upload failed'} + + img_url = response_data['image']['medium']['url'] + raw_url = response_data['image']['url'] + web_url = response_data['image']['url_viewer'] + + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except 
httpx.TimeoutException: console.print("[red]Request timed out. The server took too long to respond.") return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: + except httpx.RequestError as e: console.print(f"[red]Request failed with error: {e}") return {'status': 'failed', 'reason': str(e)} + except ValueError as e: + console.print(f"[red]Invalid JSON response from ptscreens: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} elif img_host == "onlyimage": url = "https://onlyimage.org/api/1/upload" try: + async with aiofiles.open(image, "rb") as img_file: + encoded_image = base64.b64encode(await img_file.read()).decode('utf8') + data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + 'image': encoded_image } headers = { 'X-API-Key': config['DEFAULT']['onlyimage_api'], } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response.status_code != 200 or not response_data.get('success'): - console.print("[yellow]OnlyImage failed, trying next image host") - return {'status': 'failed', 'reason': 'OnlyImage upload failed'} - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + async with httpx.AsyncClient() as client: + response = await client.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() - except requests.exceptions.Timeout: + if response.status_code != 200 or not response_data.get('success'): + console.print("[yellow]OnlyImage failed, trying next image host") + return {'status': 'failed', 'reason': 'OnlyImage upload failed'} + + img_url = response_data['data']['medium']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except httpx.TimeoutException: console.print("[red]Request timed out. 
The server took too long to respond.") return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: + except httpx.RequestError as e: console.print(f"[red]Request failed with error: {e}") return {'status': 'failed', 'reason': str(e)} + except ValueError as e: + console.print(f"[red]Invalid JSON response from OnlyImage: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} elif img_host == "pixhost": url = "https://api.pixhost.to/images" @@ -219,37 +262,41 @@ def upload_image_task(args): 'content_type': '0', 'max_th_size': 350 } - files = { - 'img': ('file-upload[0]', open(image, 'rb')) - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print(f"[yellow]pixhost failed with status code {response.status_code}, trying next image host") - return {'status': 'failed', 'reason': f'pixhost upload failed with status code {response.status_code}'} + async with httpx.AsyncClient() as client: + async with aiofiles.open(image, 'rb') as file: + files = { + 'img': ('file-upload[0]', await file.read()) + } - try: - response_data = response.json() - if 'th_url' not in response_data: - console.print("[yellow]pixhost failed: Invalid response format") - return {'status': 'failed', 'reason': 'Invalid response from pixhost'} + response = await client.post(url, data=data, files=files, timeout=timeout) - raw_url = response_data['th_url'].replace('https://t/', 'https://img/').replace('/thumbs/', '/images/') - img_url = response_data['th_url'] - web_url = response_data['show_url'] + if response.status_code != 200: + console.print(f"[yellow]pixhost failed with status code {response.status_code}, trying next image host") + return {'status': 'failed', 'reason': f'pixhost upload failed with status code {response.status_code}'} - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + try: + response_data = response.json() + if 'th_url' not in response_data: + console.print("[yellow]pixhost failed: Invalid response format") + return {'status': 'failed', 'reason': 'Invalid response from pixhost'} - except ValueError as e: - console.print(f"[red]Invalid JSON response from pixhost: {e}") - return {'status': 'failed', 'reason': 'Invalid JSON response'} + raw_url = response_data['th_url'].replace('https://t/', 'https://img/').replace('/thumbs/', '/images/') + img_url = response_data['th_url'] + web_url = response_data['show_url'] - except requests.exceptions.Timeout: + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except ValueError as e: + console.print(f"[red]Invalid JSON response from pixhost: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} + + except httpx.TimeoutException: console.print("[red]Request to pixhost timed out. 
The server took too long to respond.") return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: + except httpx.RequestError as e: console.print(f"[red]pixhost request failed with error: {e}") return {'status': 'failed', 'reason': str(e)} @@ -325,43 +372,106 @@ def upload_image_task(args): 'X-API-Key': pass_api_key } - with open(image, 'rb') as img_file: - files = {'source': (os.path.basename(image), img_file)} - response = requests.post(url, headers=headers, files=files, timeout=timeout) + async with httpx.AsyncClient() as client: + async with aiofiles.open(image, 'rb') as img_file: + files = {'source': (os.path.basename(image), await img_file.read())} + response = await client.post(url, headers=headers, files=files, timeout=timeout) - if 'application/json' in response.headers.get('Content-Type', ''): - response_data = response.json() - else: - console.print(f"[red]Passtheimage did not return JSON. Status: {response.status_code}, Response: {response.text[:200]}") - return {'status': 'failed', 'reason': f'Non-JSON response from passtheimage: {response.status_code}'} + if 'application/json' in response.headers.get('Content-Type', ''): + response_data = response.json() + else: + console.print(f"[red]Passtheimage did not return JSON. Status: {response.status_code}, Response: {response.text[:200]}") + return {'status': 'failed', 'reason': f'Non-JSON response from passtheimage: {response.status_code}'} - if response.status_code != 200 or response_data.get('status_code') != 200: - error_message = response_data.get('error', {}).get('message', 'Unknown error') - error_code = response_data.get('error', {}).get('code', 'Unknown code') - console.print(f"[yellow]Passtheimage failed (code: {error_code}): {error_message}") - return {'status': 'failed', 'reason': f'passtheimage upload failed: {error_message}'} + if response.status_code != 200 or response_data.get('status_code') != 200: + error_message = response_data.get('error', {}).get('message', 'Unknown error') + error_code = response_data.get('error', {}).get('code', 'Unknown code') + console.print(f"[yellow]Passtheimage failed (code: {error_code}): {error_message}") + return {'status': 'failed', 'reason': f'passtheimage upload failed: {error_message}'} - if 'image' in response_data: - img_url = response_data['image']['url'] - raw_url = response_data['image']['url'] - web_url = response_data['image']['url_viewer'] + if 'image' in response_data: + img_url = response_data['image']['url'] + raw_url = response_data['image']['url'] + web_url = response_data['image']['url_viewer'] - if not img_url or not raw_url or not web_url: - console.print(f"[yellow]Incomplete URL data from passtheimage response: {response_data}") - return {'status': 'failed', 'reason': 'Incomplete URL data from passtheimage'} + if not img_url or not raw_url or not web_url: + console.print(f"[yellow]Incomplete URL data from passtheimage response: {response_data}") + return {'status': 'failed', 'reason': 'Incomplete URL data from passtheimage'} - return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url, 'local_file_path': image} + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url, 'local_file_path': image} - except requests.exceptions.Timeout: + except httpx.TimeoutException: console.print("[red]Request to passtheimage timed out after 60 seconds") return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: + except 
httpx.RequestError as e: console.print(f"[red]Request to passtheimage failed with error: {e}") return {'status': 'failed', 'reason': str(e)} + except ValueError as e: + console.print(f"[red]Invalid JSON response from passtheimage: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} except Exception as e: console.print(f"[red]Unexpected error with passtheimage: {str(e)}") return {'status': 'failed', 'reason': f'Unexpected error: {str(e)}'} + elif img_host == "seedpool_cdn": + url = "https://i.seedpool.org/upload" + api_key = config['DEFAULT'].get('seedpool_cdn_api') + + if not api_key: + console.print("[red]Seedpool CDN API key not found in config.") + return {'status': 'failed', 'reason': 'Missing Seedpool CDN API key'} + + try: + headers = {'Authorization': f'Bearer {api_key}'} + + async with httpx.AsyncClient() as client: + async with aiofiles.open(image, 'rb') as img_file: + files = {'files[]': (os.path.basename(image), await img_file.read())} + + response = await client.post(url, headers=headers, files=files, timeout=timeout) + + if response.status_code not in (200, 201): + console.print(f"[yellow]Seedpool CDN failed with status code {response.status_code}, trying next image host") + return {'status': 'failed', 'reason': f'Seedpool CDN upload failed with status code {response.status_code}'} + + response_data = response.json() + + if 'files' in response_data and len(response_data['files']) > 0: + file_data = response_data['files'][0] + + # Use medium variant as primary, fallback to base URL + img_url = file_data.get('variants', {}).get('medium', file_data['url']) + raw_url = file_data['url'] + web_url = file_data['url'] + + # Use thumbnail_url if available, otherwise use thumb variant + if 'thumbnail_url' in file_data: + img_url = file_data['thumbnail_url'] + elif 'thumb' in file_data.get('variants', {}): + img_url = file_data['variants']['thumb'] + + if meta['debug']: + console.print(f"[green]Seedpool CDN upload successful: {file_data['cdn_id']}") + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + else: + console.print("[yellow]Seedpool CDN returned empty files array") + return {'status': 'failed', 'reason': 'No files in Seedpool CDN response'} + + except httpx.TimeoutException: + console.print("[red]Request to Seedpool CDN timed out.") + return {'status': 'failed', 'reason': 'Request timed out'} + except httpx.RequestError as e: + console.print(f"[red]Seedpool CDN request failed: {e}") + return {'status': 'failed', 'reason': str(e)} + except ValueError as e: + console.print(f"[red]Invalid JSON response from Seedpool CDN: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} + except Exception as e: + console.print(f"[red]Unexpected error with Seedpool CDN: {e}") + return {'status': 'failed', 'reason': f'Unexpected error: {str(e)}'} + if img_url and raw_url and web_url: return { 'status': 'success', @@ -387,15 +497,41 @@ def upload_image_task(args): thread_pool = ThreadPoolExecutor(max_workers=10) -async def upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): +async def upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3, allowed_hosts=None): if 'image_list' not in meta: meta['image_list'] = [] if meta['debug']: upload_start_time = time.time() 
os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") + initial_img_host = config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] + + # Check if current host is allowed, if not find an approved one + if allowed_hosts is not None and img_host not in allowed_hosts: + console.print(f"[yellow]Current image host '{img_host}' is not in allowed hosts: {allowed_hosts}[/yellow]") + + # Find the first approved host from config + approved_host = None + for i in range(1, 10): # Check img_host_1 through img_host_9 + host_key = f'img_host_{i}' + if host_key in config['DEFAULT']: + host = config['DEFAULT'][host_key] + if host in allowed_hosts: + approved_host = host + img_host_num = i + console.print(f"[green]Switching to approved image host: {approved_host}[/green]") + break + + if approved_host: + img_host = approved_host + else: + console.print(f"[red]No approved image hosts found in config. Available: {allowed_hosts}[/red]") + return meta['image_list'], len(meta['image_list']) + + if meta['debug']: + console.print(f"[blue]Using image host: {img_host} (configured: {initial_img_host})[/blue]") using_custom_img_list = isinstance(custom_img_list, list) and bool(custom_img_list) if 'image_sizes' not in meta: @@ -410,15 +546,18 @@ async def upload_screens(meta, screens, img_host_num, i, total_screens, custom_i image_patterns = ["*.png", ".[!.]*.png"] image_glob = [] for pattern in image_patterns: - image_glob.extend(glob.glob(pattern)) + glob_results = await asyncio.to_thread(glob.glob, pattern) + image_glob.extend(glob_results) unwanted_patterns = ["FILE*", "PLAYLIST*", "POSTER*"] unwanted_files = set() for pattern in unwanted_patterns: - unwanted_files.update(glob.glob(pattern)) + glob_results = await asyncio.to_thread(glob.glob, pattern) + unwanted_files.update(glob_results) if pattern.startswith("FILE") or pattern.startswith("PLAYLIST") or pattern.startswith("POSTER"): hidden_pattern = "." 
+ pattern - unwanted_files.update(glob.glob(hidden_pattern)) + hidden_glob_results = await asyncio.to_thread(glob.glob, hidden_pattern) + unwanted_files.update(hidden_glob_results) image_glob = [file for file in image_glob if file not in unwanted_files] image_glob = list(set(image_glob)) @@ -438,6 +577,8 @@ def extract_numeric_suffix(filename): # Determine images needed images_needed = total_screens - existing_count if not retry_mode else total_screens + if meta['debug']: + console.print(f"[blue]Existing images: {existing_count}, Images needed: {images_needed}, Total screens: {total_screens}[/blue]") if existing_count >= total_screens and not retry_mode and img_host == initial_img_host and not using_custom_img_list: console.print(f"[yellow]Skipping upload: {existing_count} existing, {total_screens} required.") @@ -450,7 +591,7 @@ def extract_numeric_suffix(filename): # Concurrency Control default_pool_size = len(upload_tasks) - host_limits = {"onlyimage": 6, "ptscreens": 1, "lensdump": 1, "passtheimage": 6} + host_limits = {"onlyimage": 6, "ptscreens": 6, "lensdump": 1, "passtheimage": 6} pool_size = host_limits.get(img_host, default_pool_size) max_workers = min(len(upload_tasks), pool_size) semaphore = asyncio.Semaphore(max_workers) @@ -467,7 +608,7 @@ async def async_upload(task, max_retries=3): while retry_count <= max_retries: future = None try: - future = asyncio.create_task(asyncio.to_thread(upload_image_task, task_args)) + future = asyncio.create_task(upload_image_task(task_args)) running_tasks.add(future) try: @@ -478,6 +619,12 @@ async def async_upload(task, max_retries=3): return (index, result) else: reason = result.get('reason', 'Unknown error') + if "duplicate" in reason.lower(): + console.print(f"[yellow]Skipping host because duplicate image {index}: {reason}[/yellow]") + return None + elif "api key" in reason.lower(): + console.print(f"[red]API key error for {img_host}. 
Aborting further attempts.[/red]") + return None if retry_count < max_retries: retry_count += 1 console.print(f"[yellow]Retry {retry_count}/{max_retries} for image {index}: {reason}[/yellow]") @@ -520,14 +667,23 @@ async def async_upload(task, max_retries=3): try: max_retries = 3 - upload_results = await asyncio.gather(*[async_upload(task, max_retries) for task in upload_tasks]) - results = [res for res in upload_results if res is not None] - results.sort(key=lambda x: x[0]) + try: + upload_results = await asyncio.gather(*[async_upload(task, max_retries) for task in upload_tasks]) + results = [res for res in upload_results if res is not None] + results.sort(key=lambda x: x[0]) + except Exception as e: + console.print(f"[red]Error during uploads: {str(e)}[/red]") successfully_uploaded = [(index, result) for index, result in results if result['status'] == 'success'] + if meta['debug']: + console.print(f"[blue]Successfully uploaded {len(successfully_uploaded)} out of {len(upload_tasks)} attempted uploads.[/blue]") # Ensure we only switch hosts if necessary - if (len(successfully_uploaded) + len(meta['image_list'])) < meta.get('cutoff', 1) and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + if meta['debug']: + console.print(f"[blue]Double checking current image host: {img_host}, Initial image host: {initial_img_host}[/blue]") + console.print(f"[blue]retry_mode: {retry_mode}, using_custom_img_list: {using_custom_img_list}[/blue]") + console.print(f"[blue]successfully_uploaded={len(successfully_uploaded)}, meta['image_list']={len(meta['image_list'])}, cutoff={meta.get('cutoff', 1)}[/blue]") + if (len(successfully_uploaded) + len(meta['image_list'])) < images_needed and not retry_mode and img_host == initial_img_host and not using_custom_img_list: img_host_num += 1 if f'img_host_{img_host_num}' in config['DEFAULT']: meta['imghost'] = config['DEFAULT'][f'img_host_{img_host_num}'] @@ -558,8 +714,11 @@ async def async_upload(task, max_retries=3): image_size = os.path.getsize(local_file_path) meta['image_sizes'][raw_url] = image_size - if not using_custom_img_list: - console.print(f"[green]Successfully obtained and uploaded {len(new_images)} images.") + if len(new_images) and len(new_images) > 0: + if not using_custom_img_list: + console.print(f"[green]Successfully obtained and uploaded {len(new_images)} images.") + else: + raise Exception("No images uploaded. 
Configure additional image hosts or use a different -ih") if meta['debug']: console.print(f"Screenshot uploads processed in {time.time() - upload_start_time:.4f} seconds") diff --git a/src/video.py b/src/video.py index a8b0af6a5..0acba27a4 100644 --- a/src/video.py +++ b/src/video.py @@ -1,8 +1,12 @@ -import os +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import cli_ui +import glob import json +import os import re -import glob -import cli_ui +import sys + +from src.cleanup import cleanup, reset_terminal from src.console import console from src.exportmi import mi_resolution @@ -104,30 +108,31 @@ async def get_video_encode(mi, type, bdinfo): if mi['media']['track'][1].get('Encoded_Library_Settings', None): has_encode_settings = True bit_depth = mi['media']['track'][1].get('BitDepth', '0') + encoded_library_name = mi['media']['track'][1].get('Encoded_Library_Name', None) except Exception: format = bdinfo['video'][0]['codec'] format_profile = bdinfo['video'][0]['profile'] - if type in ("ENCODE", "WEBRIP", "DVDRIP"): # ENCODE or WEBRIP or DVDRIP + if format in ('AV1', 'VP9', 'VC-1'): + codec = format + elif type in ("ENCODE", "WEBRIP", "DVDRIP"): # ENCODE or WEBRIP or DVDRIP if format == 'AVC': codec = 'x264' elif format == 'HEVC': codec = 'x265' - elif format == 'AV1': - codec = 'AV1' + elif format == 'MPEG-4 Visual': + if encoded_library_name: + if 'xvid' in encoded_library_name.lower(): + codec = 'XviD' + elif 'divx' in encoded_library_name.lower(): + codec = 'DivX' elif type in ('WEBDL', 'HDTV'): # WEB-DL if format == 'AVC': codec = 'H.264' elif format == 'HEVC': codec = 'H.265' - elif format == 'AV1': - codec = 'AV1' if type == 'HDTV' and has_encode_settings is True: codec = codec.replace('H.', 'x') - elif format == "VP9": - codec = "VP9" - elif format == "VC-1": - codec = "VC-1" if format_profile == 'High 10': profile = "Hi10P" else: @@ -139,7 +144,7 @@ async def get_video_encode(mi, type, bdinfo): return video_encode, video_codec, has_encode_settings, bit_depth -async def get_video(videoloc, mode): +async def get_video(videoloc, mode, sorted_filelist=False): filelist = [] videoloc = os.path.abspath(videoloc) if os.path.isdir(videoloc): @@ -155,10 +160,29 @@ async def get_video(videoloc, mode): for tf in filelist: console.print(f"[cyan]{tf}") console.print(f"[bold red]Possible sample file detected in filelist!: [yellow]{f}") - if cli_ui.ask_yes_no("Do you want to remove it?", default="yes"): - filelist.remove(f) + try: + if cli_ui.ask_yes_no("Do you want to remove it?", default="yes"): + filelist.remove(f) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + if any(tag in file for tag in ['{tmdb-', '{imdb-', '{tvdb-']): + console.print(f"[bold red]This looks like some *arr renamed file which is not allowed: [yellow]{file}") + try: + if cli_ui.ask_yes_no("Do you want to upload with this file?", default="yes"): + pass + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) try: - video = sorted(filelist)[0] + if sorted_filelist: + video = sorted(filelist, key=os.path.getsize, reverse=True)[0] + else: + video = sorted(filelist)[0] except IndexError: console.print("[bold red]No Video files found") if mode == 'cli': @@ -166,7 +190,10 @@ async def get_video(videoloc, mode): else: video = videoloc filelist.append(videoloc) - filelist = sorted(filelist) + if sorted_filelist: + filelist = sorted(filelist, 
key=os.path.getsize, reverse=True) + else: + filelist = sorted(filelist) return video, filelist @@ -199,7 +226,7 @@ async def get_resolution(guess, folder_id, base_dir): scan = mi['media']['track'][1]['ScanType'] except Exception: scan = "Progressive" - if scan == "Progressive": + if not scan or scan == "Progressive": scan = "p" elif scan == "Interlaced": scan = 'i' @@ -277,3 +304,49 @@ async def is_sd(resolution): else: sd = 0 return sd + + +async def get_video_duration(meta): + if not meta.get('is_disc') == "BDMV" and meta.get('mediainfo', {}).get('media', {}).get('track'): + general_track = next((track for track in meta['mediainfo']['media']['track'] + if track.get('@type') == 'General'), None) + + if general_track and general_track.get('Duration'): + try: + media_duration_seconds = float(general_track['Duration']) + formatted_duration = int(media_duration_seconds // 60) + return formatted_duration + except ValueError: + if meta['debug']: + console.print(f"[red]Invalid duration value: {general_track['Duration']}[/red]") + return None + else: + if meta['debug']: + console.print("[red]No valid duration found in MediaInfo General track[/red]") + return None + else: + return None + + +async def get_container(meta): + if meta.get('is_disc', '') == 'BDMV': + return 'm2ts' + elif meta.get('is_disc', '') == 'HDDVD': + return 'evo' + elif meta.get('is_disc', '') == 'DVD': + return 'vob' + else: + file_list = meta.get('filelist', []) + + if not file_list: + console.print("[red]No files found to determine container[/red]") + return '' + + try: + largest_file_path = max(file_list, key=os.path.getsize) + except (OSError, ValueError) as e: + console.print(f"[red]Error getting container for file: {e}[/red]") + return '' + + extension = os.path.splitext(largest_file_path)[1] + return extension.lstrip('.').lower() if extension else '' diff --git a/src/vs.py b/src/vs.py index 4209464f6..94e4f3c8f 100644 --- a/src/vs.py +++ b/src/vs.py @@ -1,3 +1,4 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 import vapoursynth as vs from awsmfunc import ScreenGen, DynamicTonemap, zresize import random diff --git a/upload.py b/upload.py index cdffc4bd2..1a0d505d1 100644 --- a/upload.py +++ b/upload.py @@ -1,41 +1,51 @@ #!/usr/bin/env python3 -from src.args import Args -from src.clients import Clients -from src.uploadscreens import upload_screens -import json -from pathlib import Path +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +import aiofiles import asyncio -import os -import sys -import platform -import shutil import cli_ui -import traceback -import time +import discord import gc +import json +import os +import platform import re import requests -import discord +import shutil +import sys +import time +import traceback + from packaging import version -from src.trackersetup import tracker_class_map, api_trackers, other_api_trackers, http_trackers -from src.trackerhandle import process_trackers -from src.queuemanage import handle_queue -from src.console import console -from src.torrentcreate import create_torrent, create_random_torrents, create_base_from_existing_torrent -from src.uphelper import UploadHelper -from src.trackerstatus import process_all_trackers -from src.takescreens import disc_screenshots, dvd_screenshots, screenshots -from src.cleanup import cleanup, reset_terminal +from pathlib import Path + +from bin.get_mkbrr import ensure_mkbrr_binary +from cogs.redaction import clean_meta_for_export, redact_private_info +from discordbot import 
send_discord_notification, send_upload_status_notification from src.add_comparison import add_comparison +from src.args import Args +from src.cleanup import cleanup, reset_terminal +from src.clients import Clients +from src.console import console +from src.disc_menus import process_disc_menus +from src.dupe_checking import filter_dupes from src.get_name import get_name from src.get_desc import gen_desc -from discordbot import send_discord_notification, send_upload_status_notification -from cogs.redaction import clean_meta_for_export +from src.get_tracker_data import get_tracker_data from src.languages import process_desc_language -from bin.get_mkbrr import ensure_mkbrr_binary - +from src.nfo_link import nfo_link +from src.queuemanage import handle_queue, save_processed_path, process_site_upload_item +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.torrentcreate import create_torrent, create_random_torrents, create_base_from_existing_torrent +from src.trackerhandle import process_trackers +from src.trackerstatus import process_all_trackers +from src.trackersetup import TRACKER_SETUP, tracker_class_map, api_trackers, other_api_trackers, http_trackers +from src.trackers.COMMON import COMMON +from src.trackers.PTP import PTP +from src.trackers.AR import AR +from src.uphelper import UploadHelper +from src.uploadscreens import upload_screens -cli_ui.setup(color='always', title="Audionut's Upload Assistant") +cli_ui.setup(color='always', title="Upload Assistant") running_subprocesses = set() base_dir = os.path.abspath(os.path.dirname(__file__)) @@ -65,7 +75,7 @@ async def merge_meta(meta, saved_meta, path): saved_meta = json.load(f) overwrite_list = [ 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'description_link', 'description_file', 'desc', 'draft', 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio', 'manual_type', 'tvmaze_manual' ] @@ -121,6 +131,44 @@ def update_oeimg_to_onlyimage(): console.print("[yellow]No 'oeimg' or 'oeimg_api' found to update in config.py[/yellow]") +async def validate_tracker_logins(meta, trackers=None): + if 'tracker_status' not in meta: + meta['tracker_status'] = {} + + # Filter trackers that are in both the list and tracker_class_map + valid_trackers = [tracker for tracker in trackers if tracker in tracker_class_map and tracker in http_trackers] + if "RTF" in trackers: + valid_trackers.append("RTF") + + if valid_trackers: + + async def validate_single_tracker(tracker_name): + """Validate credentials for a single tracker.""" + try: + if tracker_name not in meta['tracker_status']: + meta['tracker_status'][tracker_name] = {} + + tracker_class = tracker_class_map[tracker_name](config=config) + if meta['debug']: + console.print(f"[cyan]Validating {tracker_name} credentials...[/cyan]") + if tracker_name == "RTF": + login = await tracker_class.api_test(meta) + else: + login = await tracker_class.validate_credentials(meta) + + if not login: + meta['tracker_status'][tracker_name]['skipped'] = True + + return tracker_name, login + except 
Exception as e: + console.print(f"[red]Error validating {tracker_name}: {e}[/red]") + meta['tracker_status'][tracker_name]['skipped'] = True + return tracker_name, False + + # Run all tracker validations concurrently + await asyncio.gather(*[validate_single_tracker(tracker) for tracker in valid_trackers]) + + async def process_meta(meta, base_dir, bot=None): """Process the metadata for each queued path.""" if use_discord and bot: @@ -146,93 +194,265 @@ async def process_meta(meta, base_dir, bot=None): if str(ua).lower() == "true": meta['unattended'] = True console.print("[yellow]Running in Auto Mode") - meta['base_dir'] = base_dir prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) try: - meta = await prep.gather_prep(meta=meta, mode='cli') + results = await asyncio.gather( + prep.gather_prep(meta=meta, mode='cli'), + return_exceptions=True # Returns exceptions instead of raising them + ) + for result in results: + if isinstance(result, Exception): + return + else: + meta = result except Exception as e: console.print(f"Error in gather_prep: {e}") console.print(traceback.format_exc()) return - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await get_name(meta) + + meta['emby_debug'] = meta.get('emby_debug') if meta.get('emby_debug', False) else config['DEFAULT'].get('emby_debug', False) + if meta.get('emby_cat', None) == "movie" and meta.get('category', None) != "MOVIE": + console.print(f"[red]Wrong category detected! Expected 'MOVIE', but found: {meta.get('category', None)}[/red]") + meta['we_are_uploading'] = False + return + elif meta.get('emby_cat', None) == "tv" and meta.get('category', None) != "TV": + console.print("[red]TV content is not supported at this time[/red]") + meta['we_are_uploading'] = False + return + + # If unattended confirm and we had to get metadata ids from filename searching, skip the quick return so we can prompt about database information + if meta.get('emby', False) and not meta.get('no_ids', False) and not meta.get('unattended_confirm', False) and meta.get('unattended', False): + await nfo_link(meta) + meta['we_are_uploading'] = False + return + parser = Args(config) helper = UploadHelper() - if meta.get('trackers'): - trackers = meta['trackers'] - else: - default_trackers = config['TRACKERS'].get('default_trackers', '') - trackers = [tracker.strip() for tracker in default_trackers.split(',')] - if isinstance(trackers, str): - if "," in trackers: - trackers = [t.strip().upper() for t in trackers.split(',')] - else: - trackers = [trackers.strip().upper()] # Make it a list with one element - else: - trackers = [t.strip().upper() for t in trackers] - meta['trackers'] = trackers - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) - f.close() - confirm = await helper.get_confirmation(meta) + if not meta.get('emby', False): + if meta.get('trackers_remove', False): + remove_list = [t.strip().upper() for t in meta['trackers_remove'].split(',')] + for tracker in remove_list: + if tracker in meta['trackers']: + meta['trackers'].remove(tracker) + + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await get_name(meta) + + if meta['debug']: + console.print(f"Trackers list before editing: {meta['trackers']}") + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + f.close() + + if meta.get('emby_debug', False): + meta['original_imdb'] = meta.get('imdb_id', None) + 
meta['original_tmdb'] = meta.get('tmdb_id', None) + meta['original_mal'] = meta.get('mal_id', None) + meta['original_tvmaze'] = meta.get('tvmaze_id', None) + meta['original_tvdb'] = meta.get('tvdb_id', None) + meta['original_category'] = meta.get('category', None) + if 'matched_tracker' not in meta: + await client.get_pathed_torrents(meta['path'], meta) + if meta['is_disc']: + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + else: + search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None + search_file_folder = 'file' + await get_tracker_data(meta['video'], meta, search_term, search_file_folder, meta['category'], only_id=meta['only_id']) + + editargs_tracking = () + previous_trackers = meta.get('trackers', []) + try: + confirm = await helper.get_confirmation(meta) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) while confirm is False: - editargs = cli_ui.ask_string("Input args that need correction e.g. (--tag NTb --category tv --tmdb 12345)") - editargs = (meta['path'],) + tuple(editargs.split()) - if meta.get('debug', False): - editargs += ("--debug",) - if meta.get('trackers', None) is not None: - editargs += ("--trackers", ",".join(meta["trackers"])) - meta, help, before_args = parser.parse(editargs, meta) + try: + editargs = cli_ui.ask_string("Input args that need correction e.g. (--tag NTb --category tv --tmdb 12345)") + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) + + if editargs == "continue": + break + + if not editargs or not editargs.strip(): + console.print("[yellow]No input provided. Please enter arguments, type `continue` to continue or press Ctrl+C to exit.[/yellow]") + continue + + try: + editargs = tuple(editargs.split()) + except AttributeError: + console.print("[red]Bad input detected[/red]") + confirm = False + continue + # Tracks multiple edits + editargs_tracking = editargs_tracking + editargs + # Carry original args over, let parse handle duplicates + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')) + editargs_tracking, meta) + if not meta.get('trackers'): + meta['trackers'] = previous_trackers if isinstance(meta.get('trackers'), str): if "," in meta['trackers']: - meta['trackers'] = [t.strip() for t in meta['trackers'].split(',')] + meta['trackers'] = [t.strip().upper() for t in meta['trackers'].split(',')] else: - meta['trackers'] = [meta['trackers']] + meta['trackers'] = [meta['trackers'].strip().upper()] + elif isinstance(meta.get('trackers'), list): + meta['trackers'] = [t.strip().upper() for t in meta['trackers'] if isinstance(t, str)] + if meta['debug']: + console.print(f"Trackers list during edit process: {meta['trackers']}") meta['edit'] = True meta = await prep.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await get_name(meta) - confirm = await helper.get_confirmation(meta) + try: + confirm = await helper.get_confirmation(meta) + except EOFError: + console.print("\n[red]Exiting on user request (Ctrl+C)[/red]") + await cleanup() + reset_terminal() + sys.exit(1) - console.print(f"[green]Processing {meta['name']} for upload...[/green]") + if meta.get('emby', False): + if not meta['debug']: + await nfo_link(meta) + meta['we_are_uploading'] = False + return - audio_prompted = False - for tracker in ["HUNO", "OE", "AITHER", "ULCX", "DP", 
"CBR", "ASC", "BT", "LDU"]: - if tracker in trackers: - if not audio_prompted: - await process_desc_language(meta, desc=None, tracker=tracker) - audio_prompted = True - else: - if 'tracker_status' not in meta: - meta['tracker_status'] = {} - if tracker not in meta['tracker_status']: - meta['tracker_status'][tracker] = {} - if meta.get('unattended_audio_skip', False) or meta.get('unattended_subtitle_skip', False): - meta['tracker_status'][tracker]['skip_upload'] = True + if 'remove_trackers' in meta and meta['remove_trackers']: + removed = [] + for tracker in meta['remove_trackers']: + if tracker in meta['trackers']: + if meta['debug']: + console.print(f"[DEBUG] Would have removed {tracker} found in client") else: - meta['tracker_status'][tracker]['skip_upload'] = False + meta['trackers'].remove(tracker) + removed.append(tracker) + if removed: + console.print(f"[yellow]Removing trackers already in your client: {', '.join(removed)}[/yellow]") + if not meta['trackers']: + console.print("[red]No trackers remain after removal.[/red]") + successful_trackers = 0 + meta['skip_uploading'] = 10 - await asyncio.sleep(0.2) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) - await asyncio.sleep(0.2) + else: + console.print(f"[green]Processing {meta['name']} for upload...[/green]") - successful_trackers = await process_all_trackers(meta) + # reset trackers after any removals + trackers = meta['trackers'] + + audio_prompted = False + for tracker in ["AITHER", "ASC", "BJS", "BT", "CBR", "DP", "FF", "GPW", "HUNO", "IHD", "LDU", "LT", "OE", "PTS", "SAM", "SHRI", "SPD", "TTR", "TVC", "ULCX"]: + if tracker in trackers: + if not audio_prompted: + await process_desc_language(meta, desc=None, tracker=tracker) + audio_prompted = True + else: + if 'tracker_status' not in meta: + meta['tracker_status'] = {} + if tracker not in meta['tracker_status']: + meta['tracker_status'][tracker] = {} + if meta.get('unattended_audio_skip', False) or meta.get('unattended_subtitle_skip', False): + meta['tracker_status'][tracker]['skip_upload'] = True + else: + meta['tracker_status'][tracker]['skip_upload'] = False + + await asyncio.sleep(0.2) + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + await asyncio.sleep(0.2) + + try: + await validate_tracker_logins(meta, trackers) + await asyncio.sleep(0.2) + except Exception as e: + console.print(f"[yellow]Warning: Tracker validation encountered an error: {e}[/yellow]") + + successful_trackers = await process_all_trackers(meta) + + if meta.get('trackers_pass') is not None: + meta['skip_uploading'] = meta.get('trackers_pass') + else: + meta['skip_uploading'] = int(config['DEFAULT'].get('tracker_pass_checks', 1)) - if meta.get('trackers_pass') is not None: - meta['skip_uploading'] = meta.get('trackers_pass') - else: - meta['skip_uploading'] = int(config['DEFAULT'].get('tracker_pass_checks', 1)) if successful_trackers < int(meta['skip_uploading']) and not meta['debug']: - console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]") + console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). 
No uploads being processed.[/red]") else: meta['we_are_uploading'] = True + common = COMMON(config) + if meta.get('site_check', False): + for tracker in meta['trackers']: + upload_status = meta['tracker_status'].get(tracker, {}).get('upload', False) + if not upload_status: + if tracker == "AITHER" and meta.get('aither_trumpable') and len(meta.get('aither_trumpable', [])) > 0: + pass + else: + continue + if tracker not in meta['tracker_status']: + continue + + log_path = f"{base_dir}/tmp/{tracker}_search_results.json" + if not await common.path_exists(log_path): + await common.makedirs(os.path.dirname(log_path)) + + search_data = [] + if os.path.exists(log_path): + try: + async with aiofiles.open(log_path, 'r', encoding='utf-8') as f: + content = await f.read() + search_data = json.loads(content) if content.strip() else [] + except Exception: + search_data = [] + + existing_uuids = {entry.get('uuid') for entry in search_data if isinstance(entry, dict)} + + if meta['uuid'] not in existing_uuids: + search_entry = { + 'uuid': meta['uuid'], + 'path': meta.get('path', ''), + 'imdb_id': meta.get('imdb_id', 0), + 'tmdb_id': meta.get('tmdb_id', 0), + 'tvdb_id': meta.get('tvdb_id', 0), + 'mal_id': meta.get('mal_id', 0), + 'tvmaze_id': meta.get('tvmaze_id', 0), + } + if tracker == "AITHER": + search_entry['trumpable'] = meta.get('aither_trumpable', '') + search_data.append(search_entry) + + async with aiofiles.open(log_path, 'w', encoding='utf-8') as f: + await f.write(json.dumps(search_data, indent=4)) + meta['we_are_uploading'] = False + return + filename = meta.get('title', None) bdmv_filename = meta.get('filename', None) bdinfo = meta.get('bdinfo', None) videopath = meta.get('filelist', [None]) videopath = videopath[0] if videopath else None console.print(f"Processing {filename} for upload.....") + + meta['frame_overlay'] = config['DEFAULT'].get('frame_overlay', False) + for tracker in ['AZ', 'CZ', 'PHD']: + upload_status = meta['tracker_status'].get(tracker, {}).get('upload', False) + if tracker in meta['trackers'] and meta['frame_overlay'] and upload_status is True: + meta['frame_overlay'] = False + console.print("[yellow]AZ, CZ, and PHD do not allow frame overlays. 
Frame overlay will be disabled for this upload.[/yellow]") + + bdmv_mi_created = False + for tracker in ["ANT", "DC", "HUNO", "LCD"]: + upload_status = meta['tracker_status'].get(tracker, {}).get('upload', False) + if tracker in trackers and upload_status is True: + if not bdmv_mi_created: + await common.get_bdmv_mediainfo(meta) + bdmv_mi_created = True + progress_task = asyncio.create_task(print_progress("[yellow]Still processing, please wait...", interval=10)) try: if 'manual_frames' not in meta: @@ -258,9 +478,32 @@ async def process_meta(meta, base_dir, bot=None): meta['image_sizes'] = image_data['image_sizes'] if meta.get('debug'): console.print("[cyan]Loaded previously saved image sizes") + + if 'tonemapped' in image_data and not meta.get('tonemapped'): + meta['tonemapped'] = image_data['tonemapped'] + if meta.get('debug'): + console.print("[cyan]Loaded previously saved tonemapped status[/cyan]") + except Exception as e: console.print(f"[yellow]Could not load saved image data: {str(e)}") + if meta.get('is_disc', ""): + menus_data_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/menu_images.json" + if os.path.exists(menus_data_file): + try: + with open(menus_data_file, 'r') as menus_file: + menu_image_file = json.load(menus_file) + + if 'menu_images' in menu_image_file and not meta.get('menu_images'): + meta['menu_images'] = menu_image_file['menu_images'] + if meta.get('debug'): + console.print(f"[cyan]Loaded {len(menu_image_file['menu_images'])} previously saved disc menus") + + except Exception as e: + console.print(f"[yellow]Could not load saved menu image data: {str(e)}") + elif meta.get('path_to_menu_screenshots', ""): + await process_disc_menus(meta, config) + # Take Screenshots try: if meta['is_disc'] == "BDMV": @@ -357,6 +600,16 @@ async def process_meta(meta, base_dir, bot=None): if 'image_list' not in meta: meta['image_list'] = [] + manual_frames_str = meta.get('manual_frames', '') + if isinstance(manual_frames_str, str): + manual_frames_list = [f.strip() for f in manual_frames_str.split(',') if f.strip()] + manual_frames_count = len(manual_frames_list) + if meta['debug']: + console.print(f"Manual frames entered: {manual_frames_count}") + else: + manual_frames_count = 0 + if manual_frames_count > 0: + meta['screens'] = manual_frames_count if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False: return_dict = {} try: @@ -384,7 +637,8 @@ async def process_meta(meta, base_dir, bot=None): try: image_data = { "image_list": meta.get('image_list', []), - "image_sizes": meta.get('image_sizes', {}) + "image_sizes": meta.get('image_sizes', {}), + "tonemapped": meta.get('tonemapped', False) } with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/image_data.json", 'w') as img_file: @@ -401,6 +655,12 @@ async def process_meta(meta, base_dir, bot=None): except asyncio.CancelledError: pass + # check for valid image hosts for trackers that require it + for tracker_name in meta['trackers']: + if tracker_name in ['BHD', 'DC', 'GPW', 'HUNO', 'MTV', 'OE', 'PTP', 'TVC']: + tracker_class = tracker_class_map[tracker_name](config=config) + await tracker_class.check_image_hosts(meta) + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") if not os.path.exists(torrent_path): reuse_torrent = None @@ -518,12 +778,21 @@ def get_remote_version(url): def extract_changelog(content, from_version, to_version): """Extracts the changelog entries between the specified versions.""" - pattern = 
rf'__version__\s*=\s*"{re.escape(to_version)}"\s*(.*?)__version__\s*=\s*"{re.escape(from_version)}"' - match = re.search(pattern, content, re.DOTALL) - if match: - return match.group(1).strip() - else: - return None + # Try to find the to_version with 'v' prefix first (current format) + patterns_to_try = [ + rf'__version__\s*=\s*"{re.escape(to_version)}"\s*\n\s*"""\s*(.*?)\s*"""', # Try with 'v' prefix + rf'__version__\s*=\s*"{re.escape(to_version.lstrip("v"))}"\s*\n\s*"""\s*(.*?)\s*"""' # Try without 'v' prefix + ] + + for pattern in patterns_to_try: + match = re.search(pattern, content, re.DOTALL) + if match: + changelog = match.group(1).strip() + # Remove the comment markers (# ) that were added by the GitHub Action + changelog = re.sub(r'^# ', '', changelog, flags=re.MULTILINE) + return changelog + + return None async def update_notification(base_dir): @@ -570,9 +839,17 @@ async def do_the_thing(base_dir): else: break + meta['ua_name'] = 'Upload Assistant' meta['current_version'] = await update_notification(base_dir) + signature = 'Created by Upload Assistant' + if meta.get('current_version', ''): + signature += f" {meta['current_version']}" + meta['ua_signature'] = signature + meta['base_dir'] = base_dir + cleanup_only = any(arg in ('--cleanup', '-cleanup') for arg in sys.argv) and len(sys.argv) <= 2 + sanitize_meta = config['DEFAULT'].get('sanitize_meta', True) try: # If cleanup is the only operation, use a dummy path to satisfy the parser @@ -586,7 +863,8 @@ async def do_the_thing(base_dir): if meta.get('cleanup'): if os.path.exists(f"{base_dir}/tmp"): shutil.rmtree(f"{base_dir}/tmp") - console.print("[bold green]Successfully emptied tmp directory") + console.print("[yellow]Successfully emptied tmp directory[/yellow]") + console.print() if not meta.get('path') or cleanup_only: exit(0) @@ -600,10 +878,16 @@ async def do_the_thing(base_dir): is_binary = await get_mkbrr_path(meta, base_dir) if not meta['mkbrr']: - meta['mkbrr'] = int(config['DEFAULT'].get('mkbrr', False)) + try: + meta['mkbrr'] = int(config['DEFAULT'].get('mkbrr', False)) + except ValueError: + if meta['debug']: + console.print("[yellow]Invalid mkbrr config value, defaulting to False[/yellow]") + meta['mkbrr'] = False if meta['mkbrr'] and not is_binary: console.print("[bold red]mkbrr binary is not available. 
Please ensure it is installed correctly.[/bold red]") console.print("[bold red]Reverting to Torf[/bold red]") + console.print() meta['mkbrr'] = False queue, log_file = await handle_queue(path, meta, paths, base_dir) @@ -611,10 +895,21 @@ async def do_the_thing(base_dir): processed_files_count = 0 skipped_files_count = 0 base_meta = {k: v for k, v in meta.items()} - for path in queue: + + for queue_item in queue: total_files = len(queue) try: meta = base_meta.copy() + + if meta.get('site_upload_queue'): + # Extract path and metadata from site upload queue item + path = await process_site_upload_item(queue_item, meta) + current_item_path = path # Store for logging + else: + # Regular queue processing + path = queue_item + current_item_path = path + meta['path'] = path meta['uuid'] = None @@ -627,7 +922,9 @@ async def do_the_thing(base_dir): try: shutil.rmtree(tmp_path) os.makedirs(tmp_path, exist_ok=True) - console.print(f"[bold green]Successfully cleaned temp directory for {os.path.basename(path)}") + if meta['debug']: + console.print(f"[yellow]Successfully cleaned temp directory for {os.path.basename(path)}[/yellow]") + console.print() except Exception as e: console.print(f"[bold red]Failed to delete temp directory: {str(e)}") @@ -657,7 +954,6 @@ async def do_the_thing(base_dir): console.print(f"[red]Exception: '{path}': {e}") reset_terminal() - sanitize_meta = config['DEFAULT'].get('sanitize_meta', True) if use_discord and config['DISCORD'].get('discord_bot_token') and not meta['debug']: if (config.get('DISCORD', {}).get('only_unattended', False) and meta.get('unattended', False)) or not config.get('DISCORD', {}).get('only_unattended', False): try: @@ -691,15 +987,25 @@ async def do_the_thing(base_dir): await process_meta(meta, base_dir, bot=bot) - if 'we_are_uploading' not in meta: - console.print("we are not uploading.......") - if 'queue' in meta and meta.get('queue') is not None: - processed_files_count += 1 - skipped_files_count += 1 - console.print(f"[cyan]Processed {processed_files_count}/{total_files} files with {skipped_files_count} skipped uploading.") - if not meta['debug']: - if log_file: - await save_processed_file(log_file, path) + if 'we_are_uploading' not in meta or not meta.get('we_are_uploading', False): + if config['DEFAULT'].get('cross_seeding', True): + await process_cross_seeds(meta) + if not meta.get('site_check', False): + if not meta.get('emby', False): + console.print("we are not uploading.......") + if 'queue' in meta and meta.get('queue') is not None: + processed_files_count += 1 + if not meta.get('emby', False): + skipped_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files with {skipped_files_count} skipped uploading.") + else: + console.print(f"[cyan]Processed {processed_files_count}/{total_files}.") + if not meta['debug'] or "debug" in os.path.basename(log_file): + if log_file: + if meta.get('site_upload_queue'): + await save_processed_path(log_file, current_item_path) + else: + await save_processed_file(log_file, path) else: console.print() @@ -707,17 +1013,94 @@ async def do_the_thing(base_dir): await process_trackers(meta, config, client, console, api_trackers, tracker_class_map, http_trackers, other_api_trackers) if use_discord and bot: await send_upload_status_notification(config, bot, meta) + + if config['DEFAULT'].get('cross_seeding', True): + await process_cross_seeds(meta) + if 'queue' in meta and meta.get('queue') is not None: processed_files_count += 1 if 'limit_queue' in meta and int(meta['limit_queue']) 
> 0: console.print(f"[cyan]Successfully uploaded {processed_files_count - skipped_files_count} of {meta['limit_queue']} in limit with {total_files} files.") else: console.print(f"[cyan]Successfully uploaded {processed_files_count - skipped_files_count}/{total_files} files.") - if not meta['debug']: + if not meta['debug'] or "debug" in os.path.basename(log_file): if log_file: - await save_processed_file(log_file, path) - await asyncio.sleep(0.1) - if sanitize_meta: + if meta.get('site_upload_queue'): + await save_processed_path(log_file, current_item_path) + else: + await save_processed_file(log_file, path) + + if meta['debug']: + finish_time = time.time() + console.print(f"Uploads processed in {finish_time - start_time:.4f} seconds") + + if use_discord and bot: + if config['DISCORD'].get('send_upload_links'): + try: + discord_message = "" + for tracker, status in meta.get('tracker_status', {}).items(): + try: + if tracker == "MTV" and 'status_message' in status and "data error" not in str(status['status_message']): + discord_message += f"{str(status['status_message'])}\n" + if 'torrent_id' in status: + tracker_class = tracker_class_map[tracker](config=config) + torrent_url = tracker_class.torrent_url + discord_message += f"{tracker}: {torrent_url}{status['torrent_id']}\n" + else: + if ( + 'status_message' in status + and 'torrent_id' not in status + and "data error" not in str(status['status_message']) + and tracker != "MTV" + ): + discord_message += f"{tracker}: {redact_private_info(status['status_message'])}\n" + elif 'status_message' in status and "data error" in str(status['status_message']): + discord_message += f"{tracker}: {str(status['status_message'])}\n" + else: + if 'skipping' in status and not status['skipping']: + discord_message += f"{tracker} gave no useful message.\n" + except Exception as e: + discord_message += f"Error printing {tracker} data: {e}\n" + discord_message += "All tracker uploads processed.\n" + await send_discord_notification(config, bot, discord_message, debug=meta.get('debug', False), meta=meta) + except Exception as e: + console.print(f"[red]Error in tracker print loop: {e}[/red]") + else: + await send_discord_notification(config, bot, f"Finished uploading: {meta['path']}\n", debug=meta.get('debug', False), meta=meta) + + find_requests = config['DEFAULT'].get('search_requests', False) if meta.get('search_requests') is None else meta.get('search_requests') + if find_requests and meta['trackers'] not in ([], None) and not (meta.get('site_check', False) and 'is_disc' not in meta): + console.print("[green]Searching for requests on supported trackers.....") + tracker_setup = TRACKER_SETUP(config=config) + if meta.get('site_check', False): + trackers = meta['requested_trackers'] + else: + trackers = meta['trackers'] + await tracker_setup.tracker_request(meta, trackers) + + if meta.get('site_check', False): + if 'queue' in meta and meta.get('queue') is not None: + processed_files_count += 1 + skipped_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") + if not meta['debug'] or "debug" in os.path.basename(log_file): + if log_file: + if meta.get('site_upload_queue'): + await save_processed_path(log_file, current_item_path) + else: + await save_processed_file(log_file, path) + + if meta.get('delete_tmp', False) and os.path.exists(tmp_path) and meta.get('emby', False): + try: + shutil.rmtree(tmp_path) + console.print(f"[yellow]Successfully deleted temp directory for {os.path.basename(path)}[/yellow]") + console.print() + 
except Exception as e: + console.print(f"[bold red]Failed to delete temp directory: {str(e)}") + + if 'limit_queue' in meta and int(meta['limit_queue']) > 0: + if (processed_files_count - skipped_files_count) >= int(meta['limit_queue']): + if sanitize_meta and not meta.get('emby', False): try: await asyncio.sleep(0.2) # We can't race the status prints meta = await clean_meta_for_export(meta) @@ -726,25 +1109,17 @@ async def do_the_thing(base_dir): await cleanup() gc.collect() reset_terminal() - - if 'limit_queue' in meta and int(meta['limit_queue']) > 0: - if (processed_files_count - skipped_files_count) >= int(meta['limit_queue']): - console.print(f"[red]Uploading limit of {meta['limit_queue']} files reached. Stopping queue processing. {skipped_files_count} skipped files.") break - if meta['debug']: - finish_time = time.time() - console.print(f"Uploads processed in {finish_time - start_time:.4f} seconds") - - if use_discord and bot: - await send_discord_notification(config, bot, f"Finsished uploading: {meta['path']}", debug=meta.get('debug', False), meta=meta) - - if sanitize_meta: + if sanitize_meta and not meta.get('emby', False): try: - await asyncio.sleep(0.3) # We can't race the status prints + await asyncio.sleep(0.2) meta = await clean_meta_for_export(meta) except Exception as e: console.print(f"[red]Error cleaning meta for export: {e}") + await cleanup() + gc.collect() + reset_terminal() except Exception as e: console.print(f"[bold red]An unexpected error occurred: {e}") @@ -766,9 +1141,186 @@ async def do_the_thing(base_dir): reset_terminal() +async def process_cross_seeds(meta): + all_trackers = api_trackers | http_trackers | other_api_trackers + + # Get list of trackers to exclude (already in client) + remove_list = [] + if meta.get('remove_trackers', False): + if isinstance(meta['remove_trackers'], str): + remove_list = [t.strip().upper() for t in meta['remove_trackers'].split(',')] + elif isinstance(meta['remove_trackers'], list): + remove_list = [t.strip().upper() for t in meta['remove_trackers'] if isinstance(t, str)] + + # Check for trackers that haven't been dupe-checked yet + dupe_checked_trackers = meta.get('dupe_checked_trackers', []) + + # Validate tracker configs and build list of valid unchecked trackers + valid_unchecked_trackers = [] + for tracker in all_trackers: + if tracker in dupe_checked_trackers or meta.get(f'{tracker}_cross_seed', None) is not None or tracker in remove_list: + continue + + tracker_config = config.get('TRACKERS', {}).get(tracker, {}) + if not tracker_config: + if meta.get('debug'): + console.print(f"[yellow]Tracker {tracker} not found in config, skipping[/yellow]") + continue + + api_key = tracker_config.get('api_key', '') + announce_url = tracker_config.get('announce_url', '') + + # Ensure both values are strings and strip whitespace + api_key = str(api_key).strip() if api_key else '' + announce_url = str(announce_url).strip() if announce_url else '' + + # Skip if both api_key and announce_url are empty + if not api_key and not announce_url: + if meta.get('debug'): + console.print(f"[yellow]Tracker {tracker} has no api_key or announce_url set, skipping[/yellow]") + continue + + # Skip trackers with placeholder announce URLs + placeholder_patterns = ['', 'customannounceurl', 'get from upload page', 'Custom_Announce_URL', 'PASS_KEY', 'insertyourpasskeyhere'] + announce_url_lower = announce_url.lower() + if any(pattern.lower() in announce_url_lower for pattern in placeholder_patterns): + if meta.get('debug'): + console.print(f"[yellow]Tracker 
{tracker} has placeholder announce_url, skipping[/yellow]") + continue + + valid_unchecked_trackers.append(tracker) + + # Search for cross-seeds on unchecked trackers + if valid_unchecked_trackers and config['DEFAULT'].get('cross_seed_check_everything', False): + console.print(f"[cyan]Checking for cross-seeds on unchecked trackers: {valid_unchecked_trackers}[/cyan]") + + try: + await validate_tracker_logins(meta, valid_unchecked_trackers) + await asyncio.sleep(0.2) + except Exception as e: + console.print(f"[yellow]Warning: Tracker validation encountered an error: {e}[/yellow]") + + # Store original unattended value + original_unattended = meta.get('unattended', False) + meta['unattended'] = True + + helper = UploadHelper() + + async def check_tracker_for_dupes(tracker): + try: + tracker_class = tracker_class_map[tracker](config=config) + disctype = meta.get('disctype', '') + + # Search for existing torrents + if tracker != "PTP": + dupes = await tracker_class.search_existing(meta, disctype) + else: + ptp = PTP(config=config) + if not meta.get('ptp_groupID'): + groupID = await ptp.get_group_by_imdb(meta['imdb']) + meta['ptp_groupID'] = groupID + dupes = await ptp.search_existing(meta['ptp_groupID'], meta, disctype) + + if dupes: + dupes = await filter_dupes(dupes, meta, tracker) + await helper.dupe_check(dupes, meta, tracker) + + except Exception as e: + if meta.get('debug'): + console.print(f"[yellow]Error checking {tracker} for cross-seeds: {e}[/yellow]") + + # Run all dupe checks concurrently + await asyncio.gather(*[check_tracker_for_dupes(tracker) for tracker in valid_unchecked_trackers], return_exceptions=True) + + # Restore original unattended value + meta['unattended'] = original_unattended + + # Filter to only trackers with cross-seed data + valid_trackers = [tracker for tracker in all_trackers if meta.get(f'{tracker}_cross_seed', None) is not None] + + if not valid_trackers: + if meta.get('debug'): + console.print("[yellow]No trackers found with cross-seed data[/yellow]") + return + + console.print(f"[cyan]Valid trackers for cross-seed check: {valid_trackers}[/cyan]") + + common = COMMON(config) + try: + concurrency_limit = int(config.get('DEFAULT', {}).get('cross_seed_concurrency', 8)) + except (TypeError, ValueError): + concurrency_limit = 8 + semaphore = asyncio.Semaphore(max(1, concurrency_limit)) + debug = meta.get('debug', False) + + async def handle_cross_seed(tracker): + cross_seed_key = f'{tracker}_cross_seed' + cross_seed_value = meta.get(cross_seed_key, False) + + if debug: + console.print(f"[cyan]Debug: {tracker} - cross_seed: {redact_private_info(cross_seed_value)}") + + if not cross_seed_value: + return + + if debug: + console.print(f"[green]Found cross-seed for {tracker}!") + + download_url = None + if isinstance(cross_seed_value, str) and cross_seed_value.startswith('http'): + download_url = cross_seed_value + + headers = None + if tracker == "RTF": + headers = { + 'accept': 'application/json', + 'Authorization': config['TRACKERS'][tracker]['api_key'].strip(), + } + + if tracker == "AR" and download_url: + try: + ar = AR(config=config) + auth_key = await ar.get_auth_key(meta) + + # Extract torrent_pass from announce_url + announce_url = config['TRACKERS']['AR'].get('announce_url', '') + # Pattern: http://tracker.alpharatio.cc:2710/PASSKEY/announce + match = re.search(r':\d+/([^/]+)/announce', announce_url) + torrent_pass = match.group(1) if match else None + + if auth_key and torrent_pass: + # Append auth_key and torrent_pass to download_url + separator = '&' if 
'?' in download_url else '?' + download_url += f"{separator}authkey={auth_key}&torrent_pass={torrent_pass}" + if debug: + console.print("[cyan]Added AR auth_key and torrent_pass to download URL[/cyan]") + except Exception as e: + if debug: + console.print(f"[yellow]Error getting AR auth credentials: {e}[/yellow]") + + async with semaphore: + await common.download_tracker_torrent( + meta, + tracker, + headers=headers, + params=None, + downurl=download_url, + hash_is_id=False, + cross=True + ) + await client.add_to_client(meta, tracker, cross=True) + + tasks = [(tracker, asyncio.create_task(handle_cross_seed(tracker))) for tracker in valid_trackers] + + results = await asyncio.gather(*(task for _, task in tasks), return_exceptions=True) + for (tracker, _), result in zip(tasks, results): + if isinstance(result, Exception): + console.print(f"[red]Cross-seed handling failed for {tracker}: {result}[/red]") + + async def get_mkbrr_path(meta, base_dir=None): try: - mkbrr_path = await ensure_mkbrr_binary(base_dir, debug=meta['debug'], version="v1.8.1") + mkbrr_path = await ensure_mkbrr_binary(base_dir, debug=meta['debug'], version="v1.14.0") return mkbrr_path except Exception as e: console.print(f"[red]Error setting up mkbrr binary: {e}[/red]") @@ -791,9 +1343,6 @@ async def main(): console.print("[bold red]Program interrupted. Exiting safely.[/bold red]") except Exception as e: console.print(f"[bold red]Unexpected error: {e}[/bold red]") - finally: - await cleanup() - reset_terminal() if __name__ == "__main__": @@ -811,5 +1360,6 @@ async def main(): console.print(f"[bold red]Critical error: {e}[/bold red]") finally: asyncio.run(cleanup()) + gc.collect() reset_terminal() sys.exit(0) diff --git a/web_ui/__init__.py b/web_ui/__init__.py new file mode 100644 index 000000000..cee65e5de --- /dev/null +++ b/web_ui/__init__.py @@ -0,0 +1,3 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +"""Upload Assistant Web UI""" +__version__ = "1.0.0" diff --git a/web_ui/server.py b/web_ui/server.py new file mode 100644 index 000000000..55e97cdf0 --- /dev/null +++ b/web_ui/server.py @@ -0,0 +1,355 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +from flask import Flask, render_template, request, jsonify, Response +from flask_cors import CORS +import subprocess +import json +import os +import sys +import traceback +import re +import threading +import queue +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +app = Flask(__name__) +CORS(app) + +# ANSI color code regex pattern +ANSI_ESCAPE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') + +# Store active processes +active_processes = {} + + +def strip_ansi(text): + """Remove ANSI escape codes from text""" + return ANSI_ESCAPE.sub('', text) + + +@app.route('/') +def index(): + """Serve the main UI""" + try: + return render_template('index.html') + except Exception as e: + error_msg = f"Error loading template: {str(e)}\n\nTraceback:\n{traceback.format_exc()}" + print(error_msg) + return f"
{error_msg}
", 500 + + +@app.route('/api/health') +def health(): + """Health check endpoint""" + return jsonify({ + 'status': 'healthy', + 'success': True, + 'message': 'Upload Assistant Web UI is running' + }) + + +@app.route('/api/browse') +def browse_path(): + """Browse filesystem paths""" + path = request.args.get('path', '/') + print(f"Browsing path: {path}") + + try: + if not os.path.exists(path): + return jsonify({ + 'error': f'Path does not exist: {path}', + 'success': False + }), 404 + + if not os.path.isdir(path): + return jsonify({ + 'error': f'Not a directory: {path}', + 'success': False + }), 400 + + items = [] + try: + for item in sorted(os.listdir(path)): + # Skip hidden files + if item.startswith('.'): + continue + + full_path = os.path.join(path, item) + try: + is_dir = os.path.isdir(full_path) + + items.append({ + 'name': item, + 'path': full_path, + 'type': 'folder' if is_dir else 'file', + 'children': [] if is_dir else None + }) + except (PermissionError, OSError): + continue + + print(f"Found {len(items)} items in {path}") + + except PermissionError: + error_msg = f'Permission denied: {path}' + print(f"Error: {error_msg}") + return jsonify({'error': error_msg, 'success': False}), 403 + + return jsonify({ + 'items': items, + 'success': True, + 'path': path, + 'count': len(items) + }) + + except Exception as e: + error_msg = f'Error browsing {path}: {str(e)}' + print(f"Error: {error_msg}") + print(traceback.format_exc()) + return jsonify({'error': error_msg, 'success': False}), 500 + + +@app.route('/api/execute', methods=['POST', 'OPTIONS']) +def execute_command(): + """Execute upload.py with interactive terminal support""" + + if request.method == 'OPTIONS': + return '', 204 + + try: + data = request.json + if not data: + return jsonify({'error': 'No JSON data received', 'success': False}), 400 + + path = data.get('path') + args = data.get('args', '') + session_id = data.get('session_id', 'default') + + print(f"Execute request - Path: {path}, Args: {args}, Session: {session_id}") + + if not path: + return jsonify({ + 'error': 'Missing path', + 'success': False + }), 400 + + def generate(): + try: + # Build command to run upload.py directly + command = ['python', '-u', '/Upload-Assistant/upload.py', path] + + # Add arguments if provided + if args: + import shlex + command.extend(shlex.split(args)) + + print(f"Running: {' '.join(command)}") + + yield f"data: {json.dumps({'type': 'system', 'data': f'Executing: {' '.join(command)}'})}\n\n" + + # Set environment to unbuffered and force line buffering + env = os.environ.copy() + env['PYTHONUNBUFFERED'] = '1' + env['PYTHONIOENCODING'] = 'utf-8' + # Disable Python output buffering + + process = subprocess.Popen( + command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=0, # Completely unbuffered + cwd='/Upload-Assistant', + env=env, + universal_newlines=True + ) + + # Store process for input handling (no queue needed) + active_processes[session_id] = { + 'process': process + } + + # Thread to read stdout - stream raw output with ANSI codes + def read_stdout(): + try: + while True: + # Read in small chunks for real-time streaming + chunk = process.stdout.read(1) + if not chunk: + break + output_queue.put(('stdout', chunk)) + except Exception as e: + print(f"stdout read error: {e}") + + # Thread to read stderr - stream raw output + def read_stderr(): + try: + while True: + chunk = process.stderr.read(1) + if not chunk: + break + output_queue.put(('stderr', chunk)) + except Exception as e: 
+ print(f"stderr read error: {e}") + + output_queue = queue.Queue() + + # Start threads (no input thread needed - we write directly) + stdout_thread = threading.Thread(target=read_stdout, daemon=True) + stderr_thread = threading.Thread(target=read_stderr, daemon=True) + + stdout_thread.start() + stderr_thread.start() + + # Stream output as raw characters + while process.poll() is None or not output_queue.empty(): + try: + output_type, char = output_queue.get(timeout=0.1) + # Send raw character data (preserves ANSI codes) + yield f"data: {json.dumps({'type': output_type, 'data': char})}\n\n" + except queue.Empty: + # Send keepalive + yield f"data: {json.dumps({'type': 'keepalive'})}\n\n" + + # Wait for process to finish + process.wait() + + # Clean up + if session_id in active_processes: + del active_processes[session_id] + + yield f"data: {json.dumps({'type': 'exit', 'code': process.returncode})}\n\n" + + except Exception as e: + error_msg = f'Execution error: {str(e)}' + print(f"Error: {error_msg}") + print(traceback.format_exc()) + yield f"data: {json.dumps({'type': 'error', 'data': error_msg})}\n\n" + + # Clean up on error + if session_id in active_processes: + del active_processes[session_id] + + return Response(generate(), mimetype='text/event-stream') + + except Exception as e: + error_msg = f'Request error: {str(e)}' + print(f"Error: {error_msg}") + print(traceback.format_exc()) + return jsonify({'error': error_msg, 'success': False}), 500 + + +@app.route('/api/input', methods=['POST']) +def send_input(): + """Send user input to running process""" + try: + data = request.json + session_id = data.get('session_id', 'default') + user_input = data.get('input', '') + + print(f"Received input for session {session_id}: '{user_input}'") + + if session_id not in active_processes: + return jsonify({'error': 'No active process', 'success': False}), 404 + + # Always add newline to send the input + input_with_newline = user_input + '\n' + + # Write to process stdin + try: + process_info = active_processes[session_id] + process = process_info['process'] + + if process.poll() is None: # Process still running + process.stdin.write(input_with_newline) + process.stdin.flush() + print(f"Sent to stdin: '{input_with_newline.strip()}'") + else: + print(f"Process already terminated for session {session_id}") + return jsonify({'error': 'Process not running', 'success': False}), 400 + + except Exception as e: + print(f"Error writing to stdin: {str(e)}") + return jsonify({'error': f'Failed to write input: {str(e)}', 'success': False}), 500 + + return jsonify({'success': True}) + + except Exception as e: + error_msg = f'Input error: {str(e)}' + print(f"Error: {error_msg}") + return jsonify({'error': error_msg, 'success': False}), 500 + + +@app.route('/api/kill', methods=['POST']) +def kill_process(): + """Kill a running process""" + try: + data = request.json + session_id = data.get('session_id') + + print(f"Kill request for session {session_id}") + + if session_id not in active_processes: + return jsonify({'error': 'No active process', 'success': False}), 404 + + # Get the process + process_info = active_processes[session_id] + process = process_info['process'] + + # Terminate the process + process.terminate() + + # Give it a moment to terminate gracefully + try: + process.wait(timeout=2) + except Exception: + # Force kill if it doesn't terminate + process.kill() + + # Clean up + del active_processes[session_id] + + print(f"Process killed for session {session_id}") + return jsonify({'success': True, 
'message': 'Process terminated'}) + + except Exception as e: + error_msg = f'Kill error: {str(e)}' + print(f"Error: {error_msg}") + return jsonify({'error': error_msg, 'success': False}), 500 + + +@app.errorhandler(404) +def not_found(e): + return jsonify({'error': 'Not found', 'success': False}), 404 + + +@app.errorhandler(500) +def internal_error(e): + print(f"500 error: {str(e)}") + print(traceback.format_exc()) + return jsonify({'error': 'Internal server error', 'success': False}), 500 + + +if __name__ == '__main__': + print("=" * 50) + print("Starting Upload Assistant Web UI...") + print("=" * 50) + print(f"Python version: {sys.version}") + print(f"Working directory: {os.getcwd()}") + print("Server will run at: http://localhost:5000") + print("Health check: http://localhost:5000/api/health") + print("=" * 50) + + try: + app.run( + host='0.0.0.0', + port=5000, + debug=True, + threaded=True, + use_reloader=False + ) + except Exception as e: + print(f"FATAL: Failed to start server: {str(e)}") + print(traceback.format_exc()) + sys.exit(1) diff --git a/web_ui/static/js/app.js b/web_ui/static/js/app.js new file mode 100644 index 000000000..3041c20f7 --- /dev/null +++ b/web_ui/static/js/app.js @@ -0,0 +1,673 @@ +const { useState, useRef, useEffect } = React; + +// Icon components +const FolderIcon = () => ( + + + +); + +const FolderOpenIcon = () => ( + + + +); + +const FileIcon = () => ( + + + +); + +const TerminalIcon = () => ( + + + +); + +const PlayIcon = () => ( + + + + +); + +const TrashIcon = () => ( + + + +); + +const UploadIcon = () => ( + + + +); + +function AudionutsUAGUI() { + const API_BASE = window.location.origin + '/api'; + + const [directories, setDirectories] = useState([ + { name: 'data', type: 'folder', path: '/data', children: [] }, + { name: 'torrent_storage_dir', type: 'folder', path: '/torrent_storage_dir', children: [] }, + { name: 'Upload-Assistant', type: 'folder', path: '/Upload-Assistant', children: [] } + ]); + + const [selectedPath, setSelectedPath] = useState(''); + const [selectedName, setSelectedName] = useState(''); + const [customArgs, setCustomArgs] = useState(''); + const [isExecuting, setIsExecuting] = useState(false); + const [expandedFolders, setExpandedFolders] = useState(new Set(['/data', '/torrent_storage_dir'])); + const [sessionId, setSessionId] = useState(''); + const [sidebarWidth, setSidebarWidth] = useState(320); + const [isResizing, setIsResizing] = useState(false); + const [userInput, setUserInput] = useState(''); + const [isDarkMode, setIsDarkMode] = useState(true); // Default to dark mode + + const terminalRef = useRef(null); + const terminalContainerRef = useRef(null); + const xtermRef = useRef(null); + const fitAddonRef = useRef(null); + const inputRef = useRef(null); + + // Initialize xterm.js terminal + useEffect(() => { + if (terminalContainerRef.current && !xtermRef.current) { + // Create terminal instance with dynamic theme + const getTerminalTheme = () => { + if (isDarkMode) { + return { + background: '#000000', + foreground: '#ffffff', + cursor: '#ffffff', + black: '#000000', + red: '#e06c75', + green: '#98c379', + yellow: '#d19a66', + blue: '#61afef', + magenta: '#c678dd', + cyan: '#56b6c2', + white: '#abb2bf', + brightBlack: '#5c6370', + brightRed: '#e06c75', + brightGreen: '#98c379', + brightYellow: '#d19a66', + brightBlue: '#61afef', + brightMagenta: '#c678dd', + brightCyan: '#56b6c2', + brightWhite: '#ffffff' + }; + } else { + return { + background: '#ffffff', + foreground: '#000000', + cursor: '#000000', + black: '#000000', 
+ red: '#c91b00', + green: '#00c200', + yellow: '#c7c400', + blue: '#0037da', + magenta: '#c930c7', + cyan: '#00c5c7', + white: '#c7c7c7', + brightBlack: '#686868', + brightRed: '#ff6d67', + brightGreen: '#5ff967', + brightYellow: '#fefb67', + brightBlue: '#6871ff', + brightMagenta: '#ff76ff', + brightCyan: '#5ffdff', + brightWhite: '#ffffff' + }; + } + }; + + const term = new Terminal({ + cursorBlink: true, + fontSize: 14, + fontFamily: 'Menlo, Monaco, "Courier New", monospace', + theme: getTerminalTheme(), + scrollback: 10000, + convertEol: true + }); + + // Add fit addon for responsive sizing + const fitAddon = new FitAddon.FitAddon(); + term.loadAddon(fitAddon); + + // Open terminal in container + term.open(terminalContainerRef.current); + fitAddon.fit(); + + // Handle user input from terminal - DISABLED for now, use input bar instead + // We'll keep terminal read-only and use the input field below + term.onData(data => { + // Do nothing - input handled by input field below + }); + + // Welcome message + term.writeln('\x1b[1;36m╔═══════════════════════════════════════════════════════════════╗\x1b[0m'); + term.writeln('\x1b[1;36m║\x1b[0m \x1b[1;35mUpload Assistant Interactive Terminal\x1b[0m \x1b[1;36m║\x1b[0m'); + term.writeln('\x1b[1;36m╚═══════════════════════════════════════════════════════════════╝\x1b[0m'); + term.writeln(''); + term.writeln('\x1b[1;33m📋 Quick Start:\x1b[0m'); + term.writeln(' 1. Select a file or folder from the left panel'); + term.writeln(' 2. Add Upload Assistant arguments (optional)'); + term.writeln(' 3. Click "Execute Upload" to start'); + term.writeln(' 4. Type responses directly in this terminal'); + term.writeln(''); + + xtermRef.current = term; + fitAddonRef.current = fitAddon; + + // Resize terminal on window resize + const handleResize = () => { + if (fitAddonRef.current) { + fitAddonRef.current.fit(); + } + }; + window.addEventListener('resize', handleResize); + + return () => { + window.removeEventListener('resize', handleResize); + if (xtermRef.current) { + xtermRef.current.dispose(); + } + }; + } + }, []); + + // Update terminal theme when dark mode changes + useEffect(() => { + if (xtermRef.current) { + const getTerminalTheme = () => { + if (isDarkMode) { + return { + background: '#000000', + foreground: '#ffffff', + cursor: '#ffffff', + black: '#000000', + red: '#e06c75', + green: '#98c379', + yellow: '#d19a66', + blue: '#61afef', + magenta: '#c678dd', + cyan: '#56b6c2', + white: '#abb2bf', + brightBlack: '#5c6370', + brightRed: '#e06c75', + brightGreen: '#98c379', + brightYellow: '#d19a66', + brightBlue: '#61afef', + brightMagenta: '#c678dd', + brightCyan: '#56b6c2', + brightWhite: '#ffffff' + }; + } else { + return { + background: '#ffffff', + foreground: '#000000', + cursor: '#000000', + black: '#000000', + red: '#c91b00', + green: '#00c200', + yellow: '#c7c400', + blue: '#0037da', + magenta: '#c930c7', + cyan: '#00c5c7', + white: '#c7c7c7', + brightBlack: '#686868', + brightRed: '#ff6d67', + brightGreen: '#5ff967', + brightYellow: '#fefb67', + brightBlue: '#6871ff', + brightMagenta: '#ff76ff', + brightCyan: '#5ffdff', + brightWhite: '#ffffff' + }; + } + }; + + xtermRef.current.options.theme = getTerminalTheme(); + } + }, [isDarkMode]); + + // Focus input when executing + useEffect(() => { + if (isExecuting && inputRef.current) { + inputRef.current.focus(); + } + }, [isExecuting]); + + const sendInput = async () => { + if (!sessionId) return; + + const term = xtermRef.current; + + // If input is empty, just send Enter (newline) + const 
inputToSend = userInput.trim() === '' ? '' : userInput; + + setUserInput(''); + + // Show what user typed in terminal (only if not empty) + if (term && inputToSend !== '') { + term.writeln('\x1b[1;36m> ' + inputToSend + '\x1b[0m'); + } + + try { + // Send the input (empty string sends just Enter) + await fetch(`${API_BASE}/input`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + session_id: sessionId, + input: inputToSend + }) + }); + } catch (error) { + if (term) { + term.writeln('\x1b[1;31mFailed to send input: ' + error.message + '\x1b[0m'); + } + } + }; + + const handleKeyPress = (e) => { + if (e.key === 'Enter') { + sendInput(); + } + }; + useEffect(() => { + if (fitAddonRef.current && !isResizing) { + setTimeout(() => { + fitAddonRef.current.fit(); + }, 100); + } + }, [sidebarWidth, isResizing]); + + const toggleFolder = async (path) => { + const newExpanded = new Set(expandedFolders); + + if (newExpanded.has(path)) { + newExpanded.delete(path); + } else { + newExpanded.add(path); + await loadFolderContents(path); + } + + setExpandedFolders(newExpanded); + }; + + const loadFolderContents = async (path) => { + try { + const response = await fetch(`${API_BASE}/browse?path=${encodeURIComponent(path)}`); + const data = await response.json(); + + if (data.success && data.items) { + updateDirectoryTree(path, data.items); + } + } catch (error) { + console.error('Failed to load folder:', error); + } + }; + + const updateDirectoryTree = (path, items) => { + const updateTree = (nodes) => { + return nodes.map(node => { + if (node.path === path) { + return { ...node, children: items }; + } else if (node.children) { + return { ...node, children: updateTree(node.children) }; + } + return node; + }); + }; + + setDirectories(updateTree(directories)); + }; + + const renderFileTree = (items, level = 0) => { + return items.map((item, idx) => ( +
+      {/* [row markup omitted: clickable entry with folder/file icon and the item name] */}
+      onClick={() => {
+        if (item.type === 'folder') {
+          toggleFolder(item.path);
+        }
+        setSelectedPath(item.path);
+        setSelectedName(item.name);
+      }}
+      {item.type === 'folder' && expandedFolders.has(item.path) && item.children && item.children.length > 0 && (
+        {renderFileTree(item.children, level + 1)}
+      )}
+ )); + }; + + const executeCommand = async () => { + if (!selectedPath) { + if (xtermRef.current) { + xtermRef.current.writeln('\x1b[1;31m✗ Please select a file or folder first\x1b[0m'); + } + return; + } + + const term = xtermRef.current; + if (!term) return; + + const newSessionId = 'session_' + Date.now(); + setSessionId(newSessionId); + setIsExecuting(true); + + term.writeln(''); + term.writeln('\x1b[1;33m$ python upload.py "' + selectedPath + '" ' + customArgs + '\x1b[0m'); + term.writeln('\x1b[1;34m→ Starting execution...\x1b[0m'); + + try { + const response = await fetch(`${API_BASE}/execute`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + path: selectedPath, + args: customArgs, + session_id: newSessionId + }) + }); + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value); + const lines = chunk.split('\n'); + + for (const line of lines) { + if (!line.trim() || !line.startsWith('data: ')) continue; + + try { + const data = JSON.parse(line.substring(6)); + + if (data.type === 'stdout' || data.type === 'stderr') { + // Write raw output with ANSI codes preserved + term.write(data.data); + } else if (data.type === 'exit') { + term.writeln(''); + term.writeln(`\x1b[1;34m✓ Process exited with code ${data.code}\x1b[0m`); + } + } catch (e) { + console.error('Parse error:', e); + } + } + } + + term.writeln('\x1b[1;32m✓ Execution completed\x1b[0m'); + term.writeln(''); + } catch (error) { + term.writeln('\x1b[1;31m✗ Execution error: ' + error.message + '\x1b[0m'); + } finally { + setIsExecuting(false); + setSessionId(''); + } + }; + + const clearTerminal = async () => { + const term = xtermRef.current; + + // If a process is running, kill it first + if (isExecuting && sessionId) { + try { + await fetch(`${API_BASE}/kill`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ session_id: sessionId }) + }); + + if (term) { + term.writeln(''); + term.writeln('\x1b[1;31m✗ Process terminated by user\x1b[0m'); + term.writeln(''); + } + + setIsExecuting(false); + setSessionId(''); + } catch (error) { + console.error('Failed to kill process:', error); + } + } + + // Clear the terminal + if (term) { + term.clear(); + + // Re-show welcome message + term.writeln('\x1b[1;36m╔═══════════════════════════════════════════════════════════════╗\x1b[0m'); + term.writeln('\x1b[1;36m║\x1b[0m \x1b[1;35mUpload Assistant Interactive Terminal\x1b[0m \x1b[1;36m║\x1b[0m'); + term.writeln('\x1b[1;36m╚═══════════════════════════════════════════════════════════════╝\x1b[0m'); + term.writeln(''); + term.writeln('\x1b[1;33m📋 Quick Start:\x1b[0m'); + term.writeln(' 1. Select a file or folder from the left panel'); + term.writeln(' 2. Add Upload Assistant arguments (optional)'); + term.writeln(' 3. Click "Execute Upload" to start'); + term.writeln(' 4. 
Type responses in the input field below'); + term.writeln(''); + } + }; + + // Sidebar resizing + const startResizing = () => { + setIsResizing(true); + }; + + const stopResizing = () => { + setIsResizing(false); + }; + + const resize = (e) => { + if (isResizing) { + const newWidth = e.clientX; + if (newWidth >= 200 && newWidth <= 600) { + setSidebarWidth(newWidth); + } + } + }; + + useEffect(() => { + if (isResizing) { + window.addEventListener('mousemove', resize); + window.addEventListener('mouseup', stopResizing); + return () => { + window.removeEventListener('mousemove', resize); + window.removeEventListener('mouseup', stopResizing); + }; + } + }, [isResizing]); + + return ( +
+      {/* [layout markup omitted: resizable "File Browser" sidebar rendering renderFileTree(directories),
+           resize handle, top panel with the "Upload Assistant Web UI" title and dark/light mode toggle,
+           "Selected Path" display, arguments input (placeholder
+           "--tmdb movie/12345 --trackers ptp,aither,ulcx --no-edition --no-tag"),
+           Execute Upload / Clear buttons, the xterm.js "Interactive Terminal" container with a
+           "Running" indicator, and the user input field wired to sendInput/handleKeyPress] */}
+    );
+}
+
+// Render the app
+const root = ReactDOM.createRoot(document.getElementById('root'));
+root.render(<AudionutsUAGUI />);
diff --git a/web_ui/templates/index.html b/web_ui/templates/index.html
new file mode 100644
index 000000000..9bcfe4672
--- /dev/null
+++ b/web_ui/templates/index.html
@@ -0,0 +1,38 @@
+<!-- [markup omitted: page titled "Upload Assistant - Web UI" with the root mount element used by
+     app.js and the script/style includes the UI relies on (React, ReactDOM, xterm.js and its fit addon)] -->
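
For context on how the new Flask endpoints fit together, here is a minimal sketch of a scripted client for the Web UI, assuming the server is reachable at http://localhost:5000 as printed in server.py's startup banner; the requests/threading usage, the example path, the argument string, and the 30-second prompt reply are illustrative assumptions rather than anything shipped in this PR.

    # Sketch: drive /api/execute and /api/input from a script (assumed host/port and inputs).
    import json
    import threading
    import requests

    BASE = "http://localhost:5000/api"
    SESSION = "session_demo"

    def answer_prompt():
        # Illustrative only: sends "y" to the running process (the server appends the
        # newline); an empty string would send just Enter.
        requests.post(f"{BASE}/input", json={"session_id": SESSION, "input": "y"})

    resp = requests.post(
        f"{BASE}/execute",
        json={"path": "/data/example-release", "args": "--trackers ptp", "session_id": SESSION},
        stream=True,
    )

    # Reply to an interactive prompt while the stream below is still being read.
    threading.Timer(30, answer_prompt).start()

    for raw in resp.iter_lines(decode_unicode=True):
        if not raw or not raw.startswith("data: "):
            continue
        event = json.loads(raw[len("data: "):])
        if event["type"] in ("stdout", "stderr"):
            print(event["data"], end="")  # raw chunks, ANSI escape codes preserved
        elif event["type"] == "exit":
            print(f"\nprocess exited with code {event['code']}")
        # 'system' and 'keepalive' events carry no process output and can be ignored

The stream mirrors what app.js consumes in executeCommand: each "data:" line is one JSON event, process output arrives as raw stdout/stderr chunks with ANSI codes intact, and interactive answers go through /api/input while the stream is still open.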