Mirror of https://github.com/mkb79/audible-cli.git (synced 2025-04-22 05:37:09 -04:00)

## Compare commits
55 commits (the compare table's author and date columns were empty; SHA1s only):

d1beda664a, 70af33a258, 629a6ef171, d5d5f3985b, 1d0972b830, 3839a026e8, 1a42f1c644, 6bc2b6797f, 1dc419868d, 08609d6ee2, f6a45c2998, 8ee6fc810b, 87319862a6, 194545e2d0, 04fe4c2254, e62c0cbf29, fe4bc080e0, fa8012ec4d, 0fb1de2ce9, c293afb883, ea7226a8b8, b26ef99332, 35fa35614d, 4bd2287222, 7cc5e6a4c4, 37c582ce70, 8c7a2382d2, 0731e54184, c54ea7416f, ec09d05825, 75f832c821, b6993ecce8, 8a6f3edcb8, ee70469cac, 47ba6b7dd8, eabd0f7a43, 72c45f5225, 8e5f4a7a52, eaaf68e4d3, f7562246a5, 0998eb773d, f0cd65af2f, ddae5f6707, b06426ae57, 1cc48ba06d, 34a01f9084, e29f66ed1d, 087eafe582, ec0e6d5165, 0668c48e31, 5398e55fd2, 6a6e0e10f2, e85d60055d, 2c277f0748, 0fe30b2ea2
25 changed files with 609 additions and 2217 deletions
### .github/FUNDING.yml (vendored, 13 deletions)

```diff
@@ -1,13 +0,0 @@
-# These are supported funding model platforms
-
-github: [mkb79]  # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
-patreon:  # Replace with a single Patreon username
-open_collective:  # Replace with a single Open Collective username
-ko_fi:  # Replace with a single Ko-fi username
-tidelift:  # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge:  # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay:  # Replace with a single Liberapay username
-issuehunt:  # Replace with a single IssueHunt username
-otechie:  # Replace with a single Otechie username
-lfx_crowdfunding:  # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
-custom:  # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
### .github/workflows/build.yml (vendored, 40 changes)

```diff
@@ -9,12 +9,10 @@ jobs:
   createrelease:
     name: Create Release
-    runs-on: ubuntu-latest
-    outputs:
-      release_url: ${{ steps.create-release.outputs.upload_url }}
+    runs-on: [ubuntu-latest]
     steps:
       - name: Create Release
-        id: create-release
+        id: create_release
         uses: actions/create-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -23,6 +21,13 @@ jobs:
           release_name: Release ${{ github.ref }}
           draft: false
           prerelease: false
+      - name: Output Release URL File
+        run: echo "${{ steps.create_release.outputs.upload_url }}" > release_url.txt
+      - name: Save Release URL File for publish
+        uses: actions/upload-artifact@v2
+        with:
+          name: release_url
+          path: release_url.txt

   build:
     name: Build packages
@@ -39,13 +44,13 @@ jobs:
             zip -r9 audible_linux_ubuntu_latest audible
           OUT_FILE_NAME: audible_linux_ubuntu_latest.zip
           ASSET_MIME: application/zip  # application/octet-stream
-        - os: ubuntu-20.04
+        - os: ubuntu-18.04
           TARGET: linux
           CMD_BUILD: >
             pyinstaller --clean -F --hidden-import audible_cli -n audible -c pyi_entrypoint.py &&
             cd dist/ &&
-            zip -r9 audible_linux_ubuntu_20_04 audible
-          OUT_FILE_NAME: audible_linux_ubuntu_20_04.zip
+            zip -r9 audible_linux_ubuntu_18_04 audible
+          OUT_FILE_NAME: audible_linux_ubuntu_18_04.zip
           ASSET_MIME: application/zip  # application/octet-stream
         - os: macos-latest
           TARGET: macos
@@ -80,23 +85,34 @@ jobs:
           OUT_FILE_NAME: audible_win.zip
           ASSET_MIME: application/zip
     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.8
+        uses: actions/setup-python@v2
        with:
-          python-version: 3.11
+          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip .[pyi] && pip list
      - name: Build with pyinstaller for ${{matrix.TARGET}}
        run: ${{matrix.CMD_BUILD}}
+      - name: Load Release URL File from release job
+        uses: actions/download-artifact@v2
+        with:
+          name: release_url
+          path: release_url
+      - name: Get Release File Name & Upload URL
+        id: get_release_info
+        shell: bash
+        run: |
+          value=`cat release_url/release_url.txt`
+          echo ::set-output name=upload_url::$value
      - name: Upload Release Asset
        id: upload-release-asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
-          upload_url: ${{ needs.createrelease.outputs.release_url }}
+          upload_url: ${{ steps.get_release_info.outputs.upload_url }}
          asset_path: ./dist/${{ matrix.OUT_FILE_NAME}}
          asset_name: ${{ matrix.OUT_FILE_NAME}}
          asset_content_type: ${{ matrix.ASSET_MIME}}
```
### .github/workflows/pypi-publish-test.yml (vendored, 12 changes)

```diff
@@ -6,20 +6,20 @@ on:
 jobs:
   build-n-publish:
     name: Build and publish Audible-cli to TestPyPI
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04

     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@master
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v1
        with:
-          python-version: 3.11
+          python-version: 3.9
      - name: Install setuptools and wheel
        run: pip install --upgrade pip setuptools wheel
      - name: Build a binary wheel and a source tarball
        run: python setup.py sdist bdist_wheel
      - name: Publish distribution to Test PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@master
        with:
          password: ${{ secrets.TEST_PYPI_API_TOKEN }}
          repository_url: https://test.pypi.org/legacy/
```
### .github/workflows/pypi-publish.yml (vendored, 12 changes)

```diff
@@ -6,19 +6,19 @@ on:
 jobs:
   build-n-publish:
     name: Build and publish Audible-cli to PyPI
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04

     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@master
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v1
        with:
-          python-version: 3.11
+          python-version: 3.9
      - name: Install setuptools and wheel
        run: pip install --upgrade pip setuptools wheel
      - name: Build a binary wheel and a source tarball
        run: python setup.py sdist bdist_wheel
      - name: Publish distribution to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@master
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}
```
### CHANGELOG.md (136 changes)

```diff
@@ -6,138 +6,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

-## Unreleased
-
-### Bugfix
-
-- Fixing `[Errno 18] Invalid cross-device link` when downloading files using the `--output-dir` option. This error is fixed by creating the resume file on the same location as the target file.
-
-### Added
-
-- The `--chapter-type` option is added to the download command. Chapter can now be
-  downloaded as `flat` or `tree` type. `tree` is the default. A default chapter type
-  can be set in the config file.
-
-### Changed
-
-- Improved podcast ignore feature in download command
-- make `--ignore-podcasts` and `--resolve-podcasts` options of download command mutual
-  exclusive
-- Switched from a HEAD to a GET request without loading the body in the downloader
-  class. This change improves the program's speed, as the HEAD request was taking
-  considerably longer than a GET request on some Audible pages.
-- `models.LibraryItem.get_content_metadatata` now accept a `chapter_type` argument.
-  Additional keyword arguments to this method are now passed through the metadata
-  request.
-- Update httpx version range to >=0.23.3 and <0.28.0.
-- fix typo from `resolve_podcats` to `resolve_podcasts`
-- `models.Library.resolve_podcats` is now deprecated and will be removed in a future version
-
-## [0.3.1] - 2024-03-19
-
-### Bugfix
-
-- fix a `TypeError` on some Python versions when calling `importlib.metadata.entry_points` with group argument
-
-## [0.3.0] - 2024-03-19
-
-### Added
-
-- Added a resume feature when downloading aaxc files.
-- New `downlaoder` module which contains a rework of the Downloader class.
-- If necessary, large audiobooks are now downloaded in parts.
-- Plugin command help page now contains additional information about the source of
-  the plugin.
-- Command help text now starts with `(P)` for plugin commands.
-
-### Changed
-
-- Rework plugin module
-- using importlib.metadata over setuptools (pkg_resources) to get entrypoints
-
-## [0.2.6] - 2023-11-16
-
-### Added
-
-- Update marketplace choices in `manage auth-file add` command. Now all available marketplaces are listed.
-
-### Bugfix
-
-- Avoid tqdm progress bar interruption by logger's output to console.
-- Fixing an issue with unawaited coroutines when the download command exited abnormal.
-
-### Changed
-
-- Update httpx version range to >=0.23.3 and <0.26.0.
-
-### Misc
-
-- add `freeze_support` to pyinstaller entry script (#78)
-
-## [0.2.5] - 2023-09-26
-
-### Added
-
-- Dynamically load available marketplaces from the `audible package`. Allows to implement a new marketplace without updating `audible-cli`.
-
-## [0.2.4] - 2022-09-21
-
-### Added
-
-- Allow download multiple cover sizes at once. Each cover size must be provided with the `--cover-size` option
-
-### Changed
-
-- Rework start_date and end_date option
-
-### Bugfix
-
-- In some cases, the purchase date is None. This results in an exception. Now check for purchase date or date added and skip, if date is missing
-
-## [0.2.3] - 2022-09-06
-
-### Added
-
-- `--start-date` and `--end-date` option to `download` command
-- `--start-date` and `--end-date` option to `library export` and `library list` command
-- better error handling for license requests
-- verify that a download link is valid
-- make sure an item is published before downloading the aax, aaxc or pdf file
-- `--ignore-errors` flag of the download command now continue, if an item failed to download
-
-## [0.2.2] - 2022-08-09
-
-### Bugfix
-
-- PDFs could not be found using the download command (#112)
-
-## [0.2.1] - 2022-07-29
-
-### Added
-
-- `library` command now outputs the `extended_product_description` field
-
-### Changed
-
-- by default a licenserequest (voucher) will not include chapter information by default
-- moved licenserequest part from `models.LibraryItem.get_aaxc_url` to its own `models.LibraryItem.get_license` function
-- allow book titles with hyphens (#96)
-- if there is no title fallback to an empty string (#98)
-- reduce `response_groups` for the download command to speed up fetching the library (#109)
-
-### Fixed
-
-- `Extreme` quality is not supported by the Audible API anymore (#107)
-- download command continued execution after error (#104)
-- Currently, paths with dots will break the decryption (#97)
-- `models.Library.from_api_full_sync` called `models.Library.from_api` with incorrect keyword arguments
-
-### Misc
-
-- reworked `cmd_remove-encryption` plugin command (e.g. support nested chapters, use chapter file for aaxc files)
-- added explanation in README.md for creating a second profile
-
 ## [0.2.0] - 2022-06-01

 ### Added

 - `--aax-fallback` option to `download` command to download books in aax format and fallback to aaxc, if the book is not available as aax
@@ -160,7 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

 ### Changed

-- bump `audible` to v0.8.2 to fix a bug in httpx
+- bump `audible` to v0.8.1
 - rework plugin examples in `plugin_cmds`
 - rename `config.Config` to `config.ConfigFile`
 - move `click_verbosity_logger` from `_logging` to `decorators` and rename it to `verbosity_option`
@@ -191,7 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

 ### Added

 - the `--version` option now checks if an update for `audible-cli` is available
-- build macOS releases in `onedir` mode
+- build macOS releases in onedir mode

 ### Bugfix
```
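A note on the `[Errno 18] Invalid cross-device link` fix listed under Unreleased: a rename only stays atomic within a single filesystem, so a resume file created in a temp directory cannot simply be renamed onto a target that lives on another device. A minimal sketch of the idea, with illustrative names rather than the project's actual downloader code:

```python
import os
import pathlib
import tempfile


def make_resume_file(target: pathlib.Path) -> pathlib.Path:
    """Create the partial-download file next to the target file.

    Renaming across filesystems raises OSError: [Errno 18] Invalid
    cross-device link. Creating the temporary file in the target's own
    directory keeps the final os.replace on one device, so it stays
    both valid and atomic.
    """
    fd, tmp_name = tempfile.mkstemp(dir=target.parent, suffix=".resume")
    os.close(fd)
    return pathlib.Path(tmp_name)


# after a successful download:
#   os.replace(resume_path, target)
```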
### README.md (57 changes)

````diff
@@ -13,7 +13,7 @@ It depends on the following packages:
 * aiofiles
 * audible
 * click
-* colorama (on Windows machines)
+* colorama (on windows machines)
 * httpx
 * Pillow
 * tabulate
@@ -30,7 +30,7 @@ pip install audible-cli

 ```

-or install it directly from GitHub with
+or install it directly from github with

 ```shell
@@ -40,25 +40,18 @@ pip install .

 ```

-or as the best solution using [pipx](https://pipx.pypa.io/stable/)
-
-```shell
-
-pipx install audible-cli
-
-```
-
 ## Standalone executables

 If you don't want to install `Python` and `audible-cli` on your machine, you can
 find standalone exe files below or on the [releases](https://github.com/mkb79/audible-cli/releases)
-page (including beta releases). At this moment Windows, Linux and macOS are supported.
+page. At this moment Windows, Linux and MacOS are supported.

 ### Links

 1. Linux
-   - [debian 11 onefile](https://github.com/mkb79/audible-cli/releases/latest/download/audible_linux_debian_11.zip)
    - [ubuntu latest onefile](https://github.com/mkb79/audible-cli/releases/latest/download/audible_linux_ubuntu_latest.zip)
-   - [ubuntu 20.04 onefile](https://github.com/mkb79/audible-cli/releases/latest/download/audible_linux_ubuntu_20_04.zip)
+   - [ubuntu 18.04 onefile](https://github.com/mkb79/audible-cli/releases/latest/download/audible_linux_ubuntu_18_04.zip)

 2. macOS
    - [macOS latest onefile](https://github.com/mkb79/audible-cli/releases/latest/download/audible_mac.zip)
@@ -89,7 +82,7 @@ pyinstaller --clean -D --hidden-import audible_cli -n audible -c pyi_entrypoint

 ### Hints

-There are some limitations when using plugins. The binary maybe does not contain
+There are some limitations when using plugins. The binarys maybe does not contain
 all the dependencies from your plugin script.

 ## Tab Completion
@@ -110,7 +103,7 @@ as config dir. Otherwise, it will use a folder depending on the operating
 system.

 | OS | Path |
-|----------|-------------------------------------------|
+| --- | --- |
 | Windows | ``C:\Users\<user>\AppData\Local\audible`` |
 | Unix | ``~/.audible`` |
 | Mac OS X | ``~/.audible`` |
@@ -154,11 +147,7 @@ The APP section supports the following options:
 - primary_profile: The profile to use, if no other is specified
 - filename_mode: When using the `download` command, a filename mode can be
   specified here. If not present, "ascii" will be used as default. To override
-  these option, you can provide a mode with the `--filename-mode` option of the
-  download command.
-- chapter_type: When using the `download` command, a chapter type can be specified
-  here. If not present, "tree" will be used as default. To override
-  these option, you can provide a type with the `--chapter-type` option of the
+  these option, you can provide a mode with the `filename-mode` option of the
   download command.

 #### Profile section
@@ -166,7 +155,6 @@ The APP section supports the following options:
 - auth_file: The auth file for this profile
 - country_code: The marketplace for this profile
 - filename_mode: See APP section above. Will override the option in APP section.
-- chapter_type: See APP section above. Will override the option in APP section.

 ## Getting started
@@ -174,14 +162,6 @@ Use the `audible-quickstart` or `audible quickstart` command in your shell
 to create your first config, profile and auth file. `audible-quickstart`
 runs on the interactive mode, so you have to answer multiple questions to finish.

-If you have used `audible quickstart` and want to add a second profile, you need to first create a new authfile and then update your config.toml file.
-
-So the correct order is:
-
-1. add a new auth file using your second account using `audible manage auth-file add`
-2. add a new profile to your config and use the second auth file using `audible manage profile add`
-
 ## Commands

 Call `audible -h` to show the help and a list of all available subcommands. You can show the help for each subcommand like so: `audible <subcommand> -h`. If a subcommand has another subcommands, you can do it the same way.
@@ -211,17 +191,6 @@ At this time, there the following buildin subcommands:
 - `add`
 - `remove`

-## Example Usage
-
-To download all of your audiobooks in the aaxc format use:
-
-```shell
-audible download --all --aaxc
-```
-
-To download all of your audiobooks after the Date 2022-07-21 in aax format use:
-
-```shell
-audible download --start-date "2022-07-21" --aax --all
-```
-
 ## Verbosity option

 There are 6 different verbosity levels:
@@ -232,9 +201,9 @@ There are 6 different verbosity levels:
 - error
 - critical

-By default, the verbosity level is set to `info`. You can provide another level like so: `audible -v <level> <subcommand> ...`.
+By default the verbosity level is set to `info`. You can provide another level like so: `audible -v <level> <subcommand> ...`.

-If you use the `download` subcommand with the `--all` flag there will be a huge output. Best practise is to set the verbosity level to `error` with `audible -v error download --all ...`
+If you use the `download` sudcommand with the `--all` flag there will be a huge output. Best practise is to set the verbosity level to `error` with `audible -v error download --all ...`

 ## Plugins
@@ -250,13 +219,13 @@ You can provide own subcommands and execute them with `audible SUBCOMMAND`.
 All plugin commands must be placed in the plugin folder. Every subcommand must
 have his own file. Every file have to be named ``cmd_{SUBCOMMAND}.py``.
 Each subcommand file must have a function called `cli` as entrypoint.
-This function has to be decorated with ``@click.group(name="GROUP_NAME")`` or
+This function have to be decorated with ``@click.group(name="GROUP_NAME")`` or
 ``@click.command(name="GROUP_NAME")``.

 Relative imports in the command files doesn't work. So you have to work with
 absolute imports. Please take care about this. If you have any issues with
 absolute imports please add your plugin path to the `PYTHONPATH` variable or
-add this lines of code to the beginning of your command script:
+add this lines of code to the begining of your command script:

 ```python
 import sys
@@ -272,7 +241,7 @@ Examples can be found

 If you want to develop a complete plugin package for ``audible-cli`` you can
 do this on an easy way. You only need to register your sub-commands or
-subgroups to an entry-point in your setup.py that is loaded by the core
+sub-groups to an entry-point in your setup.py that is loaded by the core
 package.

 Example for a setup.py
````
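The plugin conventions quoted in the README hunks above (a file named `cmd_{SUBCOMMAND}.py` exposing an entrypoint function `cli` decorated with `click.command` or `click.group`) are easiest to see in a small example. This is a hypothetical plugin, not a file from the repository:

```python
# plugin_cmds/cmd_hello.py -- hypothetical example following the README's
# plugin conventions: file named cmd_{SUBCOMMAND}.py, entrypoint function
# named `cli`, decorated with click.command or click.group.
import click


@click.command(name="hello")
@click.option("--name", default="world", help="Who to greet.")
def cli(name):
    """Say hello (example plugin command)."""
    click.echo(f"Hello, {name}!")
```

Placed in the plugin folder, it would then be callable as `audible hello --name you`.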
### plugin_cmds/cmd_remove-encryption.py (683 deletions, whole file removed)

```python
"""Removes encryption of aax and aaxc files.

This is a proof-of-concept and for testing purposes only.

No error handling.
Need further work. Some options do not work or options are missing.

Needs at least ffmpeg 4.4
"""


import json
import operator
import pathlib
import re
import subprocess  # noqa: S404
import tempfile
import typing as t
from enum import Enum
from functools import reduce
from glob import glob
from shutil import which

import click
from click import echo, secho

from audible_cli.decorators import pass_session
from audible_cli.exceptions import AudibleCliException


class ChapterError(AudibleCliException):
    """Base class for all chapter errors."""


class SupportedFiles(Enum):
    AAX = ".aax"
    AAXC = ".aaxc"

    @classmethod
    def get_supported_list(cls):
        return list(set(item.value for item in cls))

    @classmethod
    def is_supported_suffix(cls, value):
        return value in cls.get_supported_list()

    @classmethod
    def is_supported_file(cls, value):
        return pathlib.PurePath(value).suffix in cls.get_supported_list()


def _get_input_files(
    files: t.Union[t.Tuple[str], t.List[str]],
    recursive: bool = True
) -> t.List[pathlib.Path]:
    filenames = []
    for filename in files:
        # if the shell does not do filename globbing
        expanded = list(glob(filename, recursive=recursive))

        if (
            len(expanded) == 0
            and '*' not in filename
            and not SupportedFiles.is_supported_file(filename)
        ):
            raise click.BadParameter(f"{filename}: file not found or supported.")

        expanded_filter = filter(
            lambda x: SupportedFiles.is_supported_file(x), expanded
        )
        expanded = list(map(lambda x: pathlib.Path(x).resolve(), expanded_filter))
        filenames.extend(expanded)

    return filenames


def recursive_lookup_dict(key: str, dictionary: t.Dict[str, t.Any]) -> t.Any:
    if key in dictionary:
        return dictionary[key]
    for value in dictionary.values():
        if isinstance(value, dict):
            try:
                item = recursive_lookup_dict(key, value)
            except KeyError:
                continue
            else:
                return item

    raise KeyError


def get_aaxc_credentials(voucher_file: pathlib.Path):
    if not voucher_file.exists() or not voucher_file.is_file():
        raise AudibleCliException(f"Voucher file {voucher_file} not found.")

    voucher_dict = json.loads(voucher_file.read_text())
    try:
        key = recursive_lookup_dict("key", voucher_dict)
        iv = recursive_lookup_dict("iv", voucher_dict)
    except KeyError:
        raise AudibleCliException(f"No key/iv found in file {voucher_file}.") from None

    return key, iv


class ApiChapterInfo:
    def __init__(self, content_metadata: t.Dict[str, t.Any]) -> None:
        chapter_info = self._parse(content_metadata)
        self._chapter_info = chapter_info

    @classmethod
    def from_file(cls, file: t.Union[pathlib.Path, str]) -> "ApiChapterInfo":
        file = pathlib.Path(file)
        if not file.exists() or not file.is_file():
            raise ChapterError(f"Chapter file {file} not found.")
        content_string = pathlib.Path(file).read_text("utf-8")
        content_json = json.loads(content_string)
        return cls(content_json)

    @staticmethod
    def _parse(content_metadata: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
        if "chapters" in content_metadata:
            return content_metadata

        try:
            return recursive_lookup_dict("chapter_info", content_metadata)
        except KeyError:
            raise ChapterError("No chapter info found.") from None

    def count_chapters(self):
        return len(self.get_chapters())

    def get_chapters(self, separate_intro_outro=False, remove_intro_outro=False):
        def extract_chapters(initial, current):
            if "chapters" in current:
                return initial + [current] + current["chapters"]
            else:
                return initial + [current]

        chapters = list(
            reduce(
                extract_chapters,
                self._chapter_info["chapters"],
                [],
            )
        )

        if separate_intro_outro:
            return self._separate_intro_outro(chapters)
        elif remove_intro_outro:
            return self._remove_intro_outro(chapters)

        return chapters

    def get_intro_duration_ms(self):
        return self._chapter_info["brandIntroDurationMs"]

    def get_outro_duration_ms(self):
        return self._chapter_info["brandOutroDurationMs"]

    def get_runtime_length_ms(self):
        return self._chapter_info["runtime_length_ms"]

    def is_accurate(self):
        return self._chapter_info["is_accurate"]

    def _separate_intro_outro(self, chapters):
        echo("Separate Audible Brand Intro and Outro to own Chapter.")
        chapters.sort(key=operator.itemgetter("start_offset_ms"))

        first = chapters[0]
        intro_dur_ms = self.get_intro_duration_ms()
        first["start_offset_ms"] = intro_dur_ms
        first["start_offset_sec"] = round(first["start_offset_ms"] / 1000)
        first["length_ms"] -= intro_dur_ms

        last = chapters[-1]
        outro_dur_ms = self.get_outro_duration_ms()
        last["length_ms"] -= outro_dur_ms

        chapters.append(
            {
                "length_ms": intro_dur_ms,
                "start_offset_ms": 0,
                "start_offset_sec": 0,
                "title": "Intro",
            }
        )
        chapters.append(
            {
                "length_ms": outro_dur_ms,
                "start_offset_ms": self.get_runtime_length_ms() - outro_dur_ms,
                "start_offset_sec": round(
                    (self.get_runtime_length_ms() - outro_dur_ms) / 1000
                ),
                "title": "Outro",
            }
        )
        chapters.sort(key=operator.itemgetter("start_offset_ms"))

        return chapters

    def _remove_intro_outro(self, chapters):
        echo("Delete Audible Brand Intro and Outro.")
        chapters.sort(key=operator.itemgetter("start_offset_ms"))

        intro_dur_ms = self.get_intro_duration_ms()
        outro_dur_ms = self.get_outro_duration_ms()

        first = chapters[0]
        first["length_ms"] -= intro_dur_ms

        for chapter in chapters[1:]:
            chapter["start_offset_ms"] -= intro_dur_ms
            chapter["start_offset_sec"] -= round(chapter["start_offset_ms"] / 1000)

        last = chapters[-1]
        last["length_ms"] -= outro_dur_ms

        return chapters


class FFMeta:
    SECTION = re.compile(r"\[(?P<header>[^]]+)\]")
    OPTION = re.compile(r"(?P<option>.*?)\s*(?:(?P<vi>=)\s*(?P<value>.*))?$")

    def __init__(self, ffmeta_file: t.Union[str, pathlib.Path]) -> None:
        self._ffmeta_raw = pathlib.Path(ffmeta_file).read_text("utf-8")
        self._ffmeta_parsed = self._parse_ffmeta()

    def _parse_ffmeta(self):
        parsed_dict = {}
        start_section = "_"
        cursec = parsed_dict[start_section] = {}
        num_chap = 0

        for line in iter(self._ffmeta_raw.splitlines()):
            mo = self.SECTION.match(line)
            if mo:
                sec_name = mo.group("header")
                if sec_name == "CHAPTER":
                    num_chap += 1
                    if sec_name not in parsed_dict:
                        parsed_dict[sec_name] = {}
                    cursec = parsed_dict[sec_name][num_chap] = {}
                else:
                    cursec = parsed_dict[sec_name] = {}
            else:
                match = self.OPTION.match(line)
                cursec.update({match.group("option"): match.group("value")})

        return parsed_dict

    def count_chapters(self):
        return len(self._ffmeta_parsed["CHAPTER"])

    def set_chapter_option(self, num, option, value):
        chapter = self._ffmeta_parsed["CHAPTER"][num]
        for chapter_option in chapter:
            if chapter_option == option:
                chapter[chapter_option] = value

    def write(self, filename):
        fp = pathlib.Path(filename).open("w", encoding="utf-8")
        d = "="

        for section in self._ffmeta_parsed:
            if section == "_":
                self._write_section(fp, None, self._ffmeta_parsed[section], d)
            elif section == "CHAPTER":
                # TODO: Tue etwas
                for chapter in self._ffmeta_parsed[section]:
                    self._write_section(
                        fp, section, self._ffmeta_parsed[section][chapter], d
                    )
            else:
                self._write_section(fp, section, self._ffmeta_parsed[section], d)

    @staticmethod
    def _write_section(fp, section_name, section_items, delimiter):
        """Write a single section to the specified `fp`."""
        if section_name is not None:
            fp.write(f"[{section_name}]\n")

        for key, value in section_items.items():
            if value is None:
                fp.write(f"{key}\n")
            else:
                fp.write(f"{key}{delimiter}{value}\n")

    def update_chapters_from_chapter_info(
        self,
        chapter_info: ApiChapterInfo,
        force_rebuild_chapters: bool = False,
        separate_intro_outro: bool = False,
        remove_intro_outro: bool = False
    ) -> None:
        if not chapter_info.is_accurate():
            echo("Metadata from API is not accurate. Skip.")
            return

        if chapter_info.count_chapters() != self.count_chapters():
            if force_rebuild_chapters:
                echo("Force rebuild chapters due to chapter mismatch.")
            else:
                raise ChapterError("Chapter mismatch")

        echo(f"Found {chapter_info.count_chapters()} chapters to prepare.")

        api_chapters = chapter_info.get_chapters(separate_intro_outro, remove_intro_outro)

        num_chap = 0
        new_chapters = {}
        for chapter in api_chapters:
            chap_start = chapter["start_offset_ms"]
            chap_end = chap_start + chapter["length_ms"]
            num_chap += 1
            new_chapters[num_chap] = {
                "TIMEBASE": "1/1000",
                "START": chap_start,
                "END": chap_end,
                "title": chapter["title"],
            }
        self._ffmeta_parsed["CHAPTER"] = new_chapters

    def get_start_end_without_intro_outro(
        self,
        chapter_info: ApiChapterInfo,
    ):
        intro_dur_ms = chapter_info.get_intro_duration_ms()
        outro_dur_ms = chapter_info.get_outro_duration_ms()
        total_runtime_ms = chapter_info.get_runtime_length_ms()

        start_new = intro_dur_ms
        duration_new = total_runtime_ms - intro_dur_ms - outro_dur_ms

        return start_new, duration_new


def _get_voucher_filename(file: pathlib.Path) -> pathlib.Path:
    return file.with_suffix(".voucher")


def _get_chapter_filename(file: pathlib.Path) -> pathlib.Path:
    base_filename = file.stem.rsplit("-", 1)[0]
    return file.with_name(base_filename + "-chapters.json")


def _get_ffmeta_file(file: pathlib.Path, tempdir: pathlib.Path) -> pathlib.Path:
    metaname = file.with_suffix(".meta").name
    metafile = tempdir / metaname
    return metafile


class FfmpegFileDecrypter:
    def __init__(
        self,
        file: pathlib.Path,
        target_dir: pathlib.Path,
        tempdir: pathlib.Path,
        activation_bytes: t.Optional[str],
        overwrite: bool,
        rebuild_chapters: bool,
        force_rebuild_chapters: bool,
        skip_rebuild_chapters: bool,
        separate_intro_outro: bool,
        remove_intro_outro: bool
    ) -> None:
        file_type = SupportedFiles(file.suffix)

        credentials = None
        if file_type == SupportedFiles.AAX:
            if activation_bytes is None:
                raise AudibleCliException(
                    "No activation bytes found. Do you ever run "
                    "`audible activation-bytes`?"
                )
            credentials = activation_bytes
        elif file_type == SupportedFiles.AAXC:
            voucher_filename = _get_voucher_filename(file)
            credentials = get_aaxc_credentials(voucher_filename)

        self._source = file
        self._credentials: t.Optional[t.Union[str, t.Tuple[str]]] = credentials
        self._target_dir = target_dir
        self._tempdir = tempdir
        self._overwrite = overwrite
        self._rebuild_chapters = rebuild_chapters
        self._force_rebuild_chapters = force_rebuild_chapters
        self._skip_rebuild_chapters = skip_rebuild_chapters
        self._separate_intro_outro = separate_intro_outro
        self._remove_intro_outro = remove_intro_outro
        self._api_chapter: t.Optional[ApiChapterInfo] = None
        self._ffmeta: t.Optional[FFMeta] = None
        self._is_rebuilded: bool = False

    @property
    def api_chapter(self) -> ApiChapterInfo:
        if self._api_chapter is None:
            try:
                voucher_filename = _get_voucher_filename(self._source)
                self._api_chapter = ApiChapterInfo.from_file(voucher_filename)
            except ChapterError:
                voucher_filename = _get_chapter_filename(self._source)
                self._api_chapter = ApiChapterInfo.from_file(voucher_filename)
            echo(f"Using chapters from {voucher_filename}")
        return self._api_chapter

    @property
    def ffmeta(self) -> FFMeta:
        if self._ffmeta is None:
            metafile = _get_ffmeta_file(self._source, self._tempdir)

            base_cmd = [
                "ffmpeg",
                "-v",
                "quiet",
                "-stats",
            ]
            if isinstance(self._credentials, tuple):
                key, iv = self._credentials
                credentials_cmd = [
                    "-audible_key",
                    key,
                    "-audible_iv",
                    iv,
                ]
            else:
                credentials_cmd = [
                    "-activation_bytes",
                    self._credentials,
                ]
            base_cmd.extend(credentials_cmd)

            extract_cmd = [
                "-i",
                str(self._source),
                "-f",
                "ffmetadata",
                str(metafile),
            ]
            base_cmd.extend(extract_cmd)

            subprocess.check_output(base_cmd, text=True)  # noqa: S603
            self._ffmeta = FFMeta(metafile)

        return self._ffmeta

    def rebuild_chapters(self) -> None:
        if not self._is_rebuilded:
            self.ffmeta.update_chapters_from_chapter_info(
                self.api_chapter, self._force_rebuild_chapters, self._separate_intro_outro, self._remove_intro_outro
            )
            self._is_rebuilded = True

    def run(self):
        oname = self._source.with_suffix(".m4b").name
        outfile = self._target_dir / oname

        if outfile.exists():
            if self._overwrite:
                secho(f"Overwrite {outfile}: already exists", fg="blue")
            else:
                secho(f"Skip {outfile}: already exists", fg="blue")
                return

        base_cmd = [
            "ffmpeg",
            "-v",
            "quiet",
            "-stats",
        ]
        if self._overwrite:
            base_cmd.append("-y")
        if isinstance(self._credentials, tuple):
            key, iv = self._credentials
            credentials_cmd = [
                "-audible_key",
                key,
                "-audible_iv",
                iv,
            ]
        else:
            credentials_cmd = [
                "-activation_bytes",
                self._credentials,
            ]
        base_cmd.extend(credentials_cmd)

        if self._rebuild_chapters:
            metafile = _get_ffmeta_file(self._source, self._tempdir)
            try:
                self.rebuild_chapters()
                self.ffmeta.write(metafile)
            except ChapterError:
                if self._skip_rebuild_chapters:
                    echo("Skip rebuild chapters due to chapter mismatch.")
                else:
                    raise
            else:
                if self._remove_intro_outro:
                    start_new, duration_new = self.ffmeta.get_start_end_without_intro_outro(self.api_chapter)

                    base_cmd.extend(
                        [
                            "-ss",
                            f"{start_new}ms",
                            "-t",
                            f"{duration_new}ms",
                            "-i",
                            str(self._source),
                            "-i",
                            str(metafile),
                            "-map_metadata",
                            "0",
                            "-map_chapters",
                            "1",
                        ]
                    )
                else:
                    base_cmd.extend(
                        [
                            "-i",
                            str(self._source),
                            "-i",
                            str(metafile),
                            "-map_metadata",
                            "0",
                            "-map_chapters",
                            "1",
                        ]
                    )
        else:
            base_cmd.extend(
                [
                    "-i",
                    str(self._source),
                ]
            )

        base_cmd.extend(
            [
                "-c",
                "copy",
                str(outfile),
            ]
        )

        subprocess.check_output(base_cmd, text=True)  # noqa: S603

        echo(f"File decryption successful: {outfile}")


@click.command("decrypt")
@click.argument("files", nargs=-1)
@click.option(
    "--dir",
    "-d",
    "directory",
    type=click.Path(exists=True, dir_okay=True),
    default=pathlib.Path.cwd(),
    help="Folder where the decrypted files should be saved.",
    show_default=True
)
@click.option(
    "--all",
    "-a",
    "all_",
    is_flag=True,
    help="Decrypt all aax and aaxc files in current folder."
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing files.")
@click.option(
    "--rebuild-chapters",
    "-r",
    is_flag=True,
    help="Rebuild chapters with chapters from voucher or chapter file."
)
@click.option(
    "--force-rebuild-chapters",
    "-f",
    is_flag=True,
    help=(
        "Force rebuild chapters with chapters from voucher or chapter file "
        "if the built-in chapters in the audio file mismatch. "
        "Only use with `--rebuild-chapters`."
    ),
)
@click.option(
    "--skip-rebuild-chapters",
    "-t",
    is_flag=True,
    help=(
        "Decrypt without rebuilding chapters when chapters mismatch. "
        "Only use with `--rebuild-chapters`."
    ),
)
@click.option(
    "--separate-intro-outro",
    "-s",
    is_flag=True,
    help=(
        "Separate Audible Brand Intro and Outro to own Chapter. "
        "Only use with `--rebuild-chapters`."
    ),
)
@click.option(
    "--remove-intro-outro",
    "-c",
    is_flag=True,
    help=(
        "Remove Audible Brand Intro and Outro. "
        "Only use with `--rebuild-chapters`."
    ),
)
@pass_session
def cli(
    session,
    files: str,
    directory: t.Union[pathlib.Path, str],
    all_: bool,
    overwrite: bool,
    rebuild_chapters: bool,
    force_rebuild_chapters: bool,
    skip_rebuild_chapters: bool,
    separate_intro_outro: bool,
    remove_intro_outro: bool,
):
    """Decrypt audiobooks downloaded with audible-cli.

    FILES are the names of the file to decrypt.
    Wildcards `*` and recursive lookup with `**` are supported.

    Only FILES with `aax` or `aaxc` suffix are processed.
    Other files are skipped silently.
    """
    if not which("ffmpeg"):
        ctx = click.get_current_context()
        ctx.fail("ffmpeg not found")

    if (force_rebuild_chapters or skip_rebuild_chapters or separate_intro_outro or remove_intro_outro) and not rebuild_chapters:
        raise click.BadOptionUsage(
            "",
            "`--force-rebuild-chapters`, `--skip-rebuild-chapters`, `--separate-intro-outro` "
            "and `--remove-intro-outro` can only be used together with `--rebuild-chapters`"
        )

    if force_rebuild_chapters and skip_rebuild_chapters:
        raise click.BadOptionUsage(
            "",
            "`--force-rebuild-chapters` and `--skip-rebuild-chapters` can "
            "not be used together"
        )

    if separate_intro_outro and remove_intro_outro:
        raise click.BadOptionUsage(
            "",
            "`--separate-intro-outro` and `--remove-intro-outro` can not be used together"
        )

    if all_:
        if files:
            raise click.BadOptionUsage(
                "",
                "If using `--all`, no FILES arguments can be used."
            )
        files = [f"*{suffix}" for suffix in SupportedFiles.get_supported_list()]

    files = _get_input_files(files, recursive=True)
    with tempfile.TemporaryDirectory() as tempdir:
        for file in files:
            decrypter = FfmpegFileDecrypter(
                file=file,
                target_dir=pathlib.Path(directory).resolve(),
                tempdir=pathlib.Path(tempdir).resolve(),
                activation_bytes=session.auth.activation_bytes,
                overwrite=overwrite,
                rebuild_chapters=rebuild_chapters,
                force_rebuild_chapters=force_rebuild_chapters,
                skip_rebuild_chapters=skip_rebuild_chapters,
                separate_intro_outro=separate_intro_outro,
                remove_intro_outro=remove_intro_outro
            )
            decrypter.run()
```
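Underneath, the decrypter drives plain ffmpeg: `-activation_bytes` unlocks aax files, aaxc files need the `-audible_key`/`-audible_iv` pair taken from the voucher file, and `-c copy` remuxes the audio without re-encoding. A stripped-down sketch of the aaxc case, mirroring the command the class above assembles:

```python
import pathlib
import subprocess


def decrypt_aaxc_sketch(src: pathlib.Path, key: str, iv: str) -> pathlib.Path:
    # Mirrors the ffmpeg invocation built by FfmpegFileDecrypter above:
    # the key/iv unlock the aaxc container, -c copy avoids re-encoding.
    outfile = src.with_suffix(".m4b")
    cmd = [
        "ffmpeg",
        "-audible_key", key,   # from the .voucher file
        "-audible_iv", iv,     # from the .voucher file
        "-i", str(src),
        "-c", "copy",
        str(outfile),
    ]
    subprocess.check_output(cmd, text=True)
    return outfile
```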
### plugin_cmds/cmd_remove-encryption.py (new file, 332 additions)

```python
"""
This is a proof-of-concept and for testing purposes only. No error handling.
Need further work. Some options does not work or options are missing.

Needs at least ffmpeg 4.4
"""


import json
import operator
import pathlib
import re
import subprocess
from shutil import which

import click
from audible_cli.decorators import pass_session
from click import echo, secho


class ApiMeta:
    def __init__(self, api_meta):
        if not isinstance(api_meta, dict):
            api_meta = pathlib.Path(api_meta).read_text("utf-8")
        self._meta_raw = api_meta
        self._meta_parsed = self._parse_meta()

    def _parse_meta(self):
        if isinstance(self._meta_raw, dict):
            return self._meta_raw
        return json.loads(self._meta_raw)

    def count_chapters(self):
        return len(self.get_chapters())

    def get_chapters(self):
        return self._meta_parsed["content_metadata"]["chapter_info"][
            "chapters"]

    def get_intro_duration_ms(self):
        return self._meta_parsed["content_metadata"]["chapter_info"][
            "brandIntroDurationMs"]

    def get_outro_duration_ms(self):
        return self._meta_parsed["content_metadata"]["chapter_info"][
            "brandOutroDurationMs"]

    def get_runtime_length_ms(self):
        return self._meta_parsed["content_metadata"]["chapter_info"][
            "runtime_length_ms"]

    def is_accurate(self):
        return self._meta_parsed["content_metadata"]["chapter_info"][
            "is_accurate"]


class FFMeta:
    SECTION = re.compile(r"\[(?P<header>[^]]+)\]")
    OPTION = re.compile(r"(?P<option>.*?)\s*(?:(?P<vi>=)\s*(?P<value>.*))?$")

    def __init__(self, ffmeta_file):
        self._ffmeta_raw = pathlib.Path(ffmeta_file).read_text("utf-8")
        self._ffmeta_parsed = self._parse_ffmeta()

    def _parse_ffmeta(self):
        parsed_dict = {}
        start_section = "_"
        cursec = parsed_dict[start_section] = {}
        num_chap = 0

        for line in iter(self._ffmeta_raw.splitlines()):
            mo = self.SECTION.match(line)
            if mo:
                sec_name = mo.group("header")
                if sec_name == "CHAPTER":
                    num_chap += 1
                    if sec_name not in parsed_dict:
                        parsed_dict[sec_name] = {}
                    cursec = parsed_dict[sec_name][num_chap] = {}
                else:
                    cursec = parsed_dict[sec_name] = {}
            else:
                match = self.OPTION.match(line)
                cursec.update({match.group("option"): match.group("value")})

        return parsed_dict

    def count_chapters(self):
        return len(self._ffmeta_parsed["CHAPTER"])

    def set_chapter_option(self, num, option, value):
        chapter = self._ffmeta_parsed["CHAPTER"][num]
        for chapter_option in chapter:
            if chapter_option == option:
                chapter[chapter_option] = value

    def write(self, filename):
        fp = pathlib.Path(filename).open("w", encoding="utf-8")
        d = "="

        for section in self._ffmeta_parsed:
            if section == "_":
                self._write_section(fp, None, self._ffmeta_parsed[section], d)
            elif section == "CHAPTER":
                # TODO: Tue etwas
                for chapter in self._ffmeta_parsed[section]:
                    self._write_section(fp, section,
                                        self._ffmeta_parsed[section][chapter],
                                        d)
            else:
                self._write_section(fp, section, self._ffmeta_parsed[section],
                                    d)

    @staticmethod
    def _write_section(fp, section_name, section_items, delimiter):
        """Write a single section to the specified `fp`."""
        if section_name is not None:
            fp.write(f"[{section_name}]\n")

        for key, value in section_items.items():
            if value is None:
                fp.write(f"{key}\n")
            else:
                fp.write(f"{key}{delimiter}{value}\n")

    def update_chapters_from_api_meta(self, api_meta, separate_intro_outro=True):
        if not isinstance(api_meta, ApiMeta):
            api_meta = ApiMeta(api_meta)

        if not api_meta.is_accurate():
            echo("Metadata from API is not accurate. Skip.")
            return

        # assert api_meta.count_chapters() == self.count_chapters()

        echo(f"Found {self.count_chapters()} chapters to prepare.")

        api_chapters = api_meta.get_chapters()
        if separate_intro_outro:
            echo("Separate Audible Brand Intro and Outro to own Chapter.")
            api_chapters.sort(key=operator.itemgetter("start_offset_ms"))

            first = api_chapters[0]
            intro_dur_ms = api_meta.get_intro_duration_ms()
            first["start_offset_ms"] = intro_dur_ms
            first["start_offset_sec"] = round(first["start_offset_ms"] / 1000)
            first["length_ms"] -= intro_dur_ms

            last = api_chapters[-1]
            outro_dur_ms = api_meta.get_outro_duration_ms()
            last["length_ms"] -= outro_dur_ms

            api_chapters.append({
                "length_ms": intro_dur_ms,
                "start_offset_ms": 0,
                "start_offset_sec": 0,
                "title": "Intro"
            })
            api_chapters.append({
                "length_ms": outro_dur_ms,
                "start_offset_ms": api_meta.get_runtime_length_ms() - outro_dur_ms,
                "start_offset_sec": round((api_meta.get_runtime_length_ms() - outro_dur_ms) / 1000),
                "title": "Outro"
            })
            api_chapters.sort(key=operator.itemgetter("start_offset_ms"))

        num_chap = 0
        new_chapters = {}
        for chapter in api_chapters:
            chap_start = chapter["start_offset_ms"]
            chap_end = chap_start + chapter["length_ms"]
            num_chap += 1
            new_chapters[num_chap] = {
                "TIMEBASE": "1/1000",
                "START": chap_start,
                "END": chap_end,
                "title": chapter["title"]
            }
        self._ffmeta_parsed["CHAPTER"] = new_chapters


def decrypt_aax(files, activation_bytes, rebuild_chapters):
    for file in files:
        outfile = file.with_suffix(".m4b")
        metafile = file.with_suffix(".meta")
        metafile_new = file.with_suffix(".new.meta")
        base_filename = file.stem.rsplit("-")[0]
        chapters = file.with_name(base_filename + "-chapters").with_suffix(".json")
        apimeta = json.loads(chapters.read_text())

        if outfile.exists():
            secho(f"file {outfile} already exists Skip.", fg="blue")
            continue

        if rebuild_chapters and apimeta["content_metadata"]["chapter_info"][
                "is_accurate"]:
            cmd = ["ffmpeg",
                   "-activation_bytes", activation_bytes,
                   "-i", str(file),
                   "-f", "ffmetadata",
                   str(metafile)]
            subprocess.check_output(cmd, universal_newlines=True)

            ffmeta_class = FFMeta(metafile)
            ffmeta_class.update_chapters_from_api_meta(apimeta)
            ffmeta_class.write(metafile_new)
            click.echo("Replaced all titles.")

            cmd = ["ffmpeg",
                   "-activation_bytes", activation_bytes,
                   "-i", str(file),
                   "-i", str(metafile_new),
                   "-map_metadata", "0",
                   "-map_chapters", "1",
                   "-c", "copy",
                   str(outfile)]
            subprocess.check_output(cmd, universal_newlines=True)
            metafile.unlink()
            metafile_new.unlink()
        else:
            cmd = ["ffmpeg",
                   "-activation_bytes", activation_bytes,
                   "-i", str(file),
                   "-c", "copy",
                   str(outfile)]
            subprocess.check_output(cmd, universal_newlines=True)


def decrypt_aaxc(files, rebuild_chapters):
    for file in files:
        metafile = file.with_suffix(".meta")
        metafile_new = file.with_suffix(".new.meta")
        voucher = file.with_suffix(".voucher")
        voucher = json.loads(voucher.read_text())
        outfile = file.with_suffix(".m4b")

        if outfile.exists():
            secho(f"file {outfile} already exists Skip.", fg="blue")
            continue

        apimeta = voucher["content_license"]
        audible_key = apimeta["license_response"]["key"]
        audible_iv = apimeta["license_response"]["iv"]

        if rebuild_chapters and apimeta["content_metadata"]["chapter_info"][
                "is_accurate"]:
            cmd = ["ffmpeg",
                   "-audible_key", audible_key,
                   "-audible_iv", audible_iv,
                   "-i", str(file),
                   "-f", "ffmetadata",
                   str(metafile)]
            subprocess.check_output(cmd, universal_newlines=True)

            ffmeta_class = FFMeta(metafile)
            ffmeta_class.update_chapters_from_api_meta(apimeta)
            ffmeta_class.write(metafile_new)
            click.echo("Replaced all titles.")

            cmd = ["ffmpeg",
                   "-audible_key", audible_key,
                   "-audible_iv", audible_iv,
                   "-i", str(file),
                   "-i", str(metafile_new),
                   "-map_metadata", "0",
                   "-map_chapters", "1",
                   "-c", "copy",
                   str(outfile)]
            subprocess.check_output(cmd, universal_newlines=True)
            metafile.unlink()
            metafile_new.unlink()
        else:
            cmd = ["ffmpeg",
                   "-audible_key", audible_key,
                   "-audible_iv", audible_iv,
                   "-i", str(file),
                   "-c", "copy",
                   str(outfile)]
            subprocess.check_output(cmd, universal_newlines=True)


CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])


@click.command("remove-encryption", context_settings=CONTEXT_SETTINGS)
@click.option(
    "--input", "-i",
    type=click.Path(exists=True, file_okay=True),
    multiple=True,
    help="Input file")
@click.option(
    "--all",
    is_flag=True,
    help="convert all files in folder"
)
@click.option(
    "--overwrite",
    is_flag=True,
    help="overwrite existing files"
)
@click.option(
    "--rebuild-chapters",
    is_flag=True,
    help="Rebuild chapters from chapter file"
)
@pass_session
def cli(session, **options):
    if not which("ffmpeg"):
        ctx = click.get_current_context()
        ctx.fail("ffmpeg not found")

    rebuild_chapters = options.get("rebuild_chapters")

    jobs = {"aaxc": [], "aax": []}

    if options.get("all"):
        cwd = pathlib.Path.cwd()
        jobs["aaxc"].extend(list(cwd.glob('*.aaxc')))
        jobs["aax"].extend(list(cwd.glob('*.aax')))
    else:
        for file in options.get("input"):
            file = pathlib.Path(file).resolve()
            if file.match("*.aaxc"):
                jobs["aaxc"].append(file)
            elif file.match("*.aax"):
                jobs["aax"].append(file)
            else:
                secho(f"file suffix {file.suffix} not supported", fg="red")

    decrypt_aaxc(jobs["aaxc"], rebuild_chapters)
    decrypt_aax(jobs["aax"], session.auth.activation_bytes, rebuild_chapters)
```
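Both versions of the plugin extract chapter metadata with `ffmpeg -f ffmetadata` and parse it with the `SECTION`/`OPTION` regexes of `FFMeta`. For reference, that ffmetadata text format looks like this (the values here are illustrative, not taken from the compare):

```ini
;FFMETADATA1
title=Example Book

[CHAPTER]
TIMEBASE=1/1000
START=0
END=25000
title=Intro
```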
### pyi_entrypoint.py (file name inferred from the pyinstaller build commands above)

```diff
@@ -1,9 +1,4 @@
-import multiprocessing
-from audible_cli import cli
-
-
-multiprocessing.freeze_support()
-
-
 if __name__ == '__main__':
-    cli.main()
+    from audible_cli import cli
+    cli.main()
```
### setup.py (7 changes)

```diff
@@ -46,17 +46,16 @@ setup(
     ],
     install_requires=[
         "aiofiles",
-        "audible>=0.8.2",
+        "audible>=0.8.1",
         "click>=8",
         "colorama; platform_system=='Windows'",
-        "httpx>=0.23.3,<0.28.0",
+        "httpx>=0.20.0,<0.23.0",
         "packaging",
         "Pillow",
         "tabulate",
         "toml",
         "tqdm",
-        "questionary",
-        "importlib-metadata; python_version<'3.10'",
+        "questionary"
     ],
     extras_require={
         'pyi': [
```
### audible_cli/_logging.py

```diff
@@ -4,7 +4,6 @@ from typing import Optional, Union
 from warnings import warn

 import click
-from tqdm import tqdm


 audible_cli_logger = logging.getLogger("audible_cli")
@@ -101,13 +100,10 @@ class ClickHandler(logging.Handler):
         try:
             msg = self.format(record)
             level = record.levelname.lower()

-            # Avoid tqdm progress bar interruption by logger's output to console
-            with tqdm.external_write_mode():
-                if self.echo_kwargs.get(level):
-                    click.echo(msg, **self.echo_kwargs[level])
-                else:
-                    click.echo(msg)
+            if self.echo_kwargs.get(level):
+                click.echo(msg, **self.echo_kwargs[level])
+            else:
+                click.echo(msg)
         except Exception:
             self.handleError(record)
```
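The handler code removed above illustrates a generally useful pattern: wrapping console writes in `tqdm.external_write_mode()` so log lines do not tear an active progress bar. A self-contained sketch of the same idea (assumes `tqdm` is installed; not the project's exact handler):

```python
import logging

from tqdm import tqdm


class TqdmSafeHandler(logging.Handler):
    """Log handler that does not break an active tqdm progress bar."""

    def emit(self, record):
        try:
            msg = self.format(record)
            # Temporarily clears the bar, writes the message, redraws the bar.
            with tqdm.external_write_mode():
                print(msg)
        except Exception:
            self.handleError(record)
```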
### audible_cli/_version.py

```diff
@@ -1,7 +1,7 @@
 __title__ = "audible-cli"
 __description__ = "Command line interface (cli) for the audible package."
 __url__ = "https://github.com/mkb79/audible-cli"
-__version__ = "0.3.2b3"
+__version__ = "0.2.b1"
 __author__ = "mkb79"
 __author_email__ = "mkb79@hackitall.de"
 __license__ = "AGPL"
```
### audible_cli/cli.py

```diff
@@ -1,6 +1,6 @@
 import asyncio
 import logging
 import sys
+from pkg_resources import iter_entry_points

 import click
@@ -17,11 +17,6 @@ from .exceptions import AudibleCliException
 from ._logging import click_basic_config
 from . import plugins

-if sys.version_info >= (3, 10):
-    from importlib.metadata import entry_points
-else:  # Python < 3.10 (backport)
-    from importlib_metadata import entry_points
-

 logger = logging.getLogger("audible_cli")
 click_basic_config(logger)
@@ -30,7 +25,7 @@ CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])

 @plugins.from_folder(get_plugin_dir())
-@plugins.from_entry_point(entry_points(group=PLUGIN_ENTRY_POINT))
+@plugins.from_entry_point(iter_entry_points(PLUGIN_ENTRY_POINT))
 @build_in_cmds
 @click.group(context_settings=CONTEXT_SETTINGS)
 @profile_option
@@ -66,9 +61,6 @@ def main(*args, **kwargs):
     except click.Abort:
         logger.error("Aborted")
         sys.exit(1)
-    except asyncio.CancelledError:
-        logger.error("Aborted with Asyncio CancelledError")
-        sys.exit(2)
     except AudibleCliException as e:
         logger.error(e)
         sys.exit(2)
```
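The `cli.py` hunks swap plugin discovery between `importlib.metadata.entry_points` and the older `pkg_resources.iter_entry_points`; the 0.3.1 changelog entry about a `TypeError` when calling `entry_points` with a `group` argument explains the version gate on the newer side. A sketch of both styles side by side; the group name stands in for the `PLUGIN_ENTRY_POINT` constant and is an assumption, not taken from the compare:

```python
import sys

PLUGIN_ENTRY_POINT = "audible.cli_plugins"  # assumed value, for illustration

if sys.version_info >= (3, 10):
    # entry_points() accepts the `group` keyword from Python 3.10 onward;
    # on older interpreters the importlib_metadata backport provides it.
    from importlib.metadata import entry_points
    eps = entry_points(group=PLUGIN_ENTRY_POINT)
else:
    # Legacy pkg_resources style, shown on the older side of the diff.
    from pkg_resources import iter_entry_points
    eps = iter_entry_points(PLUGIN_ENTRY_POINT)

for ep in eps:
    command = ep.load()  # each entry point resolves to a click command/group
```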
### audible_cli/cmds/cmd_api.py

```diff
@@ -6,7 +6,6 @@ import sys
 import click
 from audible import Client

-from ..constants import AVAILABLE_MARKETPLACES
 from ..decorators import pass_session
@@ -55,7 +54,9 @@ logger = logging.getLogger("audible_cli.cmds.cmd_api")
 )
 @click.option(
     "--country-code", "-c",
-    type=click.Choice(AVAILABLE_MARKETPLACES),
+    type=click.Choice(
+        ["us", "ca", "uk", "au", "fr", "de", "es", "jp", "it", "in"]
+    ),
     help="Requested Audible marketplace. If not set, the country code for "
          "the current profile is used."
 )
```

@@ -4,7 +4,6 @@ import asyncio.sslproto
import json
import pathlib
import logging
from datetime import datetime

import aiofiles
import click

@@ -15,22 +14,13 @@ from click import echo
from ..decorators import (
    bunch_size_option,
    end_date_option,
    start_date_option,
    timeout_option,
    pass_client,
    pass_session
)
from ..downloader import Downloader as NewDownloader, Status
from ..exceptions import (
    AudibleCliException,
    DirectoryDoesNotExists,
    DownloadUrlExpired,
    NotDownloadableAsAAX,
    VoucherNeedRefresh
)
from ..exceptions import DirectoryDoesNotExists, NotDownloadableAsAAX
from ..models import Library
from ..utils import datetime_type, Downloader
from ..utils import Downloader


logger = logging.getLogger("audible_cli.cmds.cmd_download")

@@ -39,8 +29,6 @@ CLIENT_HEADERS = {
    "User-Agent": "Audible/671 CFNetwork/1240.0.4 Darwin/20.6.0"
}

QUEUE = None


class DownloadCounter:
    def __init__(self):

@@ -52,8 +40,6 @@ class DownloadCounter:
        self._pdf: int = 0
        self._voucher: int = 0
        self._voucher_saved: int = 0
        self._aycl = 0
        self._aycl_voucher = 0

    @property
    def aax(self):

@@ -71,24 +57,6 @@ class DownloadCounter:
        self._aaxc += 1
        logger.debug(f"Currently downloaded aaxc files: {self.aaxc}")

    @property
    def aycl(self):
        return self._aycl

    def count_aycl(self):
        self._aycl += 1
        # log as error to display this message in any cases
        logger.debug(f"Currently downloaded aycl files: {self.aycl}")

    @property
    def aycl_voucher(self):
        return self._aycl_voucher

    def count_aycl_voucher(self):
        self._aycl_voucher += 1
        # log as error to display this message in any cases
        logger.debug(f"Currently downloaded aycl voucher files: {self.aycl_voucher}")

    @property
    def annotation(self):
        return self._annotation

@@ -146,9 +114,7 @@ class DownloadCounter:
            "cover": self.cover,
            "pdf": self.pdf,
            "voucher": self.voucher,
            "voucher_saved": self.voucher_saved,
            "aycl": self.aycl,
            "aycl_voucher": self.aycl_voucher
            "voucher_saved": self.voucher_saved
        }

    def has_downloads(self):

@@ -171,7 +137,7 @@ async def download_cover(
    url = item.get_cover_url(res)
    if url is None:
        logger.error(
            f"No COVER with size {res} found for {item.full_title}"
            f"No COVER found for {item.full_title} with given resolution"
        )
        return

@@ -203,7 +169,7 @@ async def download_pdf(


async def download_chapters(
    output_dir, base_filename, item, quality, overwrite_existing, chapter_type
    output_dir, base_filename, item, quality, overwrite_existing
):
    if not output_dir.is_dir():
        raise DirectoryDoesNotExists(output_dir)

@@ -217,7 +183,7 @@ async def download_chapters(
        return True

    try:
        metadata = await item.get_content_metadata(quality, chapter_type=chapter_type)
        metadata = await item.get_content_metadata(quality)
    except NotFoundError:
        logger.info(
            f"No chapters found for {item.full_title}."

@@ -226,7 +192,7 @@ async def download_chapters(
    metadata = json.dumps(metadata, indent=4)
    async with aiofiles.open(file, "w") as f:
        await f.write(metadata)
    logger.info(f"Chapter file saved in style '{chapter_type.upper()}' to {file}.")
    logger.info(f"Chapter file saved to {file}.")
    counter.count_chapter()

@@ -258,56 +224,9 @@ async def download_annotations(
    counter.count_annotation()


async def _get_audioparts(item):
    parts = []
    child_library: Library = await item.get_child_items()
    if child_library is not None:
        for child in child_library:
            if (
                child.content_delivery_type is not None
                and child.content_delivery_type == "AudioPart"
            ):
                parts.append(child)

    return parts


async def _add_audioparts_to_queue(
    client, output_dir, filename_mode, item, quality, overwrite_existing,
    aax_fallback, download_mode
):
    parts = await _get_audioparts(item)

    if download_mode == "aax":
        get_aax = True
        get_aaxc = False
    else:
        get_aax = False
        get_aaxc = True

    for part in parts:
        queue_job(
            get_cover=None,
            get_pdf=None,
            get_annotation=None,
            get_chapters=None,
            chapter_type=None,
            get_aax=get_aax,
            get_aaxc=get_aaxc,
            client=client,
            output_dir=output_dir,
            filename_mode=filename_mode,
            item=part,
            cover_sizes=None,
            quality=quality,
            overwrite_existing=overwrite_existing,
            aax_fallback=aax_fallback
        )


async def download_aax(
    client, output_dir, base_filename, item, quality, overwrite_existing,
    aax_fallback, filename_mode
    aax_fallback
):
    # url, codec = await item.get_aax_url(quality)
    try:

@@ -321,94 +240,25 @@ async def download_aax(
            base_filename=base_filename,
            item=item,
            quality=quality,
            overwrite_existing=overwrite_existing,
            filename_mode=filename_mode
            overwrite_existing=overwrite_existing
        )
        raise

    filename = base_filename + f"-{codec}.aax"
    filepath = output_dir / filename

    dl = NewDownloader(
        source=url,
        client=client,
        expected_types=[
            "audio/aax", "audio/vnd.audible.aax", "audio/audible"
        ]
    dl = Downloader(
        url, filepath, client, overwrite_existing,
        ["audio/aax", "audio/vnd.audible.aax", "audio/audible"]
    )
    downloaded = await dl.run(target=filepath, force_reload=overwrite_existing)
    downloaded = await dl.run(pb=True)

    if downloaded.status == Status.Success:
    if downloaded:
        counter.count_aax()
    elif downloaded.status == Status.DownloadIndividualParts:
        logger.info(
            f"Item {filepath} must be downloaded in parts. Adding parts to queue"
        )
        await _add_audioparts_to_queue(
            client=client,
            output_dir=output_dir,
            filename_mode=filename_mode,
            item=item,
            quality=quality,
            overwrite_existing=overwrite_existing,
            download_mode="aax",
            aax_fallback=aax_fallback,
        )


async def _reuse_voucher(lr_file, item):
    logger.info(f"Loading data from voucher file {lr_file}.")
    async with aiofiles.open(lr_file, "r") as f:
        lr = await f.read()
    lr = json.loads(lr)
    content_license = lr["content_license"]

    assert content_license["status_code"] == "Granted", "License not granted"

    # try to get the user id
    user_id = None
    if item._client is not None:
        auth = item._client.auth
        if auth.customer_info is not None:
            user_id = auth.customer_info.get("user_id")

    # Verification of allowed user
    if user_id is None:
        logger.debug("No user id found. Skip user verification.")
    else:
        if "allowed_users" in content_license:
            allowed_users = content_license["allowed_users"]
            if allowed_users and user_id not in allowed_users:
                # Don't proceed here to prevent overwriting voucher file
                msg = f"The current user is not entitled to use the voucher {lr_file}."
                raise AudibleCliException(msg)
        else:
            logger.debug(f"{lr_file} does not contain allowed users key.")

    # Verification of voucher validity
    if "refresh_date" in content_license:
        refresh_date = content_license["refresh_date"]
        refresh_date = datetime_type.convert(refresh_date, None, None)
        if refresh_date < datetime.utcnow():
            raise VoucherNeedRefresh(lr_file)

    content_metadata = content_license["content_metadata"]
    url = httpx.URL(content_metadata["content_url"]["offline_url"])
    codec = content_metadata["content_reference"]["content_format"]

    expires = url.params.get("Expires")
    if expires:
        expires = datetime.utcfromtimestamp(int(expires))
        now = datetime.utcnow()
        if expires < now:
            raise DownloadUrlExpired(lr_file)

    return lr, url, codec


async def download_aaxc(
    client, output_dir, base_filename, item, quality, overwrite_existing,
    filename_mode
    client, output_dir, base_filename, item,
    quality, overwrite_existing
):
    lr, url, codec = None, None, None

@@ -419,7 +269,7 @@ async def download_aaxc(
    filepath = pathlib.Path(
        output_dir) / f"{base_filename}-{codec}.aaxc"
    lr_file = filepath.with_suffix(".voucher")

    if lr_file.is_file():
        if filepath.is_file():
            logger.info(

@@ -429,23 +279,23 @@ async def download_aaxc(
                f"File {filepath} already exists. Skip download."
            )
            return
        else:
            logger.info(
                f"Loading data from voucher file {lr_file}."
            )
            async with aiofiles.open(lr_file, "r") as f:
                lr = await f.read()
            lr = json.loads(lr)
            content_metadata = lr["content_license"][
                "content_metadata"]
            url = httpx.URL(
                content_metadata["content_url"]["offline_url"])
            codec = content_metadata["content_reference"][
                "content_format"]

        try:
            lr, url, codec = await _reuse_voucher(lr_file, item)
        except DownloadUrlExpired:
            logger.debug(f"Download url in {lr_file} is expired. Refreshing license.")
            overwrite_existing = True
        except VoucherNeedRefresh:
            logger.debug(f"Refresh date for voucher {lr_file} reached. Refreshing license.")
            overwrite_existing = True

    is_aycl = item.benefit_id == "AYCL"

    if lr is None or url is None or codec is None:
    if url is None or codec is None or lr is None:
        url, codec, lr = await item.get_aaxc_url(quality)
        counter.count_voucher()
        if is_aycl:
            counter.count_aycl_voucher()

    if codec.lower() == "mpeg":
        ext = "mp3"

@@ -467,50 +317,36 @@ async def download_aaxc(
        logger.info(f"Voucher file saved to {lr_file}.")
        counter.count_voucher_saved()

    dl = NewDownloader(
        source=url,
        client=client,
        expected_types=[
    dl = Downloader(
        url,
        filepath,
        client,
        overwrite_existing,
        [
            "audio/aax", "audio/vnd.audible.aax", "audio/mpeg", "audio/x-m4a",
            "audio/audible"
        ],
        ]
    )
    downloaded = await dl.run(target=filepath, force_reload=overwrite_existing)
    downloaded = await dl.run(pb=True)

    if downloaded.status == Status.Success:
    if downloaded:
        counter.count_aaxc()
        if is_aycl:
            counter.count_aycl()
    elif downloaded.status == Status.DownloadIndividualParts:
        logger.info(
            f"Item {filepath} must be downloaded in parts. Adding parts to queue"
        )
        await _add_audioparts_to_queue(
            client=client,
            output_dir=output_dir,
            filename_mode=filename_mode,
            item=item,
            quality=quality,
            overwrite_existing=overwrite_existing,
            aax_fallback=False,
            download_mode="aaxc"
        )


async def consume(ignore_errors):
async def consume(queue):
    while True:
        cmd, kwargs = await QUEUE.get()
        item = await queue.get()
        try:
            await cmd(**kwargs)
            await item
        except Exception as e:
            logger.error(e)
            if not ignore_errors:
                raise
            raise
        finally:
            QUEUE.task_done()
            queue.task_done()


def queue_job(
    queue,
    get_cover,
    get_pdf,
    get_annotation,

@@ -521,8 +357,7 @@ def queue_job(
    output_dir,
    filename_mode,
    item,
    cover_sizes,
    chapter_type,
    cover_size,
    quality,
    overwrite_existing,
    aax_fallback

@@ -530,77 +365,73 @@ def queue_job(
    base_filename = item.create_base_filename(filename_mode)

    if get_cover:
        for cover_size in cover_sizes:
            cmd = download_cover
            kwargs = {
                "client": client,
                "output_dir": output_dir,
                "base_filename": base_filename,
                "item": item,
                "res": cover_size,
                "overwrite_existing": overwrite_existing
            }
            QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_cover(
                client=client,
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                res=cover_size,
                overwrite_existing=overwrite_existing
            )
        )

    if get_pdf:
        cmd = download_pdf
        kwargs = {
            "client": client,
            "output_dir": output_dir,
            "base_filename": base_filename,
            "item": item,
            "overwrite_existing": overwrite_existing
        }
        QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_pdf(
                client=client,
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                overwrite_existing=overwrite_existing
            )
        )

    if get_chapters:
        cmd = download_chapters
        kwargs = {
            "output_dir": output_dir,
            "base_filename": base_filename,
            "item": item,
            "quality": quality,
            "overwrite_existing": overwrite_existing,
            "chapter_type": chapter_type
        }
        QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_chapters(
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                quality=quality,
                overwrite_existing=overwrite_existing
            )
        )

    if get_annotation:
        cmd = download_annotations
        kwargs = {
            "output_dir": output_dir,
            "base_filename": base_filename,
            "item": item,
            "overwrite_existing": overwrite_existing
        }
        QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_annotations(
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                overwrite_existing=overwrite_existing
            )
        )

    if get_aax:
        cmd = download_aax
        kwargs = {
            "client": client,
            "output_dir": output_dir,
            "base_filename": base_filename,
            "item": item,
            "quality": quality,
            "overwrite_existing": overwrite_existing,
            "aax_fallback": aax_fallback,
            "filename_mode": filename_mode
        }
        QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_aax(
                client=client,
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                quality=quality,
                overwrite_existing=overwrite_existing,
                aax_fallback=aax_fallback
            )
        )

    if get_aaxc:
        cmd = download_aaxc
        kwargs = {
            "client": client,
            "output_dir": output_dir,
            "base_filename": base_filename,
            "item": item,
            "quality": quality,
            "overwrite_existing": overwrite_existing,
            "filename_mode": filename_mode
        }
        QUEUE.put_nowait((cmd, kwargs))
        queue.put_nowait(
            download_aaxc(
                client=client,
                output_dir=output_dir,
                base_filename=base_filename,
                item=item,
                quality=quality,
                overwrite_existing=overwrite_existing
            )
        )


def display_counter():

@@ -612,8 +443,6 @@ def display_counter():
        if k == "voucher_saved":
            k = "voucher"
        elif k == "aycl_voucher":
            k = "aycl voucher"
        elif k == "voucher":
            diff = v - counter.voucher_saved
            if diff > 0:

@@ -682,28 +511,19 @@ def display_counter():
    "--cover-size",
    type=click.Choice(["252", "315", "360", "408", "500", "558", "570", "882",
                       "900", "1215"]),
    default=["500"],
    multiple=True,
    help="The cover pixel size. This option can be provided multiple times."
    default="500",
    help="the cover pixel size"
)
@click.option(
    "--chapter",
    is_flag=True,
    help="Saves chapter metadata as JSON file."
)
@click.option(
    "--chapter-type",
    default="config",
    type=click.Choice(["Flat", "Tree", "config"], case_sensitive=False),
    help="The chapter type."
    help="saves chapter metadata as JSON file"
)
@click.option(
    "--annotation",
    is_flag=True,
    help="saves the annotations (e.g. bookmarks, notes) as JSON file"
)
@start_date_option
@end_date_option
@click.option(
    "--no-confirm", "-y",
    is_flag=True,

@@ -758,10 +578,8 @@ async def cli(session, api_client, **params):
    asins = params.get("asin")
    titles = params.get("title")
    if get_all and (asins or titles):
        raise click.BadOptionUsage(
            "--all",
            "`--all` can not be used together with `--asin` or `--title`"
        )
        logger.error(f"Do not mix *asin* or *title* option with *all* option.")
        click.Abort()

    # what to download
    get_aax = params.get("aax")

@@ -769,9 +587,7 @@ async def cli(session, api_client, **params):
    aax_fallback = params.get("aax_fallback")
    if aax_fallback:
        if get_aax:
            logger.info(
                "Using --aax is redundant and can be left when using --aax-fallback"
            )
            logger.info("Using --aax is redundant and can be left when using --aax-fallback")
        get_aax = True
        if get_aaxc:
            logger.warning("Do not mix --aaxc with --aax-fallback option.")

@@ -782,49 +598,20 @@ async def cli(session, api_client, **params):
    if not any(
        [get_aax, get_aaxc, get_annotation, get_chapters, get_cover, get_pdf]
    ):
        raise click.BadOptionUsage(
            "",
            "Please select an option what you want download."
        )
        logger.error("Please select an option what you want download.")
        click.Abort()

    # additional options
    sim_jobs = params.get("jobs")
    quality = params.get("quality")
    cover_sizes = list(set(params.get("cover_size")))
    cover_size = params.get("cover_size")
    overwrite_existing = params.get("overwrite")
    ignore_errors = params.get("ignore_errors")
    no_confirm = params.get("no_confirm")
    resolve_podcasts = params.get("resolve_podcasts")
    resolve_podcats = params.get("resolve_podcasts")
    ignore_podcasts = params.get("ignore_podcasts")
    if all([resolve_podcasts, ignore_podcasts]):
        raise click.BadOptionUsage(
            "",
            "Do not mix *ignore-podcasts* with *resolve-podcasts* option."
        )
    bunch_size = session.params.get("bunch_size")

    start_date = session.params.get("start_date")
    end_date = session.params.get("end_date")
    if all([start_date, end_date]) and start_date > end_date:
        raise click.BadOptionUsage(
            "",
            "start date must be before or equal the end date"
        )

    if start_date is not None:
        logger.info(
            f"Selected start date: {start_date.strftime('%Y-%m-%dT%H:%M:%S.%fZ')}"
        )
    if end_date is not None:
        logger.info(
            f"Selected end date: {end_date.strftime('%Y-%m-%dT%H:%M:%S.%fZ')}"
        )

    chapter_type = params.get("chapter_type")
    if chapter_type == "config":
        chapter_type = session.config.get_profile_option(
            session.selected_profile, "chapter_type") or "Tree"

    filename_mode = params.get("filename_mode")
    if filename_mode == "config":
        filename_mode = session.config.get_profile_option(

@@ -833,20 +620,12 @@ async def cli(session, api_client, **params):
    # fetch the user library
    library = await Library.from_api_full_sync(
        api_client,
        image_sizes=", ".join(cover_sizes),
        bunch_size=bunch_size,
        response_groups=(
            "product_desc, media, product_attrs, relationships, "
            "series, customer_rights, pdf_url"
        ),
        start_date=start_date,
        end_date=end_date,
        status="Active",
        image_sizes="1215, 408, 360, 882, 315, 570, 252, 558, 900, 500",
        bunch_size=bunch_size
    )

    if resolve_podcasts:
        await library.resolve_podcasts(start_date=start_date, end_date=end_date)
        [library.data.remove(i) for i in library if i.is_parent_podcast()]
    if resolve_podcats:
        await library.resolve_podcats()

    # collect jobs
    jobs = []

@@ -863,7 +642,7 @@ async def cli(session, api_client, **params):
        else:
            if not ignore_errors:
                logger.error(f"Asin {asin} not found in library.")
                raise click.Abort()
                click.Abort()
            logger.error(
                f"Skip asin {asin}: Not found in library"
            )

@@ -889,30 +668,22 @@ async def cli(session, api_client, **params):
            ).unsafe_ask_async()
            if answer is not None:
                [jobs.append(i) for i in answer]

        else:
            logger.error(
                f"Skip title {title}: Not found in library"
            )

    # set queue
    global QUEUE
    QUEUE = asyncio.Queue()

    queue = asyncio.Queue()
    for job in jobs:
        item = library.get_item_by_asin(job)
        items = [item]
        odir = pathlib.Path(output_dir)

        if item.is_parent_podcast():
            if ignore_podcasts:
                continue

        if not ignore_podcasts and item.is_parent_podcast():
            items.remove(item)
            if item._children is None:
                await item.get_child_items(
                    start_date=start_date, end_date=end_date
                )
                await item.get_child_items()

            for i in item._children:
                if i.asin not in jobs:

@@ -925,6 +696,7 @@ async def cli(session, api_client, **params):
    for item in items:
        queue_job(
            queue=queue,
            get_cover=get_cover,
            get_pdf=get_pdf,
            get_annotation=get_annotation,

@@ -935,24 +707,24 @@ async def cli(session, api_client, **params):
            output_dir=odir,
            filename_mode=filename_mode,
            item=item,
            cover_sizes=cover_sizes,
            chapter_type=chapter_type,
            cover_size=cover_size,
            quality=quality,
            overwrite_existing=overwrite_existing,
            aax_fallback=aax_fallback
        )

    # schedule the consumer
    consumers = [
        asyncio.ensure_future(consume(ignore_errors)) for _ in range(sim_jobs)
    ]
    try:
        # schedule the consumer
        consumers = [
            asyncio.ensure_future(consume(queue)) for _ in range(sim_jobs)
        ]
        # wait until the consumer has processed all items
        await QUEUE.join()
        await queue.join()

    finally:
        # the consumer is still awaiting an item, cancel it
        for consumer in consumers:
            consumer.cancel()

        await asyncio.gather(*consumers, return_exceptions=True)
        display_counter()
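
Both versions of this command drive downloads through the same asyncio producer-consumer pattern: jobs go onto an `asyncio.Queue`, a fixed number of consumer tasks drain it, `queue.join()` waits for completion, and the consumers are cancelled afterwards. A minimal runnable sketch of that pattern, independent of the project's own helpers:

```python
import asyncio


async def worker(queue: asyncio.Queue, ignore_errors: bool) -> None:
    # Each consumer pulls (coroutine function, kwargs) jobs until cancelled.
    while True:
        cmd, kwargs = await queue.get()
        try:
            await cmd(**kwargs)
        except Exception as exc:
            print(f"job failed: {exc}")
            if not ignore_errors:
                raise
        finally:
            queue.task_done()


async def fake_download(name: str) -> None:
    await asyncio.sleep(0.1)
    print(f"downloaded {name}")


async def main(sim_jobs: int = 3) -> None:
    queue: asyncio.Queue = asyncio.Queue()
    for n in range(10):
        queue.put_nowait((fake_download, {"name": f"item-{n}"}))

    consumers = [
        asyncio.ensure_future(worker(queue, ignore_errors=True))
        for _ in range(sim_jobs)
    ]
    try:
        await queue.join()  # wait until every queued job is processed
    finally:
        # consumers still block on queue.get(); cancel and collect them
        for consumer in consumers:
            consumer.cancel()
        await asyncio.gather(*consumers, return_exceptions=True)


asyncio.run(main())
```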

@@ -7,8 +7,6 @@ from click import echo
from ..decorators import (
    bunch_size_option,
    end_date_option,
    start_date_option,
    timeout_option,
    pass_client,
    pass_session,

@@ -23,12 +21,10 @@ def cli():
    """interact with library"""


async def _get_library(session, client, resolve_podcasts):
async def _get_library(session, client):
    bunch_size = session.params.get("bunch_size")
    start_date = session.params.get("start_date")
    end_date = session.params.get("end_date")

    library = await Library.from_api_full_sync(
    return await Library.from_api_full_sync(
        client,
        response_groups=(
            "contributors, media, price, product_attrs, product_desc, "

@@ -39,16 +35,9 @@ async def _get_library(session, client, resolve_podcasts):
            "is_finished, is_returnable, origin_asin, pdf_url, "
            "percent_complete, provided_review"
        ),
        bunch_size=bunch_size,
        start_date=start_date,
        end_date=end_date
        bunch_size=bunch_size
    )

    if resolve_podcasts:
        await library.resolve_podcasts(start_date=start_date, end_date=end_date)

    return library

@cli.command("export")
@click.option(

@@ -72,8 +61,6 @@ async def _get_library(session, client, resolve_podcasts):
    is_flag=True,
    help="Resolve podcasts to show all episodes"
)
@start_date_option
@end_date_option
@pass_session
@pass_client
async def export_library(session, client, **params):

@@ -118,18 +105,18 @@ async def export_library(session, client, **params):
    suffix = "." + output_format
    output_filename = output_filename.with_suffix(suffix)

    resolve_podcasts = params.get("resolve_podcasts")
    library = await _get_library(session, client, resolve_podcasts)
    library = await _get_library(session, client)
    if params.get("resolve_podcasts"):
        await library.resolve_podcats()

    keys_with_raw_values = (
        "asin", "title", "subtitle", "extended_product_description", "runtime_length_min", "is_finished",
        "percent_complete", "release_date", "purchase_date"
        "asin", "title", "subtitle", "runtime_length_min", "is_finished",
        "percent_complete", "release_date"
    )

    prepared_library = await asyncio.gather(
        *[_prepare_item(i) for i in library]
    )
    prepared_library = [i for i in prepared_library if i is not None]
    prepared_library.sort(key=lambda x: x["asin"])

    if output_format in ("tsv", "csv"):

@@ -139,10 +126,10 @@ async def export_library(session, client, **params):
        dialect = "excel-tab"

    headers = (
        "asin", "title", "subtitle", "extended_product_description", "authors", "narrators", "series_title",
        "asin", "title", "subtitle", "authors", "narrators", "series_title",
        "series_sequence", "genres", "runtime_length_min", "is_finished",
        "percent_complete", "rating", "num_ratings", "date_added",
        "release_date", "cover_url", "purchase_date"
        "release_date", "cover_url"
    )

    export_to_csv(output_filename, prepared_library, headers, dialect)

@@ -160,11 +147,9 @@ async def export_library(session, client, **params):
    is_flag=True,
    help="Resolve podcasts to show all episodes"
)
@start_date_option
@end_date_option
@pass_session
@pass_client
async def list_library(session, client, resolve_podcasts):
async def list_library(session, client, resolve_podcasts=False):
    """list titles in library"""

    @wrap_async

@@ -186,9 +171,14 @@ async def list_library(session, client, resolve_podcasts):
        fields.append(item.title)
        return ": ".join(fields)

    library = await _get_library(session, client, resolve_podcasts)
    library = await _get_library(session, client)

    if resolve_podcasts:
        await library.resolve_podcats()

    books = await asyncio.gather(
        *[_prepare_item(i) for i in library]
    )
    [echo(i) for i in sorted(books) if len(i) > 0]

    for i in sorted(books):
        echo(i)
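
The export command picks a `csv` dialect ("excel" for `.csv`, "excel-tab" for `.tsv`) and hands the prepared rows to an `export_to_csv` helper. The helper itself is not shown in this diff; a hedged sketch of what such a function could look like, using only the stdlib `csv` module:

```python
import csv
import pathlib
from typing import Dict, Iterable, Sequence


def export_to_csv(
    file: pathlib.Path,
    data: Iterable[Dict[str, object]],
    headers: Sequence[str],
    dialect: str,
) -> None:
    """Sketch of a CSV/TSV export helper: one row per library item,
    restricted to the given header columns."""
    with file.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(
            f, fieldnames=headers, dialect=dialect, extrasaction="ignore"
        )
        writer.writeheader()
        for row in data:
            writer.writerow(row)


# hypothetical usage with the tab-separated dialect from the diff
rows = [{"asin": "B00TEST", "title": "Example", "extra": "dropped"}]
export_to_csv(pathlib.Path("library.tsv"), rows, ("asin", "title"), "excel-tab")
```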

@@ -6,7 +6,6 @@ from audible import Authenticator
from click import echo, secho
from tabulate import tabulate

from ..constants import AVAILABLE_MARKETPLACES
from ..decorators import pass_session
from ..utils import build_auth_file

@@ -73,7 +72,8 @@ def list_profiles(session):
@click.option(
    "--country-code", "-cc",
    prompt="Please enter the country code",
    type=click.Choice(AVAILABLE_MARKETPLACES),
    type=click.Choice([
        "us", "ca", "uk", "au", "fr", "de", "jp", "it", "in"]),
    help="The country code for the profile."
)
@click.option(

@@ -160,7 +160,7 @@ def check_if_auth_file_not_exists(session, ctx, param, value):
)
@click.option(
    "--country-code", "-cc",
    type=click.Choice(AVAILABLE_MARKETPLACES),
    type=click.Choice(["us", "ca", "uk", "au", "fr", "de", "jp", "it", "in"]),
    prompt="Please enter the country code",
    help="The country code for the marketplace you want to authenticate."
)

@@ -8,11 +8,7 @@ from tabulate import tabulate
from .. import __version__
from ..config import ConfigFile
from ..constants import (
    AVAILABLE_MARKETPLACES,
    CONFIG_FILE,
    DEFAULT_AUTH_FILE_EXTENSION
)
from ..constants import CONFIG_FILE, DEFAULT_AUTH_FILE_EXTENSION
from ..decorators import pass_session
from ..utils import build_auth_file

@@ -71,11 +67,13 @@ an authentication to the audible server is necessary to register a new device.
        "Please enter a name for your primary profile",
        default="audible")

    available_country_codes = [
        "us", "ca", "uk", "au", "fr", "de", "es", "jp", "it", "in"]
    echo()
    d["country_code"] = prompt(
        "Enter a country code for the profile",
        show_choices=False,
        type=click.Choice(AVAILABLE_MARKETPLACES)
        type=click.Choice(available_country_codes)
    )

    echo()

@@ -1,7 +1,5 @@
from typing import Dict

from audible.localization import LOCALE_TEMPLATES


APP_NAME: str = "Audible"
CONFIG_FILE: str = "config.toml"

@@ -18,7 +16,3 @@ DEFAULT_CONFIG_DATA: Dict[str, str] = {
}
CODEC_HIGH_QUALITY: str = "AAX_44_128"
CODEC_NORMAL_QUALITY: str = "AAX_44_64"

AVAILABLE_MARKETPLACES = [
    market["country_code"] for market in LOCALE_TEMPLATES.values()
]

@@ -7,7 +7,6 @@ import httpx
from packaging.version import parse

from .config import Session
from .utils import datetime_type
from ._logging import _normalize_logger
from . import __version__

@@ -95,7 +94,7 @@ def version_option(func=None, **kwargs):
        response.raise_for_status()
    except Exception as e:
        logger.error(e)
        raise click.Abort()
        click.Abort()

    content = response.json()

@@ -201,7 +200,7 @@ def timeout_option(func=None, **kwargs):
        return value

    kwargs.setdefault("type", click.INT)
    kwargs.setdefault("default", 30)
    kwargs.setdefault("default", 10)
    kwargs.setdefault("show_default", True)
    kwargs.setdefault(
        "help", ("Increase the timeout time if you got any TimeoutErrors. "

@@ -237,37 +236,3 @@ def bunch_size_option(func=None, **kwargs):
        return option(func)

    return option


def start_date_option(func=None, **kwargs):
    kwargs.setdefault("type", datetime_type)
    kwargs.setdefault(
        "help",
        "Only considers books added to library on or after this UTC date."
    )
    kwargs.setdefault("callback", add_param_to_session)
    kwargs.setdefault("expose_value", False)

    option = click.option("--start-date", **kwargs)

    if callable(func):
        return option(func)

    return option


def end_date_option(func=None, **kwargs):
    kwargs.setdefault("type", datetime_type)
    kwargs.setdefault(
        "help",
        "Only considers books added to library on or before this UTC date."
    )
    kwargs.setdefault("callback", add_param_to_session)
    kwargs.setdefault("expose_value", False)

    option = click.option("--end-date", **kwargs)

    if callable(func):
        return option(func)

    return option
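
The new `start_date_option` and `end_date_option` decorators follow the same factory pattern as the existing options: a function that works both bare (`@start_date_option`) and parameterized (`@start_date_option(help="...")`), with `setdefault` so callers can override any keyword. A small standalone sketch of that pattern with a made-up `--verbose` flag:

```python
import click


def verbose_option(func=None, **kwargs):
    """Reusable click option factory, usable as @verbose_option or as
    @verbose_option(help="...") with per-command overrides."""
    kwargs.setdefault("is_flag", True)
    kwargs.setdefault("help", "Enable verbose output.")

    option = click.option("--verbose", "-v", **kwargs)

    # Called bare: func is the decorated command, apply the option directly.
    if callable(func):
        return option(func)

    # Called with arguments: return the configured decorator.
    return option


@click.command()
@verbose_option
def demo(verbose):
    click.echo(f"verbose={verbose}")
```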

@@ -1,563 +0,0 @@
import logging
import pathlib
import re
from enum import Enum, auto
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Union

import aiofiles
import click
import httpx
import tqdm
from aiofiles.os import path, unlink

try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal


FileMode = Literal["ab", "wb"]

logger = logging.getLogger("audible_cli.downloader")

ACCEPT_RANGES_HEADER = "Accept-Ranges"
ACCEPT_RANGES_NONE_VALUE = "none"
CONTENT_LENGTH_HEADER = "Content-Length"
CONTENT_TYPE_HEADER = "Content-Type"
MAX_FILE_READ_SIZE = 3 * 1024 * 1024
ETAG_HEADER = "ETag"

class ETag:
    def __init__(self, etag: str) -> None:
        self._etag = etag

    @property
    def value(self) -> str:
        return self._etag

    @property
    def parsed_etag(self) -> str:
        return re.search('"([^"]*)"', self.value).group(1)

    @property
    def is_weak(self) -> bool:
        return bool(re.search("^W/", self.value))


class File:
    def __init__(self, file: Union[pathlib.Path, str]) -> None:
        if not isinstance(file, pathlib.Path):
            file = pathlib.Path(file)
        self._file = file

    @property
    def path(self) -> pathlib.Path:
        return self._file

    async def get_size(self) -> int:
        if await path.isfile(self.path):
            return await path.getsize(self.path)
        return 0

    async def remove(self) -> None:
        if await path.isfile(self.path):
            await unlink(self.path)

    async def directory_exists(self) -> bool:
        return await path.isdir(self.path.parent)

    async def is_file(self) -> bool:
        return await path.isfile(self.path) and not await self.is_link()

    async def is_link(self) -> bool:
        return await path.islink(self.path)

    async def exists(self) -> bool:
        return await path.exists(self.path)

    async def read_text_content(
        self, max_bytes: int = MAX_FILE_READ_SIZE, encoding: str = "utf-8", errors=None
    ) -> str:
        file_size = await self.get_size()
        read_size = min(max_bytes, file_size)
        try:
            async with aiofiles.open(
                file=self.path, mode="r", encoding=encoding, errors=errors
            ) as file:
                return await file.read(read_size)
        except Exception:  # noqa
            return "Unknown"

class ResponseInfo:
    def __init__(self, response: httpx.Response) -> None:
        self._response = response
        self.headers: httpx.Headers = response.headers
        self.status_code: int = response.status_code
        self.content_length: Optional[int] = self._get_content_length(self.headers)
        self.content_type: Optional[str] = self._get_content_type(self.headers)
        self.accept_ranges: bool = self._does_accept_ranges(self.headers)
        self.etag: Optional[ETag] = self._get_etag(self.headers)

    @property
    def response(self) -> httpx.Response:
        return self._response

    def supports_resume(self) -> bool:
        return bool(self.accept_ranges)

    @staticmethod
    def _does_accept_ranges(headers: httpx.Headers) -> bool:
        # 'Accept-Ranges' indicates if the source accepts range requests,
        # that let you retrieve a part of the response
        accept_ranges_value = headers.get(
            ACCEPT_RANGES_HEADER, ACCEPT_RANGES_NONE_VALUE
        )
        does_accept_ranges = accept_ranges_value != ACCEPT_RANGES_NONE_VALUE

        return does_accept_ranges

    @staticmethod
    def _get_content_length(headers: httpx.Headers) -> Optional[int]:
        content_length = headers.get(CONTENT_LENGTH_HEADER)

        if content_length is not None:
            return int(content_length)

        return content_length

    @staticmethod
    def _get_content_type(headers: httpx.Headers) -> Optional[str]:
        return headers.get(CONTENT_TYPE_HEADER)

    @staticmethod
    def _get_etag(headers: httpx.Headers) -> Optional[ETag]:
        etag_header = headers.get(ETAG_HEADER)
        if etag_header is None:
            return etag_header
        return ETag(etag_header)

class Status(Enum):
    Success = auto()
    DestinationAlreadyExists = auto()
    DestinationFolderNotExists = auto()
    DestinationNotAFile = auto()
    DownloadError = auto()
    DownloadErrorStatusCode = auto()
    DownloadSizeMismatch = auto()
    DownloadContentTypeMismatch = auto()
    DownloadIndividualParts = auto()
    SourceDoesNotSupportResume = auto()
    StatusCode = auto()


async def check_target_file_status(
    target_file: File, force_reload: bool, **kwargs: Any
) -> Status:
    if not await target_file.directory_exists():
        logger.error(
            f"Folder {target_file.path} does not exist! Skip download."
        )
        return Status.DestinationFolderNotExists

    if await target_file.exists() and not await target_file.is_file():
        logger.error(
            f"Object {target_file.path} exists but is not a file. Skip download."
        )
        return Status.DestinationNotAFile

    if await target_file.is_file() and not force_reload:
        logger.info(
            f"File {target_file.path} already exists. Skip download."
        )
        return Status.DestinationAlreadyExists

    return Status.Success


async def check_download_size(
    tmp_file: File, target_file: File, head_response: ResponseInfo, **kwargs: Any
) -> Status:
    tmp_file_size = await tmp_file.get_size()
    content_length = head_response.content_length

    if tmp_file_size is not None and content_length is not None:
        if tmp_file_size != content_length:
            logger.error(
                f"Error downloading {target_file.path}. File size mismatch. "
                f"Expected size: {content_length}; Downloaded: {tmp_file_size}"
            )
            return Status.DownloadSizeMismatch

    return Status.Success


async def check_status_code(
    response: ResponseInfo, tmp_file: File, target_file: File, **kwargs: Any
) -> Status:
    if not 200 <= response.status_code < 400:
        content = await tmp_file.read_text_content()
        logger.error(
            f"Error downloading {target_file.path}. Message: {content}"
        )
        return Status.StatusCode

    return Status.Success


async def check_content_type(
    response: ResponseInfo, target_file: File, tmp_file: File,
    expected_types: List[str], **kwargs: Any
) -> Status:
    if not expected_types:
        return Status.Success

    if response.content_type not in expected_types:
        content = await tmp_file.read_text_content()
        logger.error(
            f"Error downloading {target_file.path}. Wrong content type. "
            f"Expected type(s): {expected_types}; "
            f"Got: {response.content_type}; Message: {content}"
        )
        return Status.DownloadContentTypeMismatch

    return Status.Success


def _status_for_message(message: str) -> Status:
    if "please download individual parts" in message:
        return Status.DownloadIndividualParts
    return Status.Success


async def check_status_for_message(
    response: ResponseInfo, tmp_file: File, **kwargs: Any
) -> Status:
    if response.content_type and "text" in response.content_type:
        length = response.content_length or await tmp_file.get_size()
        if length <= MAX_FILE_READ_SIZE:
            message = await tmp_file.read_text_content()
            return _status_for_message(message)

    return Status.Success

class DownloadResult(NamedTuple):
    status: Status
    destination: File
    head_response: Optional[ResponseInfo]
    response: Optional[ResponseInfo]
    message: Optional[str]


class DummyProgressBar:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def update(self, *args, **kwargs):
        pass


def get_progressbar(
    destination: pathlib.Path, total: Optional[int], start: int = 0
) -> Union[tqdm.tqdm, DummyProgressBar]:
    if total is None:
        return DummyProgressBar()

    description = click.format_filename(destination, shorten=True)
    progressbar = tqdm.tqdm(
        desc=description,
        total=total,
        unit="B",
        unit_scale=True,
        unit_divisor=1024
    )
    if start > 0:
        progressbar.update(start)

    return progressbar

class Downloader:

    MIN_STREAM_LENGTH = 10*1024*1024  # using stream mode if source is greater than
    MIN_RESUME_FILE_LENGTH = 10*1024*1024  # keep resume file if file is greater than
    RESUME_SUFFIX = ".resume"
    TMP_SUFFIX = ".tmp"

    def __init__(
        self,
        source: httpx.URL,
        client: httpx.AsyncClient,
        expected_types: Optional[Union[List[str], str]] = None,
        additional_headers: Optional[Dict[str, str]] = None
    ) -> None:
        self._source = source
        self._client = client
        self._expected_types = self._normalize_expected_types(expected_types)
        self._additional_headers = self._normalize_headers(additional_headers)
        self._head_request: Optional[ResponseInfo] = None

    @staticmethod
    def _normalize_expected_types(
        expected_types: Optional[Union[List[str], str]]
    ) -> List[str]:
        if not isinstance(expected_types, list):
            if expected_types is None:
                expected_types = []
            else:
                expected_types = [expected_types]
        return expected_types

    @staticmethod
    def _normalize_headers(headers: Optional[Dict[str, str]]) -> Dict[str, str]:
        if headers is None:
            return {}
        return headers

    async def get_head_response(self, force_recreate: bool = False) -> ResponseInfo:
        if self._head_request is None or force_recreate:
            # switched from a HEAD to a GET request without loading the body;
            # a HEAD request to cds.audible.de can take 1 - 2 minutes to respond,
            # a GET request to the same URI takes ~4-6 seconds
            async with self._client.stream(
                "GET", self._source, headers=self._additional_headers,
                follow_redirects=True,
            ) as head_response:
                if head_response.request.url != self._source:
                    self._source = head_response.request.url
                self._head_request = ResponseInfo(head_response)

        return self._head_request

    async def _determine_resume_file(self, target_file: File) -> File:
        head_response = await self.get_head_response()
        etag = head_response.etag

        if etag is None:
            resume_name = target_file.path
        else:
            parsed_etag = etag.parsed_etag
            resume_name = target_file.path.with_name(parsed_etag)

        resume_file = resume_name.with_suffix(self.RESUME_SUFFIX)

        return File(resume_file)

    def _determine_tmp_file(self, target_file: File) -> File:
        tmp_file = pathlib.Path(target_file.path).with_suffix(self.TMP_SUFFIX)
        return File(tmp_file)

    async def _handle_tmp_file(
        self, tmp_file: File, supports_resume: bool, response: ResponseInfo
    ) -> None:
        tmp_file_size = await tmp_file.get_size()
        expected_size = response.content_length

        if (
            supports_resume and expected_size is not None
            and self.MIN_RESUME_FILE_LENGTH < tmp_file_size < expected_size
        ):
            logger.debug(f"Keep resume file {tmp_file.path}")
        else:
            await tmp_file.remove()

    @staticmethod
    async def _rename_file(
        tmp_file: File, target_file: File, force_reload: bool, response: ResponseInfo
    ) -> Status:
        target_path = target_file.path

        if await target_file.exists() and force_reload:
            i = 0
            while target_path.with_suffix(f"{target_path.suffix}.old.{i}").exists():
                i += 1
            target_path.rename(target_path.with_suffix(f"{target_path.suffix}.old.{i}"))

        tmp_file.path.rename(target_path)
        logger.info(
            f"File {target_path} downloaded in {response.response.elapsed}."
        )
        return Status.Success

    @staticmethod
    async def _check_and_return_download_result(
        status_check_func: Callable,
        tmp_file: File,
        target_file: File,
        response: ResponseInfo,
        head_response: ResponseInfo,
        expected_types: List[str]
    ) -> Optional[DownloadResult]:
        status = await status_check_func(
            response=response,
            tmp_file=tmp_file,
            target_file=target_file,
            expected_types=expected_types
        )
        if status != Status.Success:
            message = await tmp_file.read_text_content()
            return DownloadResult(
                status=status,
                destination=target_file,
                head_response=head_response,
                response=response,
                message=message
            )
        return None

    async def _postprocessing(
        self, tmp_file: File, target_file: File, response: ResponseInfo,
        force_reload: bool
    ) -> DownloadResult:
        head_response = await self.get_head_response()

        status_checks = [
            check_status_for_message,
            check_status_code,
            check_download_size,
            check_content_type
        ]
        for check in status_checks:
            result = await self._check_and_return_download_result(
                check, tmp_file, target_file, response,
                head_response, self._expected_types
            )
            if result:
                return result

        await self._rename_file(
            tmp_file=tmp_file,
            target_file=target_file,
            force_reload=force_reload,
            response=response,
        )

        return DownloadResult(
            status=Status.Success,
            destination=target_file,
            head_response=head_response,
            response=response,
            message=None
        )

    async def _stream_download(
        self,
        tmp_file: File,
        target_file: File,
        start: int,
        progressbar: Union[tqdm.tqdm, DummyProgressBar],
        force_reload: bool = True
    ) -> DownloadResult:
        headers = self._additional_headers.copy()
        if start > 0:
            headers.update(Range=f"bytes={start}-")
            file_mode: FileMode = "ab"
        else:
            file_mode: FileMode = "wb"

        async with self._client.stream(
            method="GET", url=self._source, follow_redirects=True, headers=headers
        ) as response:
            with progressbar:
                async with aiofiles.open(tmp_file.path, mode=file_mode) as file:
                    async for chunk in response.aiter_bytes():
                        await file.write(chunk)
                        progressbar.update(len(chunk))

        return await self._postprocessing(
            tmp_file=tmp_file,
            target_file=target_file,
            response=ResponseInfo(response=response),
            force_reload=force_reload
        )

    async def _download(
        self, tmp_file: File, target_file: File, start: int, force_reload: bool
    ) -> DownloadResult:
        headers = self._additional_headers.copy()
        if start > 0:
            headers.update(Range=f"bytes={start}-")
            file_mode: FileMode = "ab"
        else:
            file_mode: FileMode = "wb"

        response = await self._client.get(
            self._source, follow_redirects=True, headers=headers
        )
        async with aiofiles.open(tmp_file.path, mode=file_mode) as file:
            await file.write(response.content)

        return await self._postprocessing(
            tmp_file=tmp_file,
            target_file=target_file,
            response=ResponseInfo(response=response),
            force_reload=force_reload
        )

    async def run(
        self,
        target: pathlib.Path,
        force_reload: bool = False
    ) -> DownloadResult:
        target_file = File(target)
        destination_status = await check_target_file_status(
            target_file, force_reload
        )
        if destination_status != Status.Success:
            return DownloadResult(
                status=destination_status,
                destination=target_file,
                head_response=None,
                response=None,
                message=None
            )

        head_response = await self.get_head_response()
        supports_resume = head_response.supports_resume()
        if supports_resume:
            tmp_file = await self._determine_resume_file(target_file=target_file)
            start = await tmp_file.get_size()
        else:
            tmp_file = self._determine_tmp_file(target_file=target_file)
            await tmp_file.remove()
            start = 0

        should_stream = False
        progressbar = None
        if (
            head_response.content_length is not None and
            head_response.content_length >= self.MIN_STREAM_LENGTH
        ):
            should_stream = True
            progressbar = get_progressbar(
                target_file.path, head_response.content_length, start
            )

        try:
            if should_stream:
                return await self._stream_download(
                    tmp_file=tmp_file,
                    target_file=target_file,
                    start=start,
                    progressbar=progressbar,
                    force_reload=force_reload
                )
            else:
                return await self._download(
                    tmp_file=tmp_file,
                    target_file=target_file,
                    start=start,
                    force_reload=force_reload
                )
        finally:
            await self._handle_tmp_file(
                tmp_file=tmp_file,
                supports_resume=supports_resume,
                response=head_response
            )
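
The core of this new module is resume support: probe the headers, check `Accept-Ranges`, and continue a partial file with a `Range` request. A condensed, self-contained sketch of that idea using `httpx` directly (error handling, ETag matching, and content-type checks from the full implementation are omitted):

```python
import asyncio
import pathlib

import httpx


async def resumable_download(url: str, target: pathlib.Path) -> None:
    """Minimal sketch of the resume idea: if a partial file exists and the
    server advertises range support, continue from the current size."""
    tmp = target.with_suffix(".tmp")
    async with httpx.AsyncClient() as client:
        # probe headers with a streamed GET (the body is never read)
        async with client.stream("GET", url, follow_redirects=True) as probe:
            supports_resume = probe.headers.get("Accept-Ranges", "none") != "none"

        start = tmp.stat().st_size if (supports_resume and tmp.exists()) else 0
        headers = {"Range": f"bytes={start}-"} if start else {}
        mode = "ab" if start else "wb"

        async with client.stream(
            "GET", url, headers=headers, follow_redirects=True
        ) as response:
            with open(tmp, mode) as file:
                async for chunk in response.aiter_bytes():
                    file.write(chunk)

    tmp.rename(target)


asyncio.run(resumable_download("https://example.com/big.bin", pathlib.Path("big.bin")))
```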
@@ -1,4 +1,3 @@
from datetime import datetime
from pathlib import Path

@@ -42,53 +41,3 @@ class ProfileAlreadyExists(AudibleCliException):
    def __init__(self, name):
        message = f"Profile {name} already exist"
        super().__init__(message)


class LicenseDenied(AudibleCliException):
    """Raised if a license request is not granted"""


class NoDownloadUrl(AudibleCliException):
    """Raised if a license response does not contain a download url"""

    def __init__(self, asin):
        message = f"License response for {asin} does not contain a download url"
        super().__init__(message)


class DownloadUrlExpired(AudibleCliException):
    """Raised if a download url is expired"""

    def __init__(self, lr_file):
        message = f"Download url in {lr_file} is expired."
        super().__init__(message)


class VoucherNeedRefresh(AudibleCliException):
    """Raised if a voucher reached its refresh date"""

    def __init__(self, lr_file):
        message = f"Refresh date for voucher {lr_file} reached."
        super().__init__(message)


class ItemNotPublished(AudibleCliException):
    """Raised if an item is not published yet"""

    def __init__(self, asin: str, pub_date):
        pub_date = datetime.strptime(pub_date, "%Y-%m-%dT%H:%M:%SZ")
        now = datetime.utcnow()
        published_in = pub_date - now

        pub_str = ""
        if published_in.days > 0:
            pub_str += f"{published_in.days} days, "

        seconds = published_in.seconds
        hours, remainder = divmod(seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        hms = "{:02}h:{:02}m:{:02}s".format(int(hours), int(minutes), int(seconds))
        pub_str += hms

        message = f"{asin} is not published. It will be available in {pub_str}"
        super().__init__(message)
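
`DownloadUrlExpired` pairs with the voucher-reuse logic earlier in the diff, which reads the `Expires` query parameter of the signed CDN url before trusting a cached voucher. A hedged sketch of that check (the example url is hypothetical):

```python
from datetime import datetime, timezone

import httpx


def url_is_expired(offline_url: str) -> bool:
    """Sketch: signed download urls carry an 'Expires' query parameter
    holding a unix timestamp; compare it against now (UTC)."""
    url = httpx.URL(offline_url)
    expires = url.params.get("Expires")
    if expires is None:
        return False  # no expiry information, assume still valid
    expires_at = datetime.fromtimestamp(int(expires), tz=timezone.utc)
    return expires_at < datetime.now(timezone.utc)


# hypothetical example url
print(url_is_expired("https://cds.audible.de/file.aaxc?Expires=1700000000"))
```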
@@ -1,12 +1,9 @@
import asyncio
import logging
import secrets
import string
import unicodedata
from datetime import datetime
from math import ceil
from typing import List, Optional, Union
from warnings import warn

import audible
import httpx

@@ -14,13 +11,7 @@ from audible.aescipher import decrypt_voucher_from_licenserequest
from audible.client import convert_response_content

from .constants import CODEC_HIGH_QUALITY, CODEC_NORMAL_QUALITY
from .exceptions import (
    AudibleCliException,
    LicenseDenied,
    NoDownloadUrl,
    NotDownloadableAsAAX,
    ItemNotPublished
)
from .exceptions import AudibleCliException, NotDownloadableAsAAX
from .utils import full_response_callback, LongestSubString

@@ -70,7 +61,7 @@ class BaseItem:
    @property
    def full_title_slugify(self):
        valid_chars = "-_.() " + string.ascii_letters + string.digits
        cleaned_title = unicodedata.normalize("NFKD", self.full_title or "")
        cleaned_title = unicodedata.normalize("NFKD", self.full_title)
        cleaned_title = cleaned_title.encode("ASCII", "ignore")
        cleaned_title = cleaned_title.replace(b" ", b"_")
        slug_title = "".join(

@@ -93,7 +84,7 @@ class BaseItem:
            base_filename = self.full_title_slugify

        elif "unicode" in mode:
            base_filename = unicodedata.normalize("NFKD", self.full_title or "")
            base_filename = unicodedata.normalize("NFKD", self.full_title)

        else:
            base_filename = self.asin

@@ -118,9 +109,6 @@ class BaseItem:
        return images[res]

    def get_pdf_url(self):
        if not self.is_published():
            raise ItemNotPublished(self.asin, self.publication_datetime)

        if self.pdf_url is not None:
            domain = self._client.auth.locale.domain
            return f"https://www.audible.{domain}/companion-file/{self.asin}"

@@ -131,22 +119,6 @@ class BaseItem:
                or self.content_type == "Podcast") and self.has_children:
            return True

    def is_published(self):
        if (
            self.content_delivery_type and self.content_delivery_type == "AudioPart"
            and self._parent
        ):
            publication_datetime = self._parent.publication_datetime
        else:
            publication_datetime = self.publication_datetime

        if publication_datetime is not None:
            pub_date = datetime.strptime(
                publication_datetime, "%Y-%m-%dT%H:%M:%SZ"
            )
            now = datetime.utcnow()
            return now > pub_date


class LibraryItem(BaseItem):
    def _prepare_data(self, data: dict) -> dict:

@@ -244,9 +216,6 @@ class LibraryItem(BaseItem):
        return False

    async def get_aax_url_old(self, quality: str = "high"):
        if not self.is_published():
            raise ItemNotPublished(self.asin, self.publication_datetime)

        if not self.is_downloadable():
            raise AudibleCliException(
                f"{self.full_title} is not downloadable."

@@ -283,8 +252,6 @@ class LibraryItem(BaseItem):
        return httpx.URL(link), codec_name

    async def get_aax_url(self, quality: str = "high"):
        if not self.is_published():
            raise ItemNotPublished(self.asin, self.publication_datetime)

        if not self.is_downloadable():
            raise AudibleCliException(
@@ -305,108 +272,46 @@ class LibraryItem(BaseItem):
        }
        return httpx.URL(url, params=params), codec_name

    async def get_aaxc_url(
        self,
        quality: str = "high",
        license_response_groups: Optional[str] = None
    ):
        if not self.is_published():
            raise ItemNotPublished(self.asin, self.publication_datetime)

    async def get_aaxc_url(self, quality: str = "high"):
        if not self.is_downloadable():
            raise AudibleCliException(
                f"{self.full_title} is not downloadable."
            )

        lr = await self.get_license(quality, license_response_groups)
        assert quality in ("best", "high", "normal",)

        body = {
            "supported_drm_types": ["Mpeg", "Adrm"],
            "quality": "Extreme" if quality in ("best", "high") else "Normal",
            "consumption_type": "Download",
            "response_groups": (
                "last_position_heard, pdf_url, content_reference, chapter_info"
            )
        }

        lr = await self._client.post(
            f"content/{self.asin}/licenserequest",
            body=body
        )

        content_metadata = lr["content_license"]["content_metadata"]
        url = httpx.URL(content_metadata["content_url"]["offline_url"])
        codec = content_metadata["content_reference"]["content_format"]

        voucher = decrypt_voucher_from_licenserequest(self._client.auth, lr)
        lr["content_license"]["license_response"] = voucher

        return url, codec, lr

    async def get_license(
        self,
        quality: str = "high",
        response_groups: Optional[str] = None
    ):
    async def get_content_metadata(self, quality: str = "high"):
        assert quality in ("best", "high", "normal",)

        if response_groups is None:
            response_groups = "last_position_heard, pdf_url, content_reference"

        body = {
            "supported_drm_types": ["Mpeg", "Adrm"],
            "quality": "High" if quality in ("best", "high") else "Normal",
            "consumption_type": "Download",
            "response_groups": response_groups
        }

        headers = {
            "X-Amzn-RequestId": secrets.token_hex(20).upper(),
            "X-ADP-SW": "37801821",
            "X-ADP-Transport": "WIFI",
            "X-ADP-LTO": "120",
            "X-Device-Type-Id": "A2CZJZGLK2JJVM",
            "device_idiom": "phone"
        }
        lr = await self._client.post(
            f"content/{self.asin}/licenserequest",
            body=body,
            headers=headers
        )
        content_license = lr["content_license"]

        if content_license["status_code"] == "Denied":
            if "license_denial_reasons" in content_license:
                for reason in content_license["license_denial_reasons"]:
                    message = reason.get("message", "UNKNOWN")
                    rejection_reason = reason.get("rejectionReason", "UNKNOWN")
                    validation_type = reason.get("validationType", "UNKNOWN")
                    logger.debug(
                        f"License denied message for {self.asin}: {message}."
                        f"Reason: {rejection_reason}."
                        f"Type: {validation_type}"
                    )

            msg = content_license["message"]
            raise LicenseDenied(msg)

        content_url = content_license["content_metadata"]\
            .get("content_url", {}).get("offline_url")
        if content_url is None:
            raise NoDownloadUrl(self.asin)

        if "license_response" in content_license:
            try:
                voucher = decrypt_voucher_from_licenserequest(
                    self._client.auth, lr
                )
            except Exception:
                logger.error(f"Decrypting voucher for {self.asin} failed")
            else:
                content_license["license_response"] = voucher
        else:
            logger.error(f"No voucher for {self.asin} found")

        return lr

    async def get_content_metadata(
        self, quality: str = "high", chapter_type: str = "Tree", **request_kwargs
    ):
        chapter_type = chapter_type.capitalize()
        assert quality in ("best", "high", "normal",)
        assert chapter_type in ("Flat", "Tree")

        url = f"content/{self.asin}/metadata"
        params = {
            "response_groups": "last_position_heard, content_reference, "
                               "chapter_info",
            "quality": "High" if quality in ("best", "high") else "Normal",
            "drm_type": "Adrm",
            "chapter_titles_type": chapter_type,
            **request_kwargs
            "quality": "Extreme" if quality in ("best", "high") else "Normal",
            "drm_type": "Adrm"
        }

        metadata = await self._client.get(url, params=params)

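Aside (hypothetical usage sketch, not part of the compare): the richer get_aaxc_url()/get_license() variant in this hunk surfaces failures as typed exceptions (LicenseDenied, NoDownloadUrl, ItemNotPublished, imported from .exceptions above) instead of returning partial data. A caller might handle them like this, where `item` stands in for a LibraryItem instance:

# Hypothetical caller; the method and exception names come from the hunk above.
async def fetch_aaxc_info(item):
    try:
        url, codec, lr = await item.get_aaxc_url(quality="high")
    except LicenseDenied as exc:
        # Audible refused to issue a license (rights, region, ...).
        print(f"License denied for {item.asin}: {exc}")
    except NoDownloadUrl:
        # A license was granted but carried no offline_url.
        print(f"No download URL for {item.asin}")
    else:
        return url, codec, lr
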
@@ -491,41 +396,8 @@ class Library(BaseList):
        cls,
        api_client: audible.AsyncClient,
        include_total_count_header: bool = False,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        **request_params
    ):
        def filter_by_date(item):
            if item.purchase_date is not None:
                date_added = datetime.strptime(
                    item.purchase_date,
                    "%Y-%m-%dT%H:%M:%S.%fZ"
                )
            elif item.library_status.get("date_added") is not None:
                date_added = datetime.strptime(
                    item.library_status.get("date_added"),
                    "%Y-%m-%dT%H:%M:%S.%fZ"
                )
            else:
                logger.info(
                    f"{item.asin}: {item.full_title} cannot determine date added."
                )
                return True

            if start_date is not None and start_date > date_added:
                return False
            # If a new episode is added to a parent podcast, the purchase_date
            # and date_added are set to this date. This can make it difficult
            # to get older podcast episodes, so parent podcasts always pass;
            # the end date is applied later by the resolve_podcasts function.
            if item.is_parent_podcast():
                return True

            if end_date is not None and end_date < date_added:
                return False

            return True

        if "response_groups" not in request_params:
            request_params["response_groups"] = (
                "contributors, customer_rights, media, price, product_attrs, "

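Aside (illustrative sketch, not part of the compare): filter_by_date keeps an item when its date added falls inside the optional start/end window, with parent podcasts always passing so their episodes can be range-filtered later. The window test in isolation, with hypothetical inputs:

# Condensed sketch of the date-window predicate used by filter_by_date.
from datetime import datetime
from typing import Optional

def in_window(
    date_added: datetime,
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> bool:
    # Reject anything outside the [start, end] window; open ends pass.
    if start is not None and start > date_added:
        return False
    if end is not None and end < date_added:
        return False
    return True

print(in_window(datetime(2022, 6, 1), start=datetime(2022, 1, 1)))  # True
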
@@ -539,14 +411,6 @@ class Library(BaseList):
                "periodicals, provided_review, product_details"
            )

        if start_date is not None:
            if "purchase_date" in request_params:
                raise AudibleCliException(
                    "Do not use purchase_date and start_date together"
                )
            request_params["purchased_after"] = start_date.strftime(
                "%Y-%m-%dT%H:%M:%S.%fZ")

        resp: httpx.Response = await api_client.get(
            "library",
            response_callback=full_response_callback,

@@ -556,9 +420,6 @@ class Library(BaseList):
        total_count_header = resp.headers.get("total-count")
        cls_instance = cls(resp_content, api_client=api_client)

        if start_date is not None or end_date is not None:
            cls_instance._data = list(filter(filter_by_date, cls_instance.data))

        if include_total_count_header:
            return cls_instance, total_count_header
        return cls_instance

@@ -576,8 +437,8 @@ class Library(BaseList):
        library, total_count = await cls.from_api(
            api_client,
            page=1,
            params=request_params,
            include_total_count_header=True,
            **request_params
        )
        pages = ceil(int(total_count) / bunch_size)
        if pages == 1:

@@ -589,7 +450,7 @@ class Library(BaseList):
            cls.from_api(
                api_client,
                page=page,
                **request_params
                params=request_params,
            )
        )

@@ -600,26 +461,9 @@ class Library(BaseList):

        return library

    async def resolve_podcats(
        self,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None
    ):
        warn(
            "resolve_podcats is deprecated, use resolve_podcasts instead",
            DeprecationWarning,
            stacklevel=2
        )
        return self.resolve_podcasts(start_date, end_date)

    async def resolve_podcasts(
        self,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None
    ):
    async def resolve_podcats(self):
        podcast_items = await asyncio.gather(
            *[i.get_child_items(start_date=start_date, end_date=end_date)
              for i in self if i.is_parent_podcast()]
            *[i.get_child_items() for i in self if i.is_parent_podcast()]
        )
        for i in podcast_items:
            self.data.extend(i.data)

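Aside (pattern sketch, not part of the compare): the resolve_podcats shown above is only a thin deprecation shim around the correctly spelled resolve_podcasts. The general shape of such a shim, using the names from this hunk:

# Sketch of the deprecation-shim pattern used here.
from warnings import warn

def resolve_podcasts():
    ...  # the real implementation lives here

def resolve_podcats():
    # Keep the old misspelled name importable, but point callers at the fix.
    # stacklevel=2 makes the warning blame the caller, not this wrapper.
    warn(
        "resolve_podcats is deprecated, use resolve_podcasts instead",
        DeprecationWarning,
        stacklevel=2
    )
    return resolve_podcasts()
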
@@ -681,7 +525,7 @@ class Catalog(BaseList):

        return cls(resp, api_client=api_client)

    async def resolve_podcasts(self):
    async def resolve_podcats(self):
        podcast_items = await asyncio.gather(
            *[i.get_child_items() for i in self if i.is_parent_podcast()]
        )

@@ -28,49 +28,39 @@ def from_folder(plugin_dir: Union[str, pathlib.Path]):
    """
    def decorator(group):
        if not isinstance(group, click.Group):
            raise TypeError(
                "Plugins can only be attached to an instance of click.Group()"
            )
            raise TypeError("Plugins can only be attached to an instance of "
                            "click.Group()")

        plugin_path = pathlib.Path(plugin_dir).resolve()
        sys.path.insert(0, str(plugin_path))
        pdir = pathlib.Path(plugin_dir)
        cmds = [x for x in pdir.glob("cmd_*.py")]
        sys.path.insert(0, str(pdir.resolve()))

        for cmd_path in plugin_path.glob("cmd_*.py"):
            cmd_path_stem = cmd_path.stem
        for cmd in cmds:
            mod_name = cmd.stem
            try:
                mod = import_module(cmd_path_stem)
                cmd = mod.cli
                if cmd.name == "cli":
                    # if no name is given to the command, use the filename
                    # (without the leading cmd_) as the name
                    cmd.name = cmd_path_stem[4:]
                group.add_command(cmd)

                orig_help = cmd.help or ""
                new_help = (
                    f"(P) {orig_help}\n\nPlugin loaded from file: {str(cmd_path)}"
                )
                cmd.help = new_help
                mod = import_module(mod_name)
                name = mod_name[4:] if mod.cli.name == "cli" else mod.cli.name
                group.add_command(mod.cli, name=name)
            except Exception:  # noqa
                # Catch this so a busted plugin doesn't take down the CLI.
                # Handled by registering a dummy command that does nothing
                # other than explain the error.
                group.add_command(BrokenCommand(cmd_path_stem[4:]))
                group.add_command(BrokenCommand(mod_name[4:]))

        return group

    return decorator


def from_entry_point(entry_point_group):
def from_entry_point(entry_point_group: str):
    """
    A decorator to register external CLI commands to an instance of
    `click.Group()`.

    Parameters
    ----------
    entry_point_group : list
        A list producing one `pkg_resources.EntryPoint()` per iteration.
    entry_point_group : iter
        An iterable producing one `pkg_resources.EntryPoint()` per iteration.

    Returns
    -------

@@ -78,23 +68,13 @@ def from_entry_point(entry_point_group):
    """
    def decorator(group):
        if not isinstance(group, click.Group):
            raise TypeError(
                "Plugins can only be attached to an instance of click.Group()"
            )
            print(type(group))
            raise TypeError("Plugins can only be attached to an instance of "
                            "click.Group()")

        for entry_point in entry_point_group or ():
            try:
                cmd = entry_point.load()
                dist_name = entry_point.dist.name
                if cmd.name == "cli":
                    # if no name is given to the command, use the
                    # package's distribution name instead
                    cmd.name = dist_name
                group.add_command(cmd)

                orig_help = cmd.help or ""
                new_help = f"(P) {orig_help}\n\nPlugin loaded from package: {dist_name}"
                cmd.help = new_help
                group.add_command(entry_point.load())
            except Exception:  # noqa
                # Catch this so a busted plugin doesn't take down the CLI.
                # Handled by registering a dummy command that does nothing

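Aside (hypothetical example, not part of the compare): both loader variants expect a plugin module to expose a click command named cli; when the command keeps the default name "cli", the file stem minus the cmd_ prefix (or, for entry points, the distribution name) becomes the command name, and the newer loaders prefix the help text with "(P)". A minimal plugin file, say cmd_hello.py dropped into the plugin folder, that either version of from_folder would register as the `hello` command:

# cmd_hello.py - a hypothetical plugin file for the from_folder loader.
import click

@click.command(name="cli")
def cli():
    """Say hello from a plugin."""
    click.echo("hello from a plugin")
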
@@ -21,15 +21,6 @@ from .constants import DEFAULT_AUTH_FILE_ENCRYPTION
logger = logging.getLogger("audible_cli.utils")


datetime_type = click.DateTime([
    "%Y-%m-%d",
    "%Y-%m-%dT%H:%M:%S",
    "%Y-%m-%d %H:%M:%S",
    "%Y-%m-%dT%H:%M:%S.%fZ",
    "%Y-%m-%dT%H:%M:%SZ"
])


def prompt_captcha_callback(captcha_url: str) -> str:
    """Helper function for handling captcha."""

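Aside (usage sketch, not part of the compare): datetime_type above is a click.DateTime that accepts the listed timestamp spellings, so commands can take date filters as plain strings. A hypothetical option built on it, assuming datetime_type is imported from this utils module:

# Hypothetical command using datetime_type as an option type.
import click

@click.command()
@click.option("--start-date", type=datetime_type, default=None,
              help="Only consider items added on or after this date.")
def cli(start_date):
    # click parses the raw string against the formats listed above
    # and passes a datetime object (or None) to the command.
    click.echo(start_date)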